From c1ad439ee614877e02cafd852bebbb462c6ea8f9 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Fri, 12 Dec 2025 12:30:36 +0100 Subject: [PATCH 01/25] CSPL-4358 Splitting BusConfiguration into Bus and LargeMessageStore --- PROJECT | 11 +- ...busconfiguration_types.go => bus_types.go} | 83 +++--- api/v4/indexercluster_types.go | 16 +- api/v4/ingestorcluster_types.go | 14 +- api/v4/largemessagestore.go | 137 +++++++++ api/v4/zz_generated.deepcopy.go | 168 +++++++++-- cmd/main.go | 11 +- .../bases/enterprise.splunk.com_buses.yaml | 123 ++++++++ ...enterprise.splunk.com_indexerclusters.yaml | 118 +++++++- ...nterprise.splunk.com_ingestorclusters.yaml | 114 +++++++- ...rprise.splunk.com_largemessagestores.yaml} | 55 ++-- config/crd/kustomization.yaml | 3 +- ..._editor_role.yaml => bus_editor_role.yaml} | 6 +- ..._viewer_role.yaml => bus_viewer_role.yaml} | 6 +- .../rbac/largemessagestore_editor_role.yaml | 30 ++ .../rbac/largemessagestore_viewer_role.yaml | 26 ++ config/rbac/role.yaml | 9 +- ...figuration.yaml => enterprise_v4_bus.yaml} | 4 +- .../enterprise_v4_largemessagestore.yaml | 8 + config/samples/kustomization.yaml | 3 +- docs/CustomResources.md | 8 +- docs/IndexIngestionSeparation.md | 74 +++-- .../enterprise_v4_busconfigurations.yaml | 40 --- .../templates/enterprise_v4_buses.yaml | 30 ++ .../enterprise_v4_indexercluster.yaml | 10 +- .../enterprise_v4_ingestorcluster.yaml | 17 +- .../enterprise_v4_largemessagestores.yaml | 28 ++ helm-chart/splunk-enterprise/values.yaml | 8 +- ..._editor_role.yaml => bus_editor_role.yaml} | 12 +- ..._viewer_role.yaml => bus_viewer_role.yaml} | 12 +- .../splunk-operator/templates/rbac/role.yaml | 32 ++- ...ration_controller.go => bus_controller.go} | 38 +-- ...troller_test.go => bus_controller_test.go} | 127 +++++---- .../controller/indexercluster_controller.go | 36 ++- .../controller/ingestorcluster_controller.go | 36 ++- .../ingestorcluster_controller_test.go | 71 ++++- .../largemessagestore_controller.go | 120 ++++++++ 
.../largemessagestore_controller_test.go | 263 ++++++++++++++++++ internal/controller/suite_test.go | 25 +- internal/controller/testutils/new.go | 33 +-- .../01-assert.yaml | 68 +++-- .../02-assert.yaml | 19 +- .../splunk_index_ingest_sep.yaml | 32 ++- pkg/splunk/enterprise/bus.go | 75 +++++ pkg/splunk/enterprise/bus_test.go | 69 +++++ pkg/splunk/enterprise/busconfiguration.go | 140 ---------- .../enterprise/busconfiguration_test.go | 151 ---------- pkg/splunk/enterprise/indexercluster.go | 159 +++++++---- pkg/splunk/enterprise/indexercluster_test.go | 229 ++++++++------- pkg/splunk/enterprise/ingestorcluster.go | 110 +++++--- pkg/splunk/enterprise/ingestorcluster_test.go | 251 ++++++++++------- pkg/splunk/enterprise/largemessagestore.go | 75 +++++ .../enterprise/largemessagestore_test.go | 83 ++++++ pkg/splunk/enterprise/types.go | 13 +- pkg/splunk/enterprise/util.go | 24 +- .../c3/appframework_aws_test.go | 2 +- .../c3/manager_appframework_test.go | 4 +- .../c3/appframework_azure_test.go | 2 +- .../c3/manager_appframework_azure_test.go | 2 +- .../c3/manager_appframework_test.go | 4 +- ...dex_and_ingestion_separation_suite_test.go | 35 +-- .../index_and_ingestion_separation_test.go | 112 +++++--- test/testenv/deployment.go | 79 ++++-- test/testenv/util.go | 37 ++- 64 files changed, 2674 insertions(+), 1066 deletions(-) rename api/v4/{busconfiguration_types.go => bus_types.go} (56%) create mode 100644 api/v4/largemessagestore.go create mode 100644 config/crd/bases/enterprise.splunk.com_buses.yaml rename config/crd/bases/{enterprise.splunk.com_busconfigurations.yaml => enterprise.splunk.com_largemessagestores.yaml} (64%) rename config/rbac/{busconfiguration_editor_role.yaml => bus_editor_role.yaml} (88%) rename config/rbac/{busconfiguration_viewer_role.yaml => bus_viewer_role.yaml} (87%) create mode 100644 config/rbac/largemessagestore_editor_role.yaml create mode 100644 config/rbac/largemessagestore_viewer_role.yaml rename 
config/samples/{enterprise_v4_busconfiguration.yaml => enterprise_v4_bus.yaml} (72%) create mode 100644 config/samples/enterprise_v4_largemessagestore.yaml delete mode 100644 helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml create mode 100644 helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml create mode 100644 helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml rename helm-chart/splunk-operator/templates/rbac/{busconfiguration_editor_role.yaml => bus_editor_role.yaml} (78%) rename helm-chart/splunk-operator/templates/rbac/{busconfiguration_viewer_role.yaml => bus_viewer_role.yaml} (76%) rename internal/controller/{busconfiguration_controller.go => bus_controller.go} (70%) rename internal/controller/{busconfiguration_controller_test.go => bus_controller_test.go} (56%) create mode 100644 internal/controller/largemessagestore_controller.go create mode 100644 internal/controller/largemessagestore_controller_test.go create mode 100644 pkg/splunk/enterprise/bus.go create mode 100644 pkg/splunk/enterprise/bus_test.go delete mode 100644 pkg/splunk/enterprise/busconfiguration.go delete mode 100644 pkg/splunk/enterprise/busconfiguration_test.go create mode 100644 pkg/splunk/enterprise/largemessagestore.go create mode 100644 pkg/splunk/enterprise/largemessagestore_test.go diff --git a/PROJECT b/PROJECT index 983f3418b..aa4aa1078 100644 --- a/PROJECT +++ b/PROJECT @@ -128,7 +128,16 @@ resources: controller: true domain: splunk.com group: enterprise - kind: BusConfiguration + kind: Bus + path: github.com/splunk/splunk-operator/api/v4 + version: v4 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: splunk.com + group: enterprise + kind: LargeMessageStore path: github.com/splunk/splunk-operator/api/v4 version: v4 version: "3" diff --git a/api/v4/busconfiguration_types.go b/api/v4/bus_types.go similarity index 56% rename from api/v4/busconfiguration_types.go rename to api/v4/bus_types.go index 
a4b76a00b..10958f56b 100644 --- a/api/v4/busconfiguration_types.go +++ b/api/v4/bus_types.go @@ -23,35 +23,48 @@ import ( ) const ( - // BusConfigurationPausedAnnotation is the annotation that pauses the reconciliation (triggers + // BusPausedAnnotation is the annotation that pauses the reconciliation (triggers // an immediate requeue) - BusConfigurationPausedAnnotation = "busconfiguration.enterprise.splunk.com/paused" + BusPausedAnnotation = "bus.enterprise.splunk.com/paused" ) -// BusConfigurationSpec defines the desired state of BusConfiguration -type BusConfigurationSpec struct { - Type string `json:"type"` +// +kubebuilder:validation:XValidation:rule="self.provider != 'sqs' || has(self.sqs)",message="sqs must be provided when provider is sqs" +// BusSpec defines the desired state of Bus +type BusSpec struct { + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=sqs + // Provider of queue resources + Provider string `json:"provider"` + + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // Name of the queue + QueueName string `json:"queueName"` + + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$` + // Region of the resources + Region string `json:"region"` + // sqs specific inputs SQS SQSSpec `json:"sqs"` } type SQSSpec struct { - QueueName string `json:"queueName"` - - AuthRegion string `json:"authRegion"` - + // +kubebuilder:validation:Required + // +kubebuilder:validation:MinLength=1 + // Name of the dead letter queue resource + DLQ string `json:"dlq"` + + // +optional + // +kubebuilder:validation:Pattern=`^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$` + // Amazon SQS Service endpoint Endpoint string `json:"endpoint"` - - LargeMessageStoreEndpoint string `json:"largeMessageStoreEndpoint"` - - LargeMessageStorePath string `json:"largeMessageStorePath"` - - 
DeadLetterQueueName string `json:"deadLetterQueueName"` } -// BusConfigurationStatus defines the observed state of BusConfiguration. -type BusConfigurationStatus struct { - // Phase of the bus configuration +// BusStatus defines the observed state of Bus +type BusStatus struct { + // Phase of the bus Phase Phase `json:"phase"` // Resource revision tracker @@ -64,27 +77,27 @@ type BusConfigurationStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// BusConfiguration is the Schema for a Splunk Enterprise bus configuration +// Bus is the Schema for a Splunk Enterprise bus // +k8s:openapi-gen=true // +kubebuilder:subresource:status // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector -// +kubebuilder:resource:path=busconfigurations,scope=Namespaced,shortName=bus -// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of bus configuration" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of bus configuration resource" +// +kubebuilder:resource:path=buses,scope=Namespaced,shortName=bus +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of bus" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of bus resource" // +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status" // +kubebuilder:storageversion -// BusConfiguration is the Schema for the busconfigurations API -type BusConfiguration struct { +// Bus is the Schema for the buses API +type Bus struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` - Spec BusConfigurationSpec `json:"spec"` - Status BusConfigurationStatus `json:"status,omitempty,omitzero"` + Spec BusSpec `json:"spec"` + Status BusStatus 
`json:"status,omitempty,omitzero"` } // DeepCopyObject implements runtime.Object -func (in *BusConfiguration) DeepCopyObject() runtime.Object { +func (in *Bus) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -93,20 +106,20 @@ func (in *BusConfiguration) DeepCopyObject() runtime.Object { // +kubebuilder:object:root=true -// BusConfigurationList contains a list of BusConfiguration -type BusConfigurationList struct { +// BusList contains a list of Bus +type BusList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` - Items []BusConfiguration `json:"items"` + Items []Bus `json:"items"` } func init() { - SchemeBuilder.Register(&BusConfiguration{}, &BusConfigurationList{}) + SchemeBuilder.Register(&Bus{}, &BusList{}) } // NewEvent creates a new event associated with the object and ready // to be published to Kubernetes API -func (bc *BusConfiguration) NewEvent(eventType, reason, message string) corev1.Event { +func (bc *Bus) NewEvent(eventType, reason, message string) corev1.Event { t := metav1.Now() return corev1.Event{ ObjectMeta: metav1.ObjectMeta{ @@ -114,7 +127,7 @@ func (bc *BusConfiguration) NewEvent(eventType, reason, message string) corev1.E Namespace: bc.ObjectMeta.Namespace, }, InvolvedObject: corev1.ObjectReference{ - Kind: "BusConfiguration", + Kind: "Bus", Namespace: bc.Namespace, Name: bc.Name, UID: bc.UID, @@ -123,12 +136,12 @@ func (bc *BusConfiguration) NewEvent(eventType, reason, message string) corev1.E Reason: reason, Message: message, Source: corev1.EventSource{ - Component: "splunk-busconfiguration-controller", + Component: "splunk-bus-controller", }, FirstTimestamp: t, LastTimestamp: t, Count: 1, Type: eventType, - ReportingController: "enterprise.splunk.com/busconfiguration-controller", + ReportingController: "enterprise.splunk.com/bus-controller", } } diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 493aeb0f3..0ec425240 100644 --- 
a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -38,8 +38,13 @@ const ( type IndexerClusterSpec struct { CommonSplunkSpec `json:",inline"` - // Bus configuration reference - BusConfigurationRef corev1.ObjectReference `json:"busConfigurationRef,omitempty"` + // +optional + // Bus reference + BusRef corev1.ObjectReference `json:"busRef"` + + // +optional + // Large Message Store reference + LargeMessageStoreRef corev1.ObjectReference `json:"largeMessageStoreRef"` // Number of search head pods; a search head cluster will be created if > 1 Replicas int32 `json:"replicas"` @@ -115,8 +120,11 @@ type IndexerClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Bus configuration - BusConfiguration BusConfigurationSpec `json:"busConfiguration,omitempty"` + // Bus + Bus *BusSpec `json:"bus,omitempty"` + + // Large Message Store + LargeMessageStore *LargeMessageStoreSpec `json:"largeMessageStore,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index 364625e97..27fa5d1e0 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -39,8 +39,11 @@ type IngestorClusterSpec struct { // Splunk Enterprise app repository that specifies remote app location and scope for Splunk app management AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` - // Bus configuration reference - BusConfigurationRef corev1.ObjectReference `json:"busConfigurationRef"` + // Bus reference + BusRef corev1.ObjectReference `json:"busRef"` + + // Large Message Store reference + LargeMessageStoreRef corev1.ObjectReference `json:"largeMessageStoreRef"` } // IngestorClusterStatus defines the observed state of Ingestor Cluster @@ -69,8 +72,11 @@ type IngestorClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Bus configuration - BusConfiguration 
BusConfigurationSpec `json:"busConfiguration,omitempty"` + // Bus + Bus *BusSpec `json:"bus,omitempty"` + + // Large Message Store + LargeMessageStore *LargeMessageStoreSpec `json:"largeMessageStore,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v4/largemessagestore.go b/api/v4/largemessagestore.go new file mode 100644 index 000000000..3e9f4b62b --- /dev/null +++ b/api/v4/largemessagestore.go @@ -0,0 +1,137 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v4 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // LargeMessageStorePausedAnnotation is the annotation that pauses the reconciliation (triggers + // an immediate requeue) + LargeMessageStorePausedAnnotation = "largemessagestore.enterprise.splunk.com/paused" +) + +// +kubebuilder:validation:XValidation:rule="self.provider != 's3' || has(self.s3)",message="s3 must be provided when provider is s3" +// LargeMessageStoreSpec defines the desired state of LargeMessageStore +type LargeMessageStoreSpec struct { + // +kubebuilder:validation:Required + // +kubebuilder:validation:Enum=s3 + // Provider of queue resources + Provider string `json:"provider"` + + // s3 specific inputs + S3 S3Spec `json:"s3"` +} + +type S3Spec struct { + // +optional + // 
+kubebuilder:validation:Pattern=`^https://s3(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$` + // S3-compatible Service endpoint + Endpoint string `json:"endpoint"` + + // +kubebuilder:validation:Required + // +kubebuilder:validation:Pattern=`^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$` + // S3 bucket path + Path string `json:"path"` +} + +// LargeMessageStoreStatus defines the observed state of LargeMessageStore. +type LargeMessageStoreStatus struct { + // Phase of the large message store + Phase Phase `json:"phase"` + + // Resource revision tracker + ResourceRevMap map[string]string `json:"resourceRevMap"` + + // Auxillary message describing CR status + Message string `json:"message"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// LargeMessageStore is the Schema for a Splunk Enterprise large message store +// +k8s:openapi-gen=true +// +kubebuilder:subresource:status +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector +// +kubebuilder:resource:path=largemessagestores,scope=Namespaced,shortName=lms +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of large message store" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of large message store resource" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status" +// +kubebuilder:storageversion + +// LargeMessageStore is the Schema for the largemessagestores API +type LargeMessageStore struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` + + Spec LargeMessageStoreSpec `json:"spec"` + Status LargeMessageStoreStatus `json:"status,omitempty,omitzero"` +} + +// DeepCopyObject implements runtime.Object +func (in *LargeMessageStore) 
DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// +kubebuilder:object:root=true + +// LargeMessageStoreList contains a list of LargeMessageStore +type LargeMessageStoreList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LargeMessageStore `json:"items"` +} + +func init() { + SchemeBuilder.Register(&LargeMessageStore{}, &LargeMessageStoreList{}) +} + +// NewEvent creates a new event associated with the object and ready +// to be published to Kubernetes API +func (bc *LargeMessageStore) NewEvent(eventType, reason, message string) corev1.Event { + t := metav1.Now() + return corev1.Event{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: reason + "-", + Namespace: bc.ObjectMeta.Namespace, + }, + InvolvedObject: corev1.ObjectReference{ + Kind: "LargeMessageStore", + Namespace: bc.Namespace, + Name: bc.Name, + UID: bc.UID, + APIVersion: GroupVersion.String(), + }, + Reason: reason, + Message: message, + Source: corev1.EventSource{ + Component: "splunk-large-message-store-controller", + }, + FirstTimestamp: t, + LastTimestamp: t, + Count: 1, + Type: eventType, + ReportingController: "enterprise.splunk.com/large-message-store-controller", + } +} diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index fa23c996a..dc19b7f10 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -181,7 +181,7 @@ func (in *BundlePushTracker) DeepCopy() *BundlePushTracker { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BusConfiguration) DeepCopyInto(out *BusConfiguration) { +func (in *Bus) DeepCopyInto(out *Bus) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -189,42 +189,42 @@ func (in *BusConfiguration) DeepCopyInto(out *BusConfiguration) { in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfiguration. -func (in *BusConfiguration) DeepCopy() *BusConfiguration { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bus. +func (in *Bus) DeepCopy() *Bus { if in == nil { return nil } - out := new(BusConfiguration) + out := new(Bus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BusConfigurationList) DeepCopyInto(out *BusConfigurationList) { +func (in *BusList) DeepCopyInto(out *BusList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]BusConfiguration, len(*in)) + *out = make([]Bus, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfigurationList. -func (in *BusConfigurationList) DeepCopy() *BusConfigurationList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusList. +func (in *BusList) DeepCopy() *BusList { if in == nil { return nil } - out := new(BusConfigurationList) + out := new(BusList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *BusConfigurationList) DeepCopyObject() runtime.Object { +func (in *BusList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -232,23 +232,23 @@ func (in *BusConfigurationList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BusConfigurationSpec) DeepCopyInto(out *BusConfigurationSpec) { +func (in *BusSpec) DeepCopyInto(out *BusSpec) { *out = *in out.SQS = in.SQS } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfigurationSpec. -func (in *BusConfigurationSpec) DeepCopy() *BusConfigurationSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusSpec. +func (in *BusSpec) DeepCopy() *BusSpec { if in == nil { return nil } - out := new(BusConfigurationSpec) + out := new(BusSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BusConfigurationStatus) DeepCopyInto(out *BusConfigurationStatus) { +func (in *BusStatus) DeepCopyInto(out *BusStatus) { *out = *in if in.ResourceRevMap != nil { in, out := &in.ResourceRevMap, &out.ResourceRevMap @@ -259,12 +259,12 @@ func (in *BusConfigurationStatus) DeepCopyInto(out *BusConfigurationStatus) { } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusConfigurationStatus. -func (in *BusConfigurationStatus) DeepCopy() *BusConfigurationStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusStatus. 
+func (in *BusStatus) DeepCopy() *BusStatus { if in == nil { return nil } - out := new(BusConfigurationStatus) + out := new(BusStatus) in.DeepCopyInto(out) return out } @@ -600,7 +600,8 @@ func (in *IndexerClusterMemberStatus) DeepCopy() *IndexerClusterMemberStatus { func (in *IndexerClusterSpec) DeepCopyInto(out *IndexerClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) - out.BusConfigurationRef = in.BusConfigurationRef + out.BusRef = in.BusRef + out.LargeMessageStoreRef = in.LargeMessageStoreRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterSpec. @@ -633,7 +634,16 @@ func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { *out = make([]IndexerClusterMemberStatus, len(*in)) copy(*out, *in) } - out.BusConfiguration = in.BusConfiguration + if in.Bus != nil { + in, out := &in.Bus, &out.Bus + *out = new(BusSpec) + **out = **in + } + if in.LargeMessageStore != nil { + in, out := &in.LargeMessageStore, &out.LargeMessageStore + *out = new(LargeMessageStoreSpec) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterStatus. @@ -702,7 +712,8 @@ func (in *IngestorClusterSpec) DeepCopyInto(out *IngestorClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) - out.BusConfigurationRef = in.BusConfigurationRef + out.BusRef = in.BusRef + out.LargeMessageStoreRef = in.LargeMessageStoreRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterSpec. 
@@ -726,7 +737,16 @@ func (in *IngestorClusterStatus) DeepCopyInto(out *IngestorClusterStatus) { } } in.AppContext.DeepCopyInto(&out.AppContext) - out.BusConfiguration = in.BusConfiguration + if in.Bus != nil { + in, out := &in.Bus, &out.Bus + *out = new(BusSpec) + **out = **in + } + if in.LargeMessageStore != nil { + in, out := &in.LargeMessageStore, &out.LargeMessageStore + *out = new(LargeMessageStoreSpec) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterStatus. @@ -739,6 +759,95 @@ func (in *IngestorClusterStatus) DeepCopy() *IngestorClusterStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LargeMessageStore) DeepCopyInto(out *LargeMessageStore) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LargeMessageStore. +func (in *LargeMessageStore) DeepCopy() *LargeMessageStore { + if in == nil { + return nil + } + out := new(LargeMessageStore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LargeMessageStoreList) DeepCopyInto(out *LargeMessageStoreList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LargeMessageStore, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LargeMessageStoreList. 
+func (in *LargeMessageStoreList) DeepCopy() *LargeMessageStoreList { + if in == nil { + return nil + } + out := new(LargeMessageStoreList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LargeMessageStoreList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LargeMessageStoreSpec) DeepCopyInto(out *LargeMessageStoreSpec) { + *out = *in + out.S3 = in.S3 +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LargeMessageStoreSpec. +func (in *LargeMessageStoreSpec) DeepCopy() *LargeMessageStoreSpec { + if in == nil { + return nil + } + out := new(LargeMessageStoreSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LargeMessageStoreStatus) DeepCopyInto(out *LargeMessageStoreStatus) { + *out = *in + if in.ResourceRevMap != nil { + in, out := &in.ResourceRevMap, &out.ResourceRevMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LargeMessageStoreStatus. +func (in *LargeMessageStoreStatus) DeepCopy() *LargeMessageStoreStatus { + if in == nil { + return nil + } + out := new(LargeMessageStoreStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LicenseManager) DeepCopyInto(out *LicenseManager) { *out = *in @@ -977,6 +1086,21 @@ func (in *Probe) DeepCopy() *Probe { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Spec) DeepCopyInto(out *S3Spec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Spec. +func (in *S3Spec) DeepCopy() *S3Spec { + if in == nil { + return nil + } + out := new(S3Spec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SQSSpec) DeepCopyInto(out *SQSSpec) { *out = *in diff --git a/cmd/main.go b/cmd/main.go index 1984474fa..0d14d691a 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -230,11 +230,18 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "IngestorCluster") os.Exit(1) } - if err := (&controller.BusConfigurationReconciler{ + if err := (&controller.BusReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "BusConfiguration") + setupLog.Error(err, "unable to create controller", "controller", "Bus") + os.Exit(1) + } + if err := (&controller.LargeMessageStoreReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "LargeMessageStore") os.Exit(1) } //+kubebuilder:scaffold:builder diff --git a/config/crd/bases/enterprise.splunk.com_buses.yaml b/config/crd/bases/enterprise.splunk.com_buses.yaml new file mode 100644 index 000000000..6a98483a5 --- /dev/null +++ b/config/crd/bases/enterprise.splunk.com_buses.yaml @@ -0,0 +1,123 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + 
controller-gen.kubebuilder.io/version: v0.16.1 + name: buses.enterprise.splunk.com +spec: + group: enterprise.splunk.com + names: + kind: Bus + listKind: BusList + plural: buses + shortNames: + - bus + singular: bus + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: Status of bus + jsonPath: .status.phase + name: Phase + type: string + - description: Age of bus resource + jsonPath: .metadata.creationTimestamp + name: Age + type: date + - description: Auxillary message describing CR status + jsonPath: .status.message + name: Message + type: string + name: v4 + schema: + openAPIV3Schema: + description: Bus is the Schema for the buses API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: BusSpec defines the desired state of Bus + properties: + provider: + description: Provider of queue resources + enum: + - sqs + type: string + queueName: + description: Name of the queue + minLength: 1 + type: string + region: + description: Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string + sqs: + description: sqs specific inputs + properties: + dlq: + description: Name of the dead letter queue resource + minLength: 1 + type: string + endpoint: + description: Amazon SQS Service endpoint + pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + type: string + required: + - dlq + type: object + required: + - provider + - queueName + - region + type: object + x-kubernetes-validations: + - message: sqs must be provided when provider is sqs + rule: self.provider != 'sqs' || has(self.sqs) + status: + description: BusStatus defines the observed state of Bus + properties: + message: + description: Auxillary message describing CR status + type: string + phase: + description: Phase of the bus + enum: + - Pending + - Ready + - Updating + - ScalingUp + - ScalingDown + - Terminating + - Error + type: string + resourceRevMap: + additionalProperties: + type: string + description: Resource revision tracker + type: object + type: object + type: object + served: true + storage: true + subresources: + scale: + labelSelectorPath: .status.selector + specReplicasPath: .spec.replicas + statusReplicasPath: .status.replicas + status: {} diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index d66e057fb..3563c678f 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ 
b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -5165,8 +5165,8 @@ spec: x-kubernetes-list-type: atomic type: object type: object - busConfigurationRef: - description: Bus configuration reference + busRef: + description: Bus reference properties: apiVersion: description: API version of the referent. @@ -5480,6 +5480,49 @@ spec: type: object x-kubernetes-map-type: atomic type: array + largeMessageStoreRef: + description: Large Message Store reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic licenseManagerRef: description: LicenseManagerRef refers to a Splunk Enterprise license manager managed by the operator within Kubernetes @@ -8294,27 +8337,44 @@ spec: type: boolean description: Holds secrets whose IDXC password has changed type: object - busConfiguration: - description: Bus configuration + bus: + description: Bus properties: + provider: + description: Provider of queue resources + enum: + - sqs + type: string + queueName: + description: Name of the queue + minLength: 1 + type: string + region: + description: Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string sqs: + description: sqs specific inputs properties: - authRegion: - type: string - deadLetterQueueName: + dlq: + description: Name of the dead letter queue resource + minLength: 1 type: string endpoint: + description: Amazon SQS Service endpoint + pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ type: string - largeMessageStoreEndpoint: - type: string - largeMessageStorePath: - type: string - queueName: - type: string + required: + - dlq type: object - type: - type: string + required: + - provider + - queueName + - region type: object + x-kubernetes-validations: + - message: sqs must be provided when provider is sqs + rule: self.provider != 'sqs' || has(self.sqs) clusterManagerPhase: description: current phase of the cluster manager enum: @@ -8349,6 +8409,34 @@ spec: initialized_flag: description: Indicates if the cluster is initialized. 
type: boolean + largeMessageStore: + description: Large Message Store + properties: + provider: + description: Provider of queue resources + enum: + - s3 + type: string + s3: + description: s3 specific inputs + properties: + endpoint: + description: S3-compatible Service endpoint + pattern: ^https://s3(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + type: string + path: + description: S3 bucket path + pattern: ^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$ + type: string + required: + - path + type: object + required: + - provider + type: object + x-kubernetes-validations: + - message: s3 must be provided when provider is s3 + rule: self.provider != 's3' || has(self.s3) maintenance_mode: description: Indicates if the cluster is in maintenance mode. type: boolean diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 82f1f868a..8ada99079 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -1141,8 +1141,8 @@ spec: type: object type: array type: object - busConfigurationRef: - description: Bus configuration reference + busRef: + description: Bus reference properties: apiVersion: description: API version of the referent. @@ -1456,6 +1456,49 @@ spec: type: object x-kubernetes-map-type: atomic type: array + largeMessageStoreRef: + description: Large Message Store reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
+ For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic licenseManagerRef: description: LicenseManagerRef refers to a Splunk Enterprise license manager managed by the operator within Kubernetes @@ -4545,27 +4588,72 @@ spec: description: App Framework version info for future use type: integer type: object - busConfiguration: - description: Bus configuration + bus: + description: Bus properties: + provider: + description: Provider of queue resources + enum: + - sqs + type: string + queueName: + description: Name of the queue + minLength: 1 + type: string + region: + description: Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string sqs: + description: sqs specific inputs properties: - authRegion: - type: string - deadLetterQueueName: + dlq: + description: Name of the dead letter queue resource + minLength: 1 type: string endpoint: + description: Amazon SQS Service endpoint + pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ type: string - largeMessageStoreEndpoint: - type: string - largeMessageStorePath: + required: + - dlq + type: object + required: + - provider + - queueName + - region + type: object + x-kubernetes-validations: + - message: sqs must be provided when provider is sqs + rule: self.provider != 'sqs' || has(self.sqs) + largeMessageStore: + description: Large Message Store + properties: + provider: + description: Provider of queue resources + enum: + - s3 + type: string + s3: + description: s3 specific inputs + properties: + endpoint: + description: S3-compatible Service endpoint + pattern: ^https://s3(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ type: string - queueName: + path: + description: S3 bucket path + pattern: ^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$ type: string + 
required: + - path type: object - type: - type: string + required: + - provider type: object + x-kubernetes-validations: + - message: s3 must be provided when provider is s3 + rule: self.provider != 's3' || has(self.s3) message: description: Auxillary message describing CR status type: string diff --git a/config/crd/bases/enterprise.splunk.com_busconfigurations.yaml b/config/crd/bases/enterprise.splunk.com_largemessagestores.yaml similarity index 64% rename from config/crd/bases/enterprise.splunk.com_busconfigurations.yaml rename to config/crd/bases/enterprise.splunk.com_largemessagestores.yaml index 9f80cdbea..20cd26906 100644 --- a/config/crd/bases/enterprise.splunk.com_busconfigurations.yaml +++ b/config/crd/bases/enterprise.splunk.com_largemessagestores.yaml @@ -4,24 +4,24 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.16.1 - name: busconfigurations.enterprise.splunk.com + name: largemessagestores.enterprise.splunk.com spec: group: enterprise.splunk.com names: - kind: BusConfiguration - listKind: BusConfigurationList - plural: busconfigurations + kind: LargeMessageStore + listKind: LargeMessageStoreList + plural: largemessagestores shortNames: - - bus - singular: busconfiguration + - lms + singular: largemessagestore scope: Namespaced versions: - additionalPrinterColumns: - - description: Status of bus configuration + - description: Status of large message store jsonPath: .status.phase name: Phase type: string - - description: Age of bus configuration resource + - description: Age of large message store resource jsonPath: .metadata.creationTimestamp name: Age type: date @@ -32,7 +32,7 @@ spec: name: v4 schema: openAPIV3Schema: - description: BusConfiguration is the Schema for the busconfigurations API + description: LargeMessageStore is the Schema for the largemessagestores API properties: apiVersion: description: |- @@ -52,34 +52,41 @@ spec: metadata: type: object spec: - description: BusConfigurationSpec 
defines the desired state of BusConfiguration + description: LargeMessageStoreSpec defines the desired state of LargeMessageStore properties: - sqs: + provider: + description: Provider of queue resources + enum: + - s3 + type: string + s3: + description: s3 specific inputs properties: - authRegion: - type: string - deadLetterQueueName: - type: string endpoint: + description: S3-compatible Service endpoint + pattern: ^https://s3(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ type: string - largeMessageStoreEndpoint: - type: string - largeMessageStorePath: - type: string - queueName: + path: + description: S3 bucket path + pattern: ^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$ type: string + required: + - path type: object - type: - type: string + required: + - provider type: object + x-kubernetes-validations: + - message: s3 must be provided when provider is s3 + rule: self.provider != 's3' || has(self.s3) status: - description: BusConfigurationStatus defines the observed state of BusConfiguration. + description: LargeMessageStoreStatus defines the observed state of LargeMessageStore. 
properties: message: description: Auxillary message describing CR status type: string phase: - description: Phase of the bus configuration + description: Phase of the large message store enum: - Pending - Ready diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 679c1dc72..c8ba16418 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -11,7 +11,8 @@ resources: - bases/enterprise.splunk.com_searchheadclusters.yaml - bases/enterprise.splunk.com_standalones.yaml - bases/enterprise.splunk.com_ingestorclusters.yaml -- bases/enterprise.splunk.com_busconfigurations.yaml +- bases/enterprise.splunk.com_buses.yaml +- bases/enterprise.splunk.com_largemessagestores.yaml #+kubebuilder:scaffold:crdkustomizeresource diff --git a/config/rbac/busconfiguration_editor_role.yaml b/config/rbac/bus_editor_role.yaml similarity index 88% rename from config/rbac/busconfiguration_editor_role.yaml rename to config/rbac/bus_editor_role.yaml index fde8687f7..c08c2ce39 100644 --- a/config/rbac/busconfiguration_editor_role.yaml +++ b/config/rbac/bus_editor_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: busconfiguration-editor-role + name: bus-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - buses verbs: - create - delete @@ -25,6 +25,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - buses/status verbs: - get diff --git a/config/rbac/busconfiguration_viewer_role.yaml b/config/rbac/bus_viewer_role.yaml similarity index 87% rename from config/rbac/busconfiguration_viewer_role.yaml rename to config/rbac/bus_viewer_role.yaml index 6230863a9..6f9c42d2a 100644 --- a/config/rbac/busconfiguration_viewer_role.yaml +++ b/config/rbac/bus_viewer_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: busconfiguration-viewer-role + name: bus-viewer-role rules: - 
apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - buses verbs: - get - list @@ -21,6 +21,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - buses/status verbs: - get diff --git a/config/rbac/largemessagestore_editor_role.yaml b/config/rbac/largemessagestore_editor_role.yaml new file mode 100644 index 000000000..614d09ad2 --- /dev/null +++ b/config/rbac/largemessagestore_editor_role.yaml @@ -0,0 +1,30 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: largemessagestore-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - largemessagestores + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - largemessagestores/status + verbs: + - get diff --git a/config/rbac/largemessagestore_viewer_role.yaml b/config/rbac/largemessagestore_viewer_role.yaml new file mode 100644 index 000000000..36cfde351 --- /dev/null +++ b/config/rbac/largemessagestore_viewer_role.yaml @@ -0,0 +1,26 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. 
+ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: largemessagestore-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - largemessagestores + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - largemessagestores/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 78231b303..94ed9d59e 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -47,11 +47,12 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - buses - clustermanagers - clustermasters - indexerclusters - ingestorclusters + - largemessagestores - licensemanagers - licensemasters - monitoringconsoles @@ -68,11 +69,12 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/finalizers + - buses/finalizers - clustermanagers/finalizers - clustermasters/finalizers - indexerclusters/finalizers - ingestorclusters/finalizers + - largemessagestores/finalizers - licensemanagers/finalizers - licensemasters/finalizers - monitoringconsoles/finalizers @@ -83,11 +85,12 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - buses/status - clustermanagers/status - clustermasters/status - indexerclusters/status - ingestorclusters/status + - largemessagestores/status - licensemanagers/status - licensemasters/status - monitoringconsoles/status diff --git a/config/samples/enterprise_v4_busconfiguration.yaml b/config/samples/enterprise_v4_bus.yaml similarity index 72% rename from config/samples/enterprise_v4_busconfiguration.yaml rename to config/samples/enterprise_v4_bus.yaml index 0cc1aed31..51af9d05a 100644 --- a/config/samples/enterprise_v4_busconfiguration.yaml +++ b/config/samples/enterprise_v4_bus.yaml @@ -1,7 +1,7 @@ apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration +kind: Bus metadata: - name: busconfiguration-sample + name: bus-sample finalizers: - 
"enterprise.splunk.com/delete-pvc" spec: {} diff --git a/config/samples/enterprise_v4_largemessagestore.yaml b/config/samples/enterprise_v4_largemessagestore.yaml new file mode 100644 index 000000000..508ba0b77 --- /dev/null +++ b/config/samples/enterprise_v4_largemessagestore.yaml @@ -0,0 +1,8 @@ +apiVersion: enterprise.splunk.com/v4 +kind: LargeMessageStore +metadata: + name: largemessagestore-sample + finalizers: + - "enterprise.splunk.com/delete-pvc" +spec: {} +# TODO(user): Add fields here diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 88c71025d..1ea90a3ae 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -14,5 +14,6 @@ resources: - enterprise_v4_clustermanager.yaml - enterprise_v4_licensemanager.yaml - enterprise_v4_ingestorcluster.yaml -- enterprise_v4_busconfiguration.yaml +- enterprise_v4_bus.yaml +- enterprise_v4_largemessagestore.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/docs/CustomResources.md b/docs/CustomResources.md index 6461d4488..384153add 100644 --- a/docs/CustomResources.md +++ b/docs/CustomResources.md @@ -338,10 +338,12 @@ metadata: name: ic spec: replicas: 3 - busConfigurationRef: - name: bus-config + busRef: + name: bus + largeMessageStoreRef: + name: lms ``` -Note: `busConfigurationRef` is required field in case of IngestorCluster resource since it will be used to connect the IngestorCluster to BusConfiguration resource. +Note: `busRef` and `largeMessageStoreRef` are required fields in case of IngestorCluster resource since they will be used to connect the IngestorCluster to Bus and LargeMessageStore resources. 
In addition to [Common Spec Parameters for All Resources](#common-spec-parameters-for-all-resources) and [Common Spec Parameters for All Splunk Enterprise Resources](#common-spec-parameters-for-all-splunk-enterprise-resources), diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index dd53922ff..3b151cc4d 100644 --- a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -16,13 +16,13 @@ This separation enables: - SPLUNK_IMAGE_VERSION: Splunk Enterprise Docker Image version -# BusConfiguration +# Bus -BusConfiguration is introduced to store message bus configuration to be shared among IngestorCluster and IndexerCluster. +Bus is introduced to store message bus to be shared among IngestorCluster and IndexerCluster. ## Spec -BusConfiguration inputs can be found in the table below. As of now, only SQS type of message bus is supported. +Bus inputs can be found in the table below. As of now, only SQS type of message bus is supported. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | @@ -45,9 +45,9 @@ Change of any of the bus inputs does not restart Splunk. 
It just updates the con ## Example ``` apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration +kind: Bus metadata: - name: bus-config + name: bus spec: type: sqs_smartbus sqs: @@ -70,7 +70,8 @@ In addition to common spec inputs, the IngestorCluster resource provides the fol | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | -| busConfigurationRef | corev1.ObjectReference | Message bus configuration reference | +| busRef | corev1.ObjectReference | Message bus reference | +| largeMessageStoreRef | corev1.ObjectReference | Large message store reference | ## Example @@ -89,8 +90,10 @@ spec: serviceAccount: ingestor-sa replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - busConfigurationRef: - name: bus-config + busRef: + name: bus + largeMessageStoreRef: + name: lms ``` # IndexerCluster @@ -104,7 +107,8 @@ In addition to common spec inputs, the IndexerCluster resource provides the foll | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | -| busConfigurationRef | corev1.ObjectReference | Message bus configuration reference | +| busRef | corev1.ObjectReference | Message bus reference | +| largeMessageStoreRef | corev1.ObjectReference | Large message store reference | ## Example @@ -135,8 +139,10 @@ spec: serviceAccount: ingestor-sa replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - busConfigurationRef: - name: bus-config + busRef: + name: bus + largeMessageStoreRef: + name: lms ``` # Common Spec @@ -149,12 +155,12 @@ An IngestorCluster template has been added to the splunk/splunk-enterprise Helm ## Example -Below examples describe how to define values for BusConfiguration, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. 
+Below examples describe how to define values for Bus, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. ``` -busConfiguration:: +bus: enabled: true - name: bus-config + name: bus type: sqs_smartbus sqs: queueName: sqs-test @@ -171,8 +177,10 @@ ingestorCluster: name: ingestor replicaCount: 3 serviceAccount: ingestor-sa - busConfigurationRef: - name: bus-config + busRef: + name: bus + largeMessageStoreRef: + name: lms ``` ``` @@ -189,8 +197,10 @@ indexerCluster: serviceAccount: ingestor-sa clusterManagerRef: name: cm - busConfigurationRef: - name: bus-config + busRef: + name: bus + largeMessageStoreRef: + name: lms ``` # Service Account @@ -492,12 +502,12 @@ $ aws iam list-attached-role-policies --role-name eksctl-ind-ing-sep-demo-addon- } ``` -3. Install BusConfiguration resource. +3. Install Bus resource. ``` $ cat bus.yaml apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration +kind: Bus metadata: name: bus finalizers: @@ -518,19 +528,19 @@ $ kubectl apply -f bus.yaml ``` ``` -$ kubectl get busconfiguration +$ kubectl get bus NAME PHASE AGE MESSAGE bus Ready 20s ``` ``` -kubectl describe busconfiguration +kubectl describe bus Name: bus Namespace: default Labels: Annotations: API Version: enterprise.splunk.com/v4 -Kind: BusConfiguration +Kind: Bus Metadata: Creation Timestamp: 2025-10-27T10:25:53Z Finalizers: @@ -568,8 +578,10 @@ spec: serviceAccount: ingestor-sa replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - busConfigurationRef: - name: bus-config + busRef: + name: bus + largeMessageStoreRef: + name: lms ``` ``` @@ -598,8 +610,8 @@ Metadata: Resource Version: 12345678 UID: 12345678-1234-1234-1234-1234567890123 Spec: - Bus Configuration Ref: - Name: bus-config + Bus Ref: + Name: bus Namespace: default Image: splunk/splunk:${SPLUNK_IMAGE_VERSION} Replicas: 3 @@ -616,7 +628,7 @@ Status: Is Deployment In Progress: false Last App Info Check Time: 0 Version: 0 - Bus Configuration: + Bus: Sqs: Auth Region: us-west-2 
Dead Letter Queue Name: sqs-dlq-test @@ -704,8 +716,10 @@ spec: clusterManagerRef: name: cm serviceAccount: ingestor-sa - busConfigurationRef: - name: bus-config + busRef: + name: bus + largeMessageStoreRef: + name: lms ``` ``` diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml deleted file mode 100644 index 2a746968e..000000000 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_busconfigurations.yaml +++ /dev/null @@ -1,40 +0,0 @@ -{{- if .Values.busConfiguration }} -{{- if .Values.busConfiguration.enabled }} -apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration -metadata: - name: {{ .Values.busConfiguration.name }} - namespace: {{ default .Release.Namespace .Values.busConfiguration.namespaceOverride }} - {{- with .Values.busConfiguration.additionalLabels }} - labels: -{{ toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.busConfiguration.additionalAnnotations }} - annotations: -{{ toYaml . 
| nindent 4 }} - {{- end }} -spec: - type: {{ .Values.busConfiguration.type | quote }} - {{- with .Values.busConfiguration.sqs }} - sqs: - {{- if .queueName }} - queueName: {{ .queueName | quote }} - {{- end }} - {{- if .authRegion }} - authRegion: {{ .authRegion | quote }} - {{- end }} - {{- if .endpoint }} - endpoint: {{ .endpoint | quote }} - {{- end }} - {{- if .largeMessageStoreEndpoint }} - largeMessageStoreEndpoint: {{ .largeMessageStoreEndpoint | quote }} - {{- end }} - {{- if .largeMessageStorePath }} - largeMessageStorePath: {{ .largeMessageStorePath | quote }} - {{- end }} - {{- if .deadLetterQueueName }} - deadLetterQueueName: {{ .deadLetterQueueName | quote }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml new file mode 100644 index 000000000..ce1c1e7a9 --- /dev/null +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml @@ -0,0 +1,30 @@ +{{- if .Values.bus }} +{{- if .Values.bus.enabled }} +apiVersion: enterprise.splunk.com/v4 +kind: Bus +metadata: + name: {{ .Values.bus.name }} + namespace: {{ default .Release.Namespace .Values.bus.namespaceOverride }} + {{- with .Values.bus.additionalLabels }} + labels: +{{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.bus.additionalAnnotations }} + annotations: +{{ toYaml . 
| nindent 4 }} + {{- end }} +spec: + provider: {{ .Values.bus.provider | quote }} + queueName: {{ .Values.bus.queueName | quote }} + region: {{ .Values.bus.region | quote }} + {{- with .Values.bus.sqs }} + sqs: + {{- if .endpoint }} + endpoint: {{ .endpoint | quote }} + {{- end }} + {{- if .dlq }} + dlq: {{ .dlq | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml index 77c24d500..0e6a96673 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml @@ -163,8 +163,14 @@ items: {{ toYaml . | indent 6 }} {{- end }} {{- end }} - {{- with $.Values.indexerCluster.busConfigurationRef }} - busConfigurationRef: + {{- with $.Values.indexerCluster.busRef }} + busRef: + name: {{ .name }} + {{- if .namespace }} + namespace: {{ .namespace }} + {{- end }} + {{- with $.Values.indexerCluster.largeMessageStoreRef }} + largeMessageStoreRef: name: {{ .name }} {{- if .namespace }} namespace: {{ .namespace }} diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml index fd72da310..b6c1640ec 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml @@ -95,11 +95,18 @@ spec: topologySpreadConstraints: {{- toYaml . 
| nindent 4 }} {{- end }} - {{- with $.Values.ingestorCluster.busConfigurationRef }} - busConfigurationRef: - name: {{ $.Values.ingestorCluster.busConfigurationRef.name }} - {{- if $.Values.ingestorCluster.busConfigurationRef.namespace }} - namespace: {{ $.Values.ingestorCluster.busConfigurationRef.namespace }} + {{- with $.Values.ingestorCluster.busRef }} + busRef: + name: {{ $.Values.ingestorCluster.busRef.name }} + {{- if $.Values.ingestorCluster.busRef.namespace }} + namespace: {{ $.Values.ingestorCluster.busRef.namespace }} + {{- end }} + {{- end }} + {{- with $.Values.ingestorCluster.largeMessageStoreRef }} + largeMessageStoreRef: + name: {{ $.Values.ingestorCluster.largeMessageStoreRef.name }} + {{- if $.Values.ingestorCluster.largeMessageStoreRef.namespace }} + namespace: {{ $.Values.ingestorCluster.largeMessageStoreRef.namespace }} {{- end }} {{- end }} {{- with .Values.ingestorCluster.extraEnv }} diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml new file mode 100644 index 000000000..77ef09e69 --- /dev/null +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml @@ -0,0 +1,28 @@ +{{- if .Values.largemessagestore }} +{{- if .Values.largemessagestore.enabled }} +apiVersion: enterprise.splunk.com/v4 +kind: LargeMessageStore +metadata: + name: {{ .Values.largemessagestore.name }} + namespace: {{ default .Release.Namespace .Values.largemessagestore.namespaceOverride }} + {{- with .Values.largemessagestore.additionalLabels }} + labels: +{{ toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.largemessagestore.additionalAnnotations }} + annotations: +{{ toYaml . 
| nindent 4 }} + {{- end }} +spec: + provider: {{ .Values.largemessagestore.provider | quote }} + {{- with .Values.largemessagestore.s3 }} + s3: + {{- if .endpoint }} + endpoint: {{ .endpoint | quote }} + {{- end }} + {{- if .path }} + path: {{ .path | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/values.yaml b/helm-chart/splunk-enterprise/values.yaml index e49073398..a001bbead 100644 --- a/helm-chart/splunk-enterprise/values.yaml +++ b/helm-chart/splunk-enterprise/values.yaml @@ -350,7 +350,9 @@ indexerCluster: # nodeAffinityPolicy: [Honor|Ignore] # optional; beta since v1.26 # nodeTaintsPolicy: [Honor|Ignore] # optional; beta since v1.26 - busConfigurationRef: {} + busRef: {} + + largeMessageStoreRef: {} searchHeadCluster: @@ -899,4 +901,6 @@ ingestorCluster: affinity: {} - busConfigurationRef: {} \ No newline at end of file + busRef: {} + + largeMessageStoreRef: {} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/busconfiguration_editor_role.yaml b/helm-chart/splunk-operator/templates/rbac/bus_editor_role.yaml similarity index 78% rename from helm-chart/splunk-operator/templates/rbac/busconfiguration_editor_role.yaml rename to helm-chart/splunk-operator/templates/rbac/bus_editor_role.yaml index 1475add32..f285a1ca5 100644 --- a/helm-chart/splunk-operator/templates/rbac/busconfiguration_editor_role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/bus_editor_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-busconfiguration-editor-role + name: {{ include "splunk-operator.operator.fullname" . 
}}-bus-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - buses verbs: - create - delete @@ -25,19 +25,19 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - buses/status verbs: - get {{- else }} apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-busconfiguration-editor-role + name: {{ include "splunk-operator.operator.fullname" . }}-bus-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - buses verbs: - create - delete @@ -49,7 +49,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - buses/status verbs: - get {{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/busconfiguration_viewer_role.yaml b/helm-chart/splunk-operator/templates/rbac/bus_viewer_role.yaml similarity index 76% rename from helm-chart/splunk-operator/templates/rbac/busconfiguration_viewer_role.yaml rename to helm-chart/splunk-operator/templates/rbac/bus_viewer_role.yaml index 500b1d100..c4381a3cc 100644 --- a/helm-chart/splunk-operator/templates/rbac/busconfiguration_viewer_role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/bus_viewer_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-busconfiguration-viewer-role + name: {{ include "splunk-operator.operator.fullname" . }}-bus-viewer-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - buses verbs: - get - list @@ -21,19 +21,19 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - buses/status verbs: - get {{- else }} apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ include "splunk-operator.operator.fullname" . 
}}-busconfiguration-viewer-role + name: {{ include "splunk-operator.operator.fullname" . }}-bus-viewer-role rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - buses verbs: - get - list @@ -41,7 +41,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - buses/status verbs: - get {{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/role.yaml b/helm-chart/splunk-operator/templates/rbac/role.yaml index 4eab5275e..61cf4ada9 100644 --- a/helm-chart/splunk-operator/templates/rbac/role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/role.yaml @@ -251,7 +251,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations + - buses verbs: - create - delete @@ -263,13 +263,39 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/finalizers + - buses/finalizers verbs: - update - apiGroups: - enterprise.splunk.com resources: - - busconfigurations/status + - buses/status + verbs: + - get + - patch + - update +- apiGroups: + - enterprise.splunk.com + resources: + - largemessagestores + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - largemessagestores/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - largemessagestores/status verbs: - get - patch diff --git a/internal/controller/busconfiguration_controller.go b/internal/controller/bus_controller.go similarity index 70% rename from internal/controller/busconfiguration_controller.go rename to internal/controller/bus_controller.go index c8519c017..b52e91991 100644 --- a/internal/controller/busconfiguration_controller.go +++ b/internal/controller/bus_controller.go @@ -36,34 +36,34 @@ import ( enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" ) -// BusConfigurationReconciler reconciles a BusConfiguration object -type 
BusConfigurationReconciler struct { +// BusReconciler reconciles a Bus object +type BusReconciler struct { client.Client Scheme *runtime.Scheme } -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=busconfigurations,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=busconfigurations/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=busconfigurations/finalizers,verbs=update +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=buses,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=buses/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=buses/finalizers,verbs=update // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. // TODO(user): Modify the Reconcile function to compare the state specified by -// the BusConfiguration object against the actual cluster state, and then +// the Bus object against the actual cluster state, and then // perform operations to make the cluster state reflect the state specified by // the user. 
// // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile -func (r *BusConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "BusConfiguration")).Inc() - defer recordInstrumentionData(time.Now(), req, "controller", "BusConfiguration") +func (r *BusReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "Bus")).Inc() + defer recordInstrumentionData(time.Now(), req, "controller", "Bus") reqLogger := log.FromContext(ctx) - reqLogger = reqLogger.WithValues("busconfiguration", req.NamespacedName) + reqLogger = reqLogger.WithValues("bus", req.NamespacedName) - // Fetch the BusConfiguration - instance := &enterpriseApi.BusConfiguration{} + // Fetch the Bus + instance := &enterpriseApi.Bus{} err := r.Get(ctx, req.NamespacedName, instance) if err != nil { if k8serrors.IsNotFound(err) { @@ -74,20 +74,20 @@ func (r *BusConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Req return ctrl.Result{}, nil } // Error reading the object - requeue the request. 
- return ctrl.Result{}, errors.Wrap(err, "could not load bus configuration data") + return ctrl.Result{}, errors.Wrap(err, "could not load bus data") } // If the reconciliation is paused, requeue annotations := instance.GetAnnotations() if annotations != nil { - if _, ok := annotations[enterpriseApi.BusConfigurationPausedAnnotation]; ok { + if _, ok := annotations[enterpriseApi.BusPausedAnnotation]; ok { return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil } } reqLogger.Info("start", "CR version", instance.GetResourceVersion()) - result, err := ApplyBusConfiguration(ctx, r.Client, instance) + result, err := ApplyBus(ctx, r.Client, instance) if result.Requeue && result.RequeueAfter != 0 { reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) } @@ -95,14 +95,14 @@ func (r *BusConfigurationReconciler) Reconcile(ctx context.Context, req ctrl.Req return result, err } -var ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { - return enterprise.ApplyBusConfiguration(ctx, client, instance) +var ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { + return enterprise.ApplyBus(ctx, client, instance) } // SetupWithManager sets up the controller with the Manager. -func (r *BusConfigurationReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *BusReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&enterpriseApi.BusConfiguration{}). + For(&enterpriseApi.Bus{}). 
WithEventFilter(predicate.Or( common.GenerationChangedPredicate(), common.AnnotationChangedPredicate(), diff --git a/internal/controller/busconfiguration_controller_test.go b/internal/controller/bus_controller_test.go similarity index 56% rename from internal/controller/busconfiguration_controller_test.go rename to internal/controller/bus_controller_test.go index e08154211..300af1879 100644 --- a/internal/controller/busconfiguration_controller_test.go +++ b/internal/controller/bus_controller_test.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -var _ = Describe("BusConfiguration Controller", func() { +var _ = Describe("Bus Controller", func() { BeforeEach(func() { time.Sleep(2 * time.Second) }) @@ -43,47 +43,55 @@ var _ = Describe("BusConfiguration Controller", func() { }) - Context("BusConfiguration Management", func() { + Context("Bus Management", func() { - It("Get BusConfiguration custom resource should fail", func() { + It("Get Bus custom resource should fail", func() { namespace := "ns-splunk-bus-1" - ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - _, err := GetBusConfiguration("test", nsSpecs.Name) - Expect(err.Error()).Should(Equal("busconfigurations.enterprise.splunk.com \"test\" not found")) - + _, err := GetBus("test", nsSpecs.Name) + Expect(err.Error()).Should(Equal("buses.enterprise.splunk.com \"test\" not found")) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) - It("Create BusConfiguration custom resource with annotations should pause", func() { + It("Create Bus custom resource with annotations should 
pause", func() { namespace := "ns-splunk-bus-2" annotations := make(map[string]string) - annotations[enterpriseApi.BusConfigurationPausedAnnotation] = "" - ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + annotations[enterpriseApi.BusPausedAnnotation] = "" + ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - CreateBusConfiguration("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) - icSpec, _ := GetBusConfiguration("test", nsSpecs.Name) + spec := enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "smartbus-queue", + Region: "us-west-2", + SQS: enterpriseApi.SQSSpec{ + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + } + CreateBus("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + icSpec, _ := GetBus("test", nsSpecs.Name) annotations = map[string]string{} icSpec.Annotations = annotations icSpec.Status.Phase = "Ready" - UpdateBusConfiguration(icSpec, enterpriseApi.PhaseReady) - DeleteBusConfiguration("test", nsSpecs.Name) + UpdateBus(icSpec, enterpriseApi.PhaseReady, spec) + DeleteBus("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) - It("Create BusConfiguration custom resource should succeeded", func() { + It("Create Bus custom resource should succeeded", func() { namespace := "ns-splunk-bus-3" - ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := 
&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -91,14 +99,23 @@ var _ = Describe("BusConfiguration Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) annotations := make(map[string]string) - CreateBusConfiguration("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) - DeleteBusConfiguration("test", nsSpecs.Name) + spec := enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "smartbus-queue", + Region: "us-west-2", + SQS: enterpriseApi.SQSSpec{ + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + } + CreateBus("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + DeleteBus("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) It("Cover Unused methods", func() { namespace := "ns-splunk-bus-4" - ApplyBusConfiguration = func(ctx context.Context, client client.Client, instance *enterpriseApi.BusConfiguration) (reconcile.Result, error) { + ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -108,7 +125,7 @@ var _ = Describe("BusConfiguration Controller", func() { ctx := context.TODO() builder := fake.NewClientBuilder() c := builder.Build() - instance := BusConfigurationReconciler{ + instance := BusReconciler{ Client: c, Scheme: scheme.Scheme, } @@ -121,11 +138,20 @@ var _ = Describe("BusConfiguration Controller", func() { _, err := instance.Reconcile(ctx, request) Expect(err).ToNot(HaveOccurred()) - bcSpec := testutils.NewBusConfiguration("test", namespace, "image") + spec := enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "smartbus-queue", + Region: "us-west-2", + SQS: enterpriseApi.SQSSpec{ + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + } + bcSpec := testutils.NewBus("test", namespace, spec) 
Expect(c.Create(ctx, bcSpec)).Should(Succeed()) annotations := make(map[string]string) - annotations[enterpriseApi.BusConfigurationPausedAnnotation] = "" + annotations[enterpriseApi.BusPausedAnnotation] = "" bcSpec.Annotations = annotations Expect(c.Update(ctx, bcSpec)).Should(Succeed()) @@ -147,86 +173,87 @@ var _ = Describe("BusConfiguration Controller", func() { }) }) -func GetBusConfiguration(name string, namespace string) (*enterpriseApi.BusConfiguration, error) { - By("Expecting BusConfiguration custom resource to be retrieved successfully") +func GetBus(name string, namespace string) (*enterpriseApi.Bus, error) { + By("Expecting Bus custom resource to be retrieved successfully") key := types.NamespacedName{ Name: name, Namespace: namespace, } - bc := &enterpriseApi.BusConfiguration{} + b := &enterpriseApi.Bus{} - err := k8sClient.Get(context.Background(), key, bc) + err := k8sClient.Get(context.Background(), key, b) if err != nil { return nil, err } - return bc, err + return b, err } -func CreateBusConfiguration(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase) *enterpriseApi.BusConfiguration { - By("Expecting BusConfiguration custom resource to be created successfully") +func CreateBus(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.BusSpec) *enterpriseApi.Bus { + By("Expecting Bus custom resource to be created successfully") key := types.NamespacedName{ Name: name, Namespace: namespace, } - ingSpec := &enterpriseApi.BusConfiguration{ + ingSpec := &enterpriseApi.Bus{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Annotations: annotations, }, + Spec: spec, } Expect(k8sClient.Create(context.Background(), ingSpec)).Should(Succeed()) time.Sleep(2 * time.Second) - bc := &enterpriseApi.BusConfiguration{} + b := &enterpriseApi.Bus{} Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, bc) + _ = 
k8sClient.Get(context.Background(), key, b) if status != "" { fmt.Printf("status is set to %v", status) - bc.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), bc)).Should(Succeed()) + b.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), b)).Should(Succeed()) time.Sleep(2 * time.Second) } return true }, timeout, interval).Should(BeTrue()) - return bc + return b } -func UpdateBusConfiguration(instance *enterpriseApi.BusConfiguration, status enterpriseApi.Phase) *enterpriseApi.BusConfiguration { - By("Expecting BusConfiguration custom resource to be updated successfully") +func UpdateBus(instance *enterpriseApi.Bus, status enterpriseApi.Phase, spec enterpriseApi.BusSpec) *enterpriseApi.Bus { + By("Expecting Bus custom resource to be updated successfully") key := types.NamespacedName{ Name: instance.Name, Namespace: instance.Namespace, } - bcSpec := testutils.NewBusConfiguration(instance.Name, instance.Namespace, "image") - bcSpec.ResourceVersion = instance.ResourceVersion - Expect(k8sClient.Update(context.Background(), bcSpec)).Should(Succeed()) + bSpec := testutils.NewBus(instance.Name, instance.Namespace, spec) + bSpec.ResourceVersion = instance.ResourceVersion + Expect(k8sClient.Update(context.Background(), bSpec)).Should(Succeed()) time.Sleep(2 * time.Second) - bc := &enterpriseApi.BusConfiguration{} + b := &enterpriseApi.Bus{} Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, bc) + _ = k8sClient.Get(context.Background(), key, b) if status != "" { fmt.Printf("status is set to %v", status) - bc.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), bc)).Should(Succeed()) + b.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), b)).Should(Succeed()) time.Sleep(2 * time.Second) } return true }, timeout, interval).Should(BeTrue()) - return bc + return b } -func DeleteBusConfiguration(name string, namespace string) { - By("Expecting 
BusConfiguration custom resource to be deleted successfully") +func DeleteBus(name string, namespace string) { + By("Expecting Bus custom resource to be deleted successfully") key := types.NamespacedName{ Name: name, @@ -234,9 +261,9 @@ func DeleteBusConfiguration(name string, namespace string) { } Eventually(func() error { - bc := &enterpriseApi.BusConfiguration{} - _ = k8sClient.Get(context.Background(), key, bc) - err := k8sClient.Delete(context.Background(), bc) + b := &enterpriseApi.Bus{} + _ = k8sClient.Get(context.Background(), key, b) + err := k8sClient.Delete(context.Background(), b) return err }, timeout, interval).Should(Succeed()) } diff --git a/internal/controller/indexercluster_controller.go b/internal/controller/indexercluster_controller.go index 3cc840baa..676f81d23 100644 --- a/internal/controller/indexercluster_controller.go +++ b/internal/controller/indexercluster_controller.go @@ -172,9 +172,9 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IndexerCluster{}, )). - Watches(&enterpriseApi.BusConfiguration{}, + Watches(&enterpriseApi.Bus{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { - bc, ok := obj.(*enterpriseApi.BusConfiguration) + b, ok := obj.(*enterpriseApi.Bus) if !ok { return nil } @@ -184,11 +184,39 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { } var reqs []reconcile.Request for _, ic := range list.Items { - ns := ic.Spec.BusConfigurationRef.Namespace + ns := ic.Spec.BusRef.Namespace if ns == "" { ns = ic.Namespace } - if ic.Spec.BusConfigurationRef.Name == bc.Name && ns == bc.Namespace { + if ic.Spec.BusRef.Name == b.Name && ns == b.Namespace { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + } + } + return reqs + }), + ). 
+ Watches(&enterpriseApi.LargeMessageStore{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + lms, ok := obj.(*enterpriseApi.LargeMessageStore) + if !ok { + return nil + } + var list enterpriseApi.IndexerClusterList + if err := r.Client.List(ctx, &list); err != nil { + return nil + } + var reqs []reconcile.Request + for _, ic := range list.Items { + ns := ic.Spec.LargeMessageStoreRef.Namespace + if ns == "" { + ns = ic.Namespace + } + if ic.Spec.LargeMessageStoreRef.Name == lms.Name && ns == lms.Namespace { reqs = append(reqs, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: ic.Name, diff --git a/internal/controller/ingestorcluster_controller.go b/internal/controller/ingestorcluster_controller.go index a2c5846df..1df81eb78 100644 --- a/internal/controller/ingestorcluster_controller.go +++ b/internal/controller/ingestorcluster_controller.go @@ -141,9 +141,9 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IngestorCluster{}, )). - Watches(&enterpriseApi.BusConfiguration{}, + Watches(&enterpriseApi.Bus{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { - bc, ok := obj.(*enterpriseApi.BusConfiguration) + b, ok := obj.(*enterpriseApi.Bus) if !ok { return nil } @@ -153,11 +153,39 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { } var reqs []reconcile.Request for _, ic := range list.Items { - ns := ic.Spec.BusConfigurationRef.Namespace + ns := ic.Spec.BusRef.Namespace if ns == "" { ns = ic.Namespace } - if ic.Spec.BusConfigurationRef.Name == bc.Name && ns == bc.Namespace { + if ic.Spec.BusRef.Name == b.Name && ns == b.Namespace { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + } + } + return reqs + }), + ). 
+ Watches(&enterpriseApi.LargeMessageStore{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + lms, ok := obj.(*enterpriseApi.LargeMessageStore) + if !ok { + return nil + } + var list enterpriseApi.IndexerClusterList + if err := r.Client.List(ctx, &list); err != nil { + return nil + } + var reqs []reconcile.Request + for _, ic := range list.Items { + ns := ic.Spec.LargeMessageStoreRef.Namespace + if ns == "" { + ns = ic.Namespace + } + if ic.Spec.LargeMessageStoreRef.Name == lms.Name && ns == lms.Namespace { reqs = append(reqs, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: ic.Name, diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go index 5e7ae4b73..811ca930a 100644 --- a/internal/controller/ingestorcluster_controller_test.go +++ b/internal/controller/ingestorcluster_controller_test.go @@ -71,7 +71,35 @@ var _ = Describe("IngestorCluster Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) + bus := &enterpriseApi.Bus{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bus", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "smartbus-queue", + Region: "us-west-2", + SQS: enterpriseApi.SQSSpec{ + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + }, + } + lms := &enterpriseApi.LargeMessageStore{ + ObjectMeta: metav1.ObjectMeta{ + Name: "lms", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://ingestion/smartbus-test", + }, + }, + } + CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, lms, bus) icSpec, _ := GetIngestorCluster("test", nsSpecs.Name) annotations = 
map[string]string{} icSpec.Annotations = annotations @@ -91,7 +119,35 @@ var _ = Describe("IngestorCluster Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) annotations := make(map[string]string) - CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady) + bus := &enterpriseApi.Bus{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bus", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "smartbus-queue", + Region: "us-west-2", + SQS: enterpriseApi.SQSSpec{ + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + }, + } + lms := &enterpriseApi.LargeMessageStore{ + ObjectMeta: metav1.ObjectMeta{ + Name: "lms", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://ingestion/smartbus-test", + }, + }, + } + CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, lms, bus) DeleteIngestorCluster("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) @@ -164,7 +220,7 @@ func GetIngestorCluster(name string, namespace string) (*enterpriseApi.IngestorC return ic, err } -func CreateIngestorCluster(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase) *enterpriseApi.IngestorCluster { +func CreateIngestorCluster(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, lms *enterpriseApi.LargeMessageStore, bus *enterpriseApi.Bus) *enterpriseApi.IngestorCluster { By("Expecting IngestorCluster custom resource to be created successfully") key := types.NamespacedName{ @@ -184,8 +240,13 @@ func CreateIngestorCluster(name string, namespace string, annotations map[string }, }, Replicas: 3, - BusConfigurationRef: corev1.ObjectReference{ - Name: "busConfig", + BusRef: 
corev1.ObjectReference{ + Name: bus.Name, + Namespace: bus.Namespace, + }, + LargeMessageStoreRef: corev1.ObjectReference{ + Name: lms.Name, + Namespace: lms.Namespace, }, }, } diff --git a/internal/controller/largemessagestore_controller.go b/internal/controller/largemessagestore_controller.go new file mode 100644 index 000000000..69a4af131 --- /dev/null +++ b/internal/controller/largemessagestore_controller.go @@ -0,0 +1,120 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "time" + + k8serrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/pkg/errors" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/internal/controller/common" + metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics" + enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" +) + +// LargeMessageStoreReconciler reconciles a LargeMessageStore object +type LargeMessageStoreReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=largemessagestores,verbs=get;list;watch;create;update;patch;delete 
+// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=largemessagestores/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=largemessagestores/finalizers,verbs=update + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// TODO(user): Modify the Reconcile function to compare the state specified by +// the LargeMessageStore object against the actual cluster state, and then +// perform operations to make the cluster state reflect the state specified by +// the user. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile +func (r *LargeMessageStoreReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "LargeMessageStore")).Inc() + defer recordInstrumentionData(time.Now(), req, "controller", "LargeMessageStore") + + reqLogger := log.FromContext(ctx) + reqLogger = reqLogger.WithValues("largemessagestore", req.NamespacedName) + + // Fetch the LargeMessageStore + instance := &enterpriseApi.LargeMessageStore{} + err := r.Get(ctx, req.NamespacedName, instance) + if err != nil { + if k8serrors.IsNotFound(err) { + // Request object not found, could have been deleted after + // reconcile request. Owned objects are automatically + // garbage collected. For additional cleanup logic use + // finalizers. Return and don't requeue + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request. 
+ return ctrl.Result{}, errors.Wrap(err, "could not load largemessagestore data") + } + + // If the reconciliation is paused, requeue + annotations := instance.GetAnnotations() + if annotations != nil { + if _, ok := annotations[enterpriseApi.LargeMessageStorePausedAnnotation]; ok { + return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil + } + } + + reqLogger.Info("start", "CR version", instance.GetResourceVersion()) + + result, err := ApplyLargeMessageStore(ctx, r.Client, instance) + if result.Requeue && result.RequeueAfter != 0 { + reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) + } + + return result, err +} + +var ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + return enterprise.ApplyLargeMessageStore(ctx, client, instance) +} + +// SetupWithManager sets up the controller with the Manager. +func (r *LargeMessageStoreReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&enterpriseApi.LargeMessageStore{}). + WithEventFilter(predicate.Or( + common.GenerationChangedPredicate(), + common.AnnotationChangedPredicate(), + common.LabelChangedPredicate(), + common.SecretChangedPredicate(), + common.ConfigMapChangedPredicate(), + common.StatefulsetChangedPredicate(), + common.PodChangedPredicate(), + common.CrdChangedPredicate(), + )). + WithOptions(controller.Options{ + MaxConcurrentReconciles: enterpriseApi.TotalWorker, + }). + Complete(r) +} diff --git a/internal/controller/largemessagestore_controller_test.go b/internal/controller/largemessagestore_controller_test.go new file mode 100644 index 000000000..5d85d4409 --- /dev/null +++ b/internal/controller/largemessagestore_controller_test.go @@ -0,0 +1,263 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controller + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/splunk/splunk-operator/internal/controller/testutils" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +var _ = Describe("LargeMessageStore Controller", func() { + BeforeEach(func() { + time.Sleep(2 * time.Second) + }) + + AfterEach(func() { + + }) + + Context("LargeMessageStore Management", func() { + + It("Get LargeMessageStore custom resource should fail", func() { + namespace := "ns-splunk-largemessagestore-1" + ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + _, err := GetLargeMessageStore("test", nsSpecs.Name) + Expect(err.Error()).Should(Equal("largemessagestores.enterprise.splunk.com \"test\" not found")) + Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) + }) + + It("Create LargeMessageStore custom resource with annotations should pause", func() { + namespace := "ns-splunk-largemessagestore-2" + 
annotations := make(map[string]string) + annotations[enterpriseApi.LargeMessageStorePausedAnnotation] = "" + ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + spec := enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://ingestion/smartbus-test", + }, + } + CreateLargeMessageStore("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + icSpec, _ := GetLargeMessageStore("test", nsSpecs.Name) + annotations = map[string]string{} + icSpec.Annotations = annotations + icSpec.Status.Phase = "Ready" + UpdateLargeMessageStore(icSpec, enterpriseApi.PhaseReady, spec) + DeleteLargeMessageStore("test", nsSpecs.Name) + Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) + }) + + It("Create LargeMessageStore custom resource should succeeded", func() { + namespace := "ns-splunk-largemessagestore-3" + ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + annotations := make(map[string]string) + spec := enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://ingestion/smartbus-test", + }, + } + CreateLargeMessageStore("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + DeleteLargeMessageStore("test", nsSpecs.Name) + Expect(k8sClient.Delete(context.Background(), 
nsSpecs)).Should(Succeed()) + }) + + It("Cover Unused methods", func() { + namespace := "ns-splunk-largemessagestore-4" + ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + return reconcile.Result{}, nil + } + nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} + + Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + + ctx := context.TODO() + builder := fake.NewClientBuilder() + c := builder.Build() + instance := LargeMessageStoreReconciler{ + Client: c, + Scheme: scheme.Scheme, + } + request := reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "test", + Namespace: namespace, + }, + } + _, err := instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + + spec := enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://ingestion/smartbus-test", + }, + } + lmsSpec := testutils.NewLargeMessageStore("test", namespace, spec) + Expect(c.Create(ctx, lmsSpec)).Should(Succeed()) + + annotations := make(map[string]string) + annotations[enterpriseApi.LargeMessageStorePausedAnnotation] = "" + lmsSpec.Annotations = annotations + Expect(c.Update(ctx, lmsSpec)).Should(Succeed()) + + _, err = instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + + annotations = map[string]string{} + lmsSpec.Annotations = annotations + Expect(c.Update(ctx, lmsSpec)).Should(Succeed()) + + _, err = instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + + lmsSpec.DeletionTimestamp = &metav1.Time{} + _, err = instance.Reconcile(ctx, request) + Expect(err).ToNot(HaveOccurred()) + }) + + }) +}) + +func GetLargeMessageStore(name string, namespace string) (*enterpriseApi.LargeMessageStore, error) { + By("Expecting LargeMessageStore custom resource to be retrieved successfully") + + key := types.NamespacedName{ + 
Name: name, + Namespace: namespace, + } + lms := &enterpriseApi.LargeMessageStore{} + + err := k8sClient.Get(context.Background(), key, lms) + if err != nil { + return nil, err + } + + return lms, err +} + +func CreateLargeMessageStore(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.LargeMessageStoreSpec) *enterpriseApi.LargeMessageStore { + By("Expecting LargeMessageStore custom resource to be created successfully") + + key := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + lmsSpec := &enterpriseApi.LargeMessageStore{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Annotations: annotations, + }, + Spec: spec, + } + + Expect(k8sClient.Create(context.Background(), lmsSpec)).Should(Succeed()) + time.Sleep(2 * time.Second) + + lms := &enterpriseApi.LargeMessageStore{} + Eventually(func() bool { + _ = k8sClient.Get(context.Background(), key, lms) + if status != "" { + fmt.Printf("status is set to %v", status) + lms.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), lms)).Should(Succeed()) + time.Sleep(2 * time.Second) + } + return true + }, timeout, interval).Should(BeTrue()) + + return lms +} + +func UpdateLargeMessageStore(instance *enterpriseApi.LargeMessageStore, status enterpriseApi.Phase, spec enterpriseApi.LargeMessageStoreSpec) *enterpriseApi.LargeMessageStore { + By("Expecting LargeMessageStore custom resource to be updated successfully") + + key := types.NamespacedName{ + Name: instance.Name, + Namespace: instance.Namespace, + } + + lmsSpec := testutils.NewLargeMessageStore(instance.Name, instance.Namespace, spec) + lmsSpec.ResourceVersion = instance.ResourceVersion + Expect(k8sClient.Update(context.Background(), lmsSpec)).Should(Succeed()) + time.Sleep(2 * time.Second) + + lms := &enterpriseApi.LargeMessageStore{} + Eventually(func() bool { + _ = k8sClient.Get(context.Background(), key, lms) + if status != "" { + 
fmt.Printf("status is set to %v", status) + lms.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), lms)).Should(Succeed()) + time.Sleep(2 * time.Second) + } + return true + }, timeout, interval).Should(BeTrue()) + + return lms +} + +func DeleteLargeMessageStore(name string, namespace string) { + By("Expecting LargeMessageStore custom resource to be deleted successfully") + + key := types.NamespacedName{ + Name: name, + Namespace: namespace, + } + + Eventually(func() error { + lms := &enterpriseApi.LargeMessageStore{} + _ = k8sClient.Get(context.Background(), key, lms) + err := k8sClient.Delete(context.Background(), lms) + return err + }, timeout, interval).Should(Succeed()) +} diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 52c4c1a1d..17ce5e760 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -50,7 +50,6 @@ func TestAPIs(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Controller Suite") - } var _ = BeforeSuite(func(ctx context.Context) { @@ -99,6 +98,12 @@ var _ = BeforeSuite(func(ctx context.Context) { Scheme: clientgoscheme.Scheme, }) Expect(err).ToNot(HaveOccurred()) + if err := (&BusReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager); err != nil { + Expect(err).NotTo(HaveOccurred()) + } if err := (&ClusterManagerReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), @@ -117,37 +122,43 @@ var _ = BeforeSuite(func(ctx context.Context) { }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&LicenseManagerReconciler{ + if err := (&IngestorClusterReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&LicenseMasterReconciler{ + if err := (&LargeMessageStoreReconciler{ Client: k8sManager.GetClient(), Scheme: 
k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&MonitoringConsoleReconciler{ + if err := (&LicenseManagerReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&SearchHeadClusterReconciler{ + if err := (&LicenseMasterReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&StandaloneReconciler{ + if err := (&MonitoringConsoleReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&IngestorClusterReconciler{ + if err := (&SearchHeadClusterReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager); err != nil { + Expect(err).NotTo(HaveOccurred()) + } + if err := (&StandaloneReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { diff --git a/internal/controller/testutils/new.go b/internal/controller/testutils/new.go index 9ca78593c..e3e37efc2 100644 --- a/internal/controller/testutils/new.go +++ b/internal/controller/testutils/new.go @@ -54,28 +54,26 @@ func NewIngestorCluster(name, ns, image string) *enterpriseApi.IngestorCluster { Spec: enterpriseApi.Spec{ImagePullPolicy: string(pullPolicy)}, }, Replicas: 3, - BusConfigurationRef: corev1.ObjectReference{ - Name: "busConfig", + BusRef: corev1.ObjectReference{ + Name: "bus", }, }, } } -// NewBusConfiguration returns new BusConfiguration instance with its config hash -func NewBusConfiguration(name, ns, image string) *enterpriseApi.BusConfiguration { - return &enterpriseApi.BusConfiguration{ +// NewBus returns new Bus instance with its config hash +func NewBus(name, ns string, spec 
enterpriseApi.BusSpec) *enterpriseApi.Bus { + return &enterpriseApi.Bus{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", - }, - }, + Spec: spec, + } +} + +// NewLargeMessageStore returns new LargeMessageStore instance with its config hash +func NewLargeMessageStore(name, ns string, spec enterpriseApi.LargeMessageStoreSpec) *enterpriseApi.LargeMessageStore { + return &enterpriseApi.LargeMessageStore{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, + Spec: spec, } } @@ -313,9 +311,6 @@ func NewIndexerCluster(name, ns, image string) *enterpriseApi.IndexerCluster { ad.Spec = enterpriseApi.IndexerClusterSpec{ CommonSplunkSpec: *cs, - BusConfigurationRef: corev1.ObjectReference{ - Name: "busConfig", - }, } return ad } diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml index 5ac9b4a7a..001a78ee4 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml @@ -1,21 +1,33 @@ --- -# assert for bus configurtion custom resource to be ready +# assert for bus custom resource to be ready apiVersion: enterprise.splunk.com/v4 -kind: BusConfiguration +kind: Bus metadata: - name: bus-config + name: bus spec: - type: sqs_smartbus + provider: sqs + queueName: sqs-test + region: us-west-2 sqs: - queueName: sqs-test - authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test deadLetterQueueName: sqs-dlq-test status: phase: 
Ready +--- +# assert for large message store custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: LargeMessageStore +metadata: + name: lms +spec: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test +status: + phase: Ready + --- # assert for cluster manager custom resource to be ready apiVersion: enterprise.splunk.com/v4 @@ -49,20 +61,23 @@ metadata: name: indexer spec: replicas: 3 - busConfigurationRef: - name: bus-config + busRef: + name: bus status: phase: Ready - busConfiguration: - type: sqs_smartbus + bus: + provider: sqs + queueName: sqs-test + region: us-west-2 sqs: - queueName: sqs-test - authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test deadLetterQueueName: sqs-dlq-test - + largeMessageStore: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test + --- # check for stateful set and replicas as configured apiVersion: apps/v1 @@ -87,19 +102,22 @@ metadata: name: ingestor spec: replicas: 3 - busConfigurationRef: - name: bus-config + busRef: + name: bus status: phase: Ready - busConfiguration: - type: sqs_smartbus + bus: + provider: sqs + queueName: sqs-test + region: us-west-2 sqs: - queueName: sqs-test - authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test deadLetterQueueName: sqs-dlq-test + largeMessageStore: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test --- # check for stateful set and replicas as configured diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index daa1ab4ab..86a2df8a8 100644 --- 
a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -6,19 +6,22 @@ metadata: name: ingestor spec: replicas: 4 - busConfigurationRef: - name: bus-config + busRef: + name: bus status: phase: Ready - busConfiguration: - type: sqs_smartbus + bus: + provider: sqs + queueName: sqs-test + region: us-west-2 sqs: - queueName: sqs-test - authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test deadLetterQueueName: sqs-dlq-test + largeMessageStore: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test --- # check for stateful sets and replicas updated diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml index 6e87733cc..d832c5253 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -5,24 +5,32 @@ splunk-operator: persistentVolumeClaim: storageClassName: gp2 -busConfiguration: +bus: enabled: true - name: bus-config - type: sqs_smartbus + name: bus + provider: sqs + queueName: sqs-test + region: us-west-2 sqs: - queueName: sqs-test - authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test deadLetterQueueName: sqs-dlq-test +largeMessageStore: + enabled: true + name: lms + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test + ingestorCluster: enabled: true name: ingestor replicaCount: 3 - busConfigurationRef: - name: bus-config + busRef: + name: bus + largeMessageStoreRef: + name: lms clusterManager: enabled: true @@ -35,5 
+43,7 @@ indexerCluster: replicaCount: 3 clusterManagerRef: name: cm - busConfigurationRef: - name: bus-config + busRef: + name: bus + largeMessageStoreRef: + name: lms diff --git a/pkg/splunk/enterprise/bus.go b/pkg/splunk/enterprise/bus.go new file mode 100644 index 000000000..b6e8318ed --- /dev/null +++ b/pkg/splunk/enterprise/bus.go @@ -0,0 +1,75 @@ +/* +Copyright 2025. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package enterprise + +import ( + "context" + "time" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ApplyBus reconciles the state of an IngestorCluster custom resource +func ApplyBus(ctx context.Context, client client.Client, cr *enterpriseApi.Bus) (reconcile.Result, error) { + var err error + + // Unless modified, reconcile for this object will be requeued after 5 seconds + result := reconcile.Result{ + Requeue: true, + RequeueAfter: time.Second * 5, + } + + if cr.Status.ResourceRevMap == nil { + cr.Status.ResourceRevMap = make(map[string]string) + } + + eventPublisher, _ := newK8EventPublisher(client, cr) + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) + + cr.Kind = "Bus" + + // Initialize phase + cr.Status.Phase = enterpriseApi.PhaseError + + // Update the CR Status + defer 
updateCRStatus(ctx, client, cr, &err) + + // Check if deletion has been requested + if cr.ObjectMeta.DeletionTimestamp != nil { + terminating, err := splctrl.CheckForDeletion(ctx, cr, client) + if terminating && err != nil { + cr.Status.Phase = enterpriseApi.PhaseTerminating + } else { + result.Requeue = false + } + return result, err + } + + cr.Status.Phase = enterpriseApi.PhaseReady + + // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. + // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. + if !result.Requeue { + result.RequeueAfter = 0 + } + + return result, nil +} diff --git a/pkg/splunk/enterprise/bus_test.go b/pkg/splunk/enterprise/bus_test.go new file mode 100644 index 000000000..ac8ce8a8e --- /dev/null +++ b/pkg/splunk/enterprise/bus_test.go @@ -0,0 +1,69 @@ +/* +Copyright 2025. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package enterprise + +import ( + "context" + "os" + "testing" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func TestApplyBus(t *testing.T) { + os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") + + ctx := context.TODO() + + scheme := runtime.NewScheme() + _ = enterpriseApi.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + c := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Object definitions + bus := &enterpriseApi.Bus{ + TypeMeta: metav1.TypeMeta{ + Kind: "Bus", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "bus", + Namespace: "test", + }, + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", + SQS: enterpriseApi.SQSSpec{ + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + }, + }, + } + c.Create(ctx, bus) + + // ApplyBus + result, err := ApplyBus(ctx, c, bus) + assert.NoError(t, err) + assert.True(t, result.Requeue) + assert.NotEqual(t, enterpriseApi.PhaseError, bus.Status.Phase) + assert.Equal(t, enterpriseApi.PhaseReady, bus.Status.Phase) +} diff --git a/pkg/splunk/enterprise/busconfiguration.go b/pkg/splunk/enterprise/busconfiguration.go deleted file mode 100644 index 43fd35f68..000000000 --- a/pkg/splunk/enterprise/busconfiguration.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package enterprise - -import ( - "context" - "errors" - "fmt" - "strings" - "time" - - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" - splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -// ApplyBusConfiguration reconciles the state of an IngestorCluster custom resource -func ApplyBusConfiguration(ctx context.Context, client client.Client, cr *enterpriseApi.BusConfiguration) (reconcile.Result, error) { - var err error - - // Unless modified, reconcile for this object will be requeued after 5 seconds - result := reconcile.Result{ - Requeue: true, - RequeueAfter: time.Second * 5, - } - - reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("ApplyBusConfiguration") - - if cr.Status.ResourceRevMap == nil { - cr.Status.ResourceRevMap = make(map[string]string) - } - - eventPublisher, _ := newK8EventPublisher(client, cr) - ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) - - cr.Kind = "BusConfiguration" - - // Initialize phase - cr.Status.Phase = enterpriseApi.PhaseError - - // Update the CR Status - defer updateCRStatus(ctx, client, cr, &err) - - // Validate and updates defaults for CR - err = validateBusConfigurationSpec(ctx, client, cr) - if err != nil { - eventPublisher.Warning(ctx, "validateBusConfigurationSpec", fmt.Sprintf("validate bus configuration spec failed %s", err.Error())) - 
scopedLog.Error(err, "Failed to validate bus configuration spec") - return result, err - } - - // Check if deletion has been requested - if cr.ObjectMeta.DeletionTimestamp != nil { - terminating, err := splctrl.CheckForDeletion(ctx, cr, client) - if terminating && err != nil { - cr.Status.Phase = enterpriseApi.PhaseTerminating - } else { - result.Requeue = false - } - return result, err - } - - cr.Status.Phase = enterpriseApi.PhaseReady - - // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. - // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. - if !result.Requeue { - result.RequeueAfter = 0 - } - - return result, nil -} - -// validateBusConfigurationSpec checks validity and makes default updates to a BusConfigurationSpec and returns error if something is wrong -func validateBusConfigurationSpec(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.BusConfiguration) error { - return validateBusConfigurationInputs(cr) -} - -func validateBusConfigurationInputs(cr *enterpriseApi.BusConfiguration) error { - // sqs_smartbus type is supported for now - if cr.Spec.Type != "sqs_smartbus" { - return errors.New("only sqs_smartbus type is supported in bus configuration") - } - - // Cannot be empty fields check - cannotBeEmptyFields := []string{} - if cr.Spec.SQS.QueueName == "" { - cannotBeEmptyFields = append(cannotBeEmptyFields, "queueName") - } - - if cr.Spec.SQS.AuthRegion == "" { - cannotBeEmptyFields = append(cannotBeEmptyFields, "authRegion") - } - - if cr.Spec.SQS.DeadLetterQueueName == "" { - cannotBeEmptyFields = append(cannotBeEmptyFields, "deadLetterQueueName") - } - - if len(cannotBeEmptyFields) > 0 { - return errors.New("bus configuration sqs " + strings.Join(cannotBeEmptyFields, ", ") + " cannot be empty") - } - - // Have to start with https:// or s3:// checks - haveToStartWithHttps := []string{} - if 
!strings.HasPrefix(cr.Spec.SQS.Endpoint, "https://") { - haveToStartWithHttps = append(haveToStartWithHttps, "endpoint") - } - - if !strings.HasPrefix(cr.Spec.SQS.LargeMessageStoreEndpoint, "https://") { - haveToStartWithHttps = append(haveToStartWithHttps, "largeMessageStoreEndpoint") - } - - if len(haveToStartWithHttps) > 0 { - return errors.New("bus configuration sqs " + strings.Join(haveToStartWithHttps, ", ") + " must start with https://") - } - - if !strings.HasPrefix(cr.Spec.SQS.LargeMessageStorePath, "s3://") { - return errors.New("bus configuration sqs largeMessageStorePath must start with s3://") - } - - return nil -} diff --git a/pkg/splunk/enterprise/busconfiguration_test.go b/pkg/splunk/enterprise/busconfiguration_test.go deleted file mode 100644 index 45d19bb40..000000000 --- a/pkg/splunk/enterprise/busconfiguration_test.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2025. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package enterprise - -import ( - "context" - "os" - "path/filepath" - "testing" - - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/stretchr/testify/assert" - appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func init() { - GetReadinessScriptLocation = func() string { - fileLocation, _ := filepath.Abs("../../../" + readinessScriptLocation) - return fileLocation - } - GetLivenessScriptLocation = func() string { - fileLocation, _ := filepath.Abs("../../../" + livenessScriptLocation) - return fileLocation - } - GetStartupScriptLocation = func() string { - fileLocation, _ := filepath.Abs("../../../" + startupScriptLocation) - return fileLocation - } -} - -func TestApplyBusConfiguration(t *testing.T) { - os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") - - ctx := context.TODO() - - scheme := runtime.NewScheme() - _ = enterpriseApi.AddToScheme(scheme) - _ = corev1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) - c := fake.NewClientBuilder().WithScheme(scheme).Build() - - // Object definitions - busConfig := &enterpriseApi.BusConfiguration{ - TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", - APIVersion: "enterprise.splunk.com/v4", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", - Namespace: "test", - }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", - SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", - }, - }, - } - c.Create(ctx, busConfig) - - // ApplyBusConfiguration - result, err := ApplyBusConfiguration(ctx, c, busConfig) - assert.NoError(t, err) - assert.True(t, result.Requeue) - 
assert.NotEqual(t, enterpriseApi.PhaseError, busConfig.Status.Phase) - assert.Equal(t, enterpriseApi.PhaseReady, busConfig.Status.Phase) -} - -func TestValidateBusConfigurationInputs(t *testing.T) { - busConfig := enterpriseApi.BusConfiguration{ - TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", - APIVersion: "enterprise.splunk.com/v4", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", - }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "othertype", - SQS: enterpriseApi.SQSSpec{}, - }, - } - - err := validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "only sqs_smartbus type is supported in bus configuration", err.Error()) - - busConfig.Spec.Type = "sqs_smartbus" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs queueName, authRegion, deadLetterQueueName cannot be empty", err.Error()) - - busConfig.Spec.SQS.AuthRegion = "us-west-2" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs queueName, deadLetterQueueName cannot be empty", err.Error()) - - busConfig.Spec.SQS.QueueName = "test-queue" - busConfig.Spec.SQS.DeadLetterQueueName = "dlq-test" - busConfig.Spec.SQS.AuthRegion = "" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs authRegion cannot be empty", err.Error()) - - busConfig.Spec.SQS.AuthRegion = "us-west-2" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs endpoint, largeMessageStoreEndpoint must start with https://", err.Error()) - - busConfig.Spec.SQS.Endpoint = "https://sqs.us-west-2.amazonaws.com" - busConfig.Spec.SQS.LargeMessageStoreEndpoint = "https://s3.us-west-2.amazonaws.com" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs largeMessageStorePath must start with 
s3://", err.Error()) - - busConfig.Spec.SQS.LargeMessageStorePath = "ingestion/smartbus-test" - - err = validateBusConfigurationInputs(&busConfig) - assert.NotNil(t, err) - assert.Equal(t, "bus configuration sqs largeMessageStorePath must start with s3://", err.Error()) - - busConfig.Spec.SQS.LargeMessageStorePath = "s3://ingestion/smartbus-test" - - err = validateBusConfigurationInputs(&busConfig) - assert.Nil(t, err) -} diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 74b1b0a91..7b8009cdd 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -78,7 +78,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // updates status after function completes cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.BusConfiguration = enterpriseApi.BusConfigurationSpec{} + cr.Status.Bus = &enterpriseApi.BusSpec{} } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -245,35 +245,51 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // no need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - // Bus config - busConfig := enterpriseApi.BusConfiguration{} - if cr.Spec.BusConfigurationRef.Name != "" { + // Bus + bus := enterpriseApi.Bus{} + if cr.Spec.BusRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.BusConfigurationRef.Namespace != "" { - ns = cr.Spec.BusConfigurationRef.Namespace + if cr.Spec.BusRef.Namespace != "" { + ns = cr.Spec.BusRef.Namespace } err = client.Get(context.Background(), types.NamespacedName{ - Name: cr.Spec.BusConfigurationRef.Name, + Name: cr.Spec.BusRef.Name, Namespace: ns, - }, &busConfig) + }, &bus) if err != nil { return result, err } } - // If bus config is updated - if cr.Spec.BusConfigurationRef.Name != "" { - if 
!reflect.DeepEqual(cr.Status.BusConfiguration, busConfig.Spec) { + // Large Message Store + lms := enterpriseApi.LargeMessageStore{} + if cr.Spec.LargeMessageStoreRef.Name != "" { + ns := cr.GetNamespace() + if cr.Spec.LargeMessageStoreRef.Namespace != "" { + ns = cr.Spec.LargeMessageStoreRef.Namespace + } + err = client.Get(context.Background(), types.NamespacedName{ + Name: cr.Spec.LargeMessageStoreRef.Name, + Namespace: ns, + }, &lms) + if err != nil { + return result, err + } + } + + // If bus is updated + if cr.Spec.BusRef.Name != "" { + if !reflect.DeepEqual(cr.Status.Bus, &bus.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullBusChange(ctx, cr, busConfig, client) + err = mgr.handlePullBusChange(ctx, cr, bus, lms, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") return result, err } - cr.Status.BusConfiguration = busConfig.Spec + cr.Status.Bus = &bus.Spec } } @@ -366,7 +382,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, cr.Status.Phase = enterpriseApi.PhaseError cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.BusConfiguration = enterpriseApi.BusConfigurationSpec{} + cr.Status.Bus = &enterpriseApi.BusSpec{} } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -536,35 +552,51 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // no need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - // Bus config - busConfig := enterpriseApi.BusConfiguration{} - if cr.Spec.BusConfigurationRef.Name != "" { 
 // Bus + bus := enterpriseApi.Bus{} + if cr.Spec.BusRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.BusConfigurationRef.Namespace != "" { - ns = cr.Spec.BusConfigurationRef.Namespace + if cr.Spec.BusRef.Namespace != "" { + ns = cr.Spec.BusRef.Namespace } err = client.Get(context.Background(), types.NamespacedName{ - Name: cr.Spec.BusConfigurationRef.Name, + Name: cr.Spec.BusRef.Name, Namespace: ns, - }, &busConfig) + }, &bus) if err != nil { return result, err } } - // If bus config is updated - if cr.Spec.BusConfigurationRef.Name != "" { - if !reflect.DeepEqual(cr.Status.BusConfiguration, busConfig.Spec) { + // Large Message Store + lms := enterpriseApi.LargeMessageStore{} + if cr.Spec.LargeMessageStoreRef.Name != "" { + ns := cr.GetNamespace() + if cr.Spec.LargeMessageStoreRef.Namespace != "" { + ns = cr.Spec.LargeMessageStoreRef.Namespace + } + err = client.Get(context.Background(), types.NamespacedName{ + Name: cr.Spec.LargeMessageStoreRef.Name, + Namespace: ns, + }, &lms) + if err != nil { + return result, err + } + } + + // If bus is updated + if cr.Spec.BusRef.Name != "" { + if !reflect.DeepEqual(cr.Status.Bus, &bus.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullBusChange(ctx, cr, busConfig, client) + err = mgr.handlePullBusChange(ctx, cr, bus, lms, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") return result, err } - cr.Status.BusConfiguration = busConfig.Spec + cr.Status.Bus = &bus.Spec } } @@ -1234,7 +1266,7 @@ func getSiteName(ctx context.Context, c splcommon.ControllerClient, cr *enterpri var newSplunkClientForBusPipeline = splclient.NewSplunkClient // Checks if only PullBus or Pipeline config changed, and updates the conf 
file if so -func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, busConfig enterpriseApi.BusConfiguration, k8s client.Client) error { +func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, bus enterpriseApi.Bus, lms enterpriseApi.LargeMessageStore, k8s client.Client) error { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("handlePullBusChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) @@ -1253,27 +1285,27 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne splunkClient := newSplunkClientForBusPipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) afterDelete := false - if (busConfig.Spec.SQS.QueueName != "" && newCR.Status.BusConfiguration.SQS.QueueName != "" && busConfig.Spec.SQS.QueueName != newCR.Status.BusConfiguration.SQS.QueueName) || - (busConfig.Spec.Type != "" && newCR.Status.BusConfiguration.Type != "" && busConfig.Spec.Type != newCR.Status.BusConfiguration.Type) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.BusConfiguration.SQS.QueueName)); err != nil { + if newCR.Status.Bus != nil && ((bus.Spec.QueueName != "" && newCR.Status.Bus.QueueName != "" && bus.Spec.QueueName != newCR.Status.Bus.QueueName) || + (bus.Spec.Provider != "" && newCR.Status.Bus.Provider != "" && bus.Spec.Provider != newCR.Status.Bus.Provider)) { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.QueueName)); err != nil { updateErr = err } - if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.BusConfiguration.SQS.QueueName)); err != nil { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.QueueName)); err != nil { updateErr = err }
afterDelete = true } - busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&busConfig, newCR, afterDelete) + busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&bus, &lms, newCR, afterDelete) for _, pbVal := range busChangedFieldsOutputs { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName), [][]string{pbVal}); err != nil { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName), [][]string{pbVal}); err != nil { updateErr = err } } for _, pbVal := range busChangedFieldsInputs { - if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName), [][]string{pbVal}); err != nil { + if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName), [][]string{pbVal}); err != nil { updateErr = err } } @@ -1290,14 +1322,22 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne } // getChangedBusFieldsForIndexer returns a list of changed bus and pipeline fields for indexer pods -func getChangedBusFieldsForIndexer(busConfig *enterpriseApi.BusConfiguration, busConfigIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields [][]string) { +func getChangedBusFieldsForIndexer(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields [][]string) { // Compare bus fields - oldPB := busConfigIndexerStatus.Status.BusConfiguration - newPB := busConfig.Spec + oldPB := busIndexerStatus.Status.Bus + if oldPB == nil { + oldPB = &enterpriseApi.BusSpec{} + } + newPB := bus.Spec - // Push all bus fields - 
busChangedFieldsInputs, busChangedFieldsOutputs = pullBusChanged(&oldPB, &newPB, afterDelete) + oldLMS := busIndexerStatus.Status.LargeMessageStore + if oldLMS == nil { + oldLMS = &enterpriseApi.LargeMessageStoreSpec{} + } + newLMS := lms.Spec + // Push all bus fields + busChangedFieldsInputs, busChangedFieldsOutputs = pullBusChanged(oldPB, &newPB, oldLMS, &newLMS, afterDelete) // Always set all pipeline fields, not just changed ones pipelineChangedFields = pipelineConfig(true) @@ -1315,34 +1355,43 @@ func imageUpdatedTo9(previousImage string, currentImage string) bool { return strings.HasPrefix(previousVersion, "8") && strings.HasPrefix(currentVersion, "9") } -func pullBusChanged(oldBus, newBus *enterpriseApi.BusConfigurationSpec, afterDelete bool) (inputs, outputs [][]string) { - if oldBus.Type != newBus.Type || afterDelete { - inputs = append(inputs, []string{"remote_queue.type", newBus.Type}) +func pullBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (inputs, outputs [][]string) { + busProvider := "" + if newBus.Provider == "sqs" { + busProvider = "sqs_smartbus" + } + lmsProvider := "" + if newLMS.Provider == "s3" { + lmsProvider = "sqs_smartbus" + } + + if oldBus.Provider != newBus.Provider || afterDelete { + inputs = append(inputs, []string{"remote_queue.type", busProvider}) } - if oldBus.SQS.AuthRegion != newBus.SQS.AuthRegion || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", newBus.Type), newBus.SQS.AuthRegion}) + if oldBus.Region != newBus.Region || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.Region}) } if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", newBus.Type), newBus.SQS.Endpoint}) + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", busProvider), 
newBus.SQS.Endpoint}) } - if oldBus.SQS.LargeMessageStoreEndpoint != newBus.SQS.LargeMessageStoreEndpoint || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newBus.Type), newBus.SQS.LargeMessageStoreEndpoint}) + if oldLMS.S3.Endpoint != newLMS.S3.Endpoint || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", lmsProvider), newLMS.S3.Endpoint}) } - if oldBus.SQS.LargeMessageStorePath != newBus.SQS.LargeMessageStorePath || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", newBus.Type), newBus.SQS.LargeMessageStorePath}) + if oldLMS.S3.Path != newLMS.S3.Path || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", lmsProvider), newLMS.S3.Path}) } - if oldBus.SQS.DeadLetterQueueName != newBus.SQS.DeadLetterQueueName || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newBus.Type), newBus.SQS.DeadLetterQueueName}) + if oldBus.SQS.DLQ != newBus.SQS.DLQ || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busProvider), newBus.SQS.DLQ}) } inputs = append(inputs, - []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", newBus.Type), "4"}, - []string{fmt.Sprintf("remote_queue.%s.retry_policy", newBus.Type), "max_count"}, + []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busProvider), "4"}, + []string{fmt.Sprintf("remote_queue.%s.retry_policy", busProvider), "max_count"}, ) outputs = inputs outputs = append(outputs, - []string{fmt.Sprintf("remote_queue.%s.send_interval", newBus.Type), "5s"}, - []string{fmt.Sprintf("remote_queue.%s.encoding_format", newBus.Type), "s2s"}, + []string{fmt.Sprintf("remote_queue.%s.send_interval", busProvider), "5s"}, + []string{fmt.Sprintf("remote_queue.%s.encoding_format", 
busProvider), "s2s"}, ) return inputs, outputs diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index e541fc4f6..9df4b2f75 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -1344,23 +1344,21 @@ func TestInvalidIndexerClusterSpec(t *testing.T) { func TestGetIndexerStatefulSet(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") - busConfig := enterpriseApi.BusConfiguration{ + bus := enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Bus", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "bus", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -1371,8 +1369,8 @@ func TestGetIndexerStatefulSet(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IndexerClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + BusRef: corev1.ObjectReference{ + Name: bus.Name, }, }, } @@ -2048,60 +2046,80 @@ func TestImageUpdatedTo9(t *testing.T) { } func TestGetChangedBusFieldsForIndexer(t *testing.T) { - busConfig := enterpriseApi.BusConfiguration{ + provider := "sqs_smartbus" + + bus := enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Bus", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "bus", }, - Spec: 
enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + }, + }, + } + + lms := enterpriseApi.LargeMessageStore{ + TypeMeta: metav1.TypeMeta{ + Kind: "LargeMessageStore", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "lms", + }, + Spec: enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", }, }, } newCR := &enterpriseApi.IndexerCluster{ Spec: enterpriseApi.IndexerClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + BusRef: corev1.ObjectReference{ + Name: bus.Name, + }, + LargeMessageStoreRef: corev1.ObjectReference{ + Name: lms.Name, }, }, } - busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&busConfig, newCR, false) + busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&bus, &lms, newCR, false) assert.Equal(t, 8, len(busChangedFieldsInputs)) assert.Equal(t, [][]string{ - {"remote_queue.type", busConfig.Spec.Type}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), 
busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, + {"remote_queue.type", provider}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, }, busChangedFieldsInputs) assert.Equal(t, 10, len(busChangedFieldsOutputs)) assert.Equal(t, [][]string{ - {"remote_queue.type", busConfig.Spec.Type}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, - {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, - {fmt.Sprintf("remote_queue.%s.encoding_format", 
busConfig.Spec.Type), "s2s"}, + {"remote_queue.type", provider}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, + {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, }, busChangedFieldsOutputs) assert.Equal(t, 5, len(pipelineChangedFields)) @@ -2116,24 +2134,42 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { func TestHandlePullBusChange(t *testing.T) { // Object definitions - busConfig := enterpriseApi.BusConfiguration{ + provider := "sqs_smartbus" + + bus := enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Bus", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "bus", Namespace: "test", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + }, + }, + } + + lms := enterpriseApi.LargeMessageStore{ + TypeMeta: metav1.TypeMeta{ + Kind: "LargeMessageStore", + APIVersion: 
"enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "lms", + Namespace: "test", + }, + Spec: enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", }, }, } @@ -2147,12 +2183,18 @@ func TestHandlePullBusChange(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IndexerClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + BusRef: corev1.ObjectReference{ + Name: bus.Name, + }, + LargeMessageStoreRef: corev1.ObjectReference{ + Name: lms.Name, + Namespace: lms.Namespace, }, }, Status: enterpriseApi.IndexerClusterStatus{ ReadyReplicas: 3, + Bus: &enterpriseApi.BusSpec{}, + LargeMessageStore: &enterpriseApi.LargeMessageStoreSpec{}, }, } @@ -2209,7 +2251,8 @@ func TestHandlePullBusChange(t *testing.T) { // Mock pods c := spltest.NewMockClient() ctx := context.TODO() - c.Create(ctx, &busConfig) + c.Create(ctx, &bus) + c.Create(ctx, &lms) c.Create(ctx, newCR) c.Create(ctx, pod0) c.Create(ctx, pod1) @@ -2217,7 +2260,7 @@ func TestHandlePullBusChange(t *testing.T) { // Negative test case: secret not found mgr := &indexerClusterPodManager{} - err := mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err := mgr.handlePullBusChange(ctx, newCR, bus, lms, c) assert.NotNil(t, err) // Mock secret @@ -2228,41 +2271,41 @@ func TestHandlePullBusChange(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestPullBusPipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePullBusChange(ctx, newCR, bus, lms, c) assert.NotNil(t, err) // outputs.conf propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", 
busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, } propertyKVListOutputs := propertyKVList - propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}) - propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}) + propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}) + propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}) body := buildFormBody(propertyKVListOutputs) - addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, busConfig, newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, bus, newCR.Status.ReadyReplicas, "conf-outputs", body) // Negative test case: failure in creating remote queue stanza mgr = 
newTestPullBusPipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePullBusChange(ctx, newCR, bus, lms, c) assert.NotNil(t, err) // inputs.conf body = buildFormBody(propertyKVList) - addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, busConfig, newCR.Status.ReadyReplicas, "conf-inputs", body) + addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, bus, newCR.Status.ReadyReplicas, "conf-inputs", body) // Negative test case: failure in updating remote queue stanza mgr = newTestPullBusPipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePullBusChange(ctx, newCR, bus, lms, c) assert.NotNil(t, err) // default-mode.conf @@ -2290,7 +2333,7 @@ func TestHandlePullBusChange(t *testing.T) { mgr = newTestPullBusPipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePullBusChange(ctx, newCR, bus, lms, c) assert.Nil(t, err) } @@ -2308,7 +2351,7 @@ func buildFormBody(pairs [][]string) string { return b.String() } -func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, busConfig enterpriseApi.BusConfiguration, replicas int32, confName, body string) { +func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, bus enterpriseApi.Bus, replicas int32, confName, body string) { for i := 0; i < int(replicas); i++ { podName := fmt.Sprintf("splunk-%s-indexer-%d", cr.GetName(), i) baseURL := fmt.Sprintf( @@ -2316,11 +2359,11 @@ func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName)) reqCreate, _ := http.NewRequest("POST", baseURL, 
strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } @@ -2340,7 +2383,7 @@ func newTestPullBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *inde } } -func TestApplyIndexerClusterManager_BusConfig_Success(t *testing.T) { +func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") ctx := context.TODO() @@ -2352,28 +2395,26 @@ func TestApplyIndexerClusterManager_BusConfig_Success(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions - busConfig := enterpriseApi.BusConfiguration{ + bus := enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Bus", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "bus", Namespace: "test", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } - c.Create(ctx, &busConfig) + c.Create(ctx, &bus) cm := &enterpriseApi.ClusterManager{ TypeMeta: metav1.TypeMeta{Kind: "ClusterManager"}, @@ -2395,9 +2436,9 @@ func TestApplyIndexerClusterManager_BusConfig_Success(t *testing.T) { }, Spec: 
enterpriseApi.IndexerClusterSpec{ Replicas: 1, - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, - Namespace: busConfig.Namespace, + BusRef: corev1.ObjectReference{ + Name: bus.Name, + Namespace: bus.Namespace, }, CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ ClusterManagerRef: corev1.ObjectReference{ diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 4f96f05bc..6ca721b6a 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -73,7 +73,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr defer updateCRStatus(ctx, client, cr, &err) if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.BusConfiguration = enterpriseApi.BusConfigurationSpec{} + cr.Status.Bus = &enterpriseApi.BusSpec{} } cr.Status.Replicas = cr.Spec.Replicas @@ -210,34 +210,50 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // No need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - // Bus config - busConfig := enterpriseApi.BusConfiguration{} - if cr.Spec.BusConfigurationRef.Name != "" { + // Bus + bus := enterpriseApi.Bus{} + if cr.Spec.BusRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.BusConfigurationRef.Namespace != "" { - ns = cr.Spec.BusConfigurationRef.Namespace + if cr.Spec.BusRef.Namespace != "" { + ns = cr.Spec.BusRef.Namespace } err = client.Get(ctx, types.NamespacedName{ - Name: cr.Spec.BusConfigurationRef.Name, + Name: cr.Spec.BusRef.Name, Namespace: ns, - }, &busConfig) + }, &bus) if err != nil { return result, err } } - // If bus config is updated - if !reflect.DeepEqual(cr.Status.BusConfiguration, busConfig.Spec) { + // Large Message Store + lms := enterpriseApi.LargeMessageStore{} + if cr.Spec.LargeMessageStoreRef.Name != "" { + ns := cr.GetNamespace() + if cr.Spec.LargeMessageStoreRef.Namespace != "" { + ns = cr.Spec.LargeMessageStoreRef.Namespace + } 
+ err = client.Get(context.Background(), types.NamespacedName{ + Name: cr.Spec.LargeMessageStoreRef.Name, + Namespace: ns, + }, &lms) + if err != nil { + return result, err + } + } + + // If bus is updated + if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePushBusChange(ctx, cr, busConfig, client) + err = mgr.handlePushBusChange(ctx, cr, bus, lms, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") return result, err } - cr.Status.BusConfiguration = busConfig.Spec + cr.Status.Bus = &bus.Spec } // Upgrade fron automated MC to MC CRD @@ -311,7 +327,7 @@ func getIngestorStatefulSet(ctx context.Context, client splcommon.ControllerClie } // Checks if only Bus or Pipeline config changed, and updates the conf file if so -func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, busConfig enterpriseApi.BusConfiguration, k8s client.Client) error { +func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, bus enterpriseApi.Bus, lms enterpriseApi.LargeMessageStore, k8s client.Client) error { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("handlePushBusChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) @@ -330,18 +346,18 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) afterDelete := false - if (busConfig.Spec.SQS.QueueName != "" && newCR.Status.BusConfiguration.SQS.QueueName != "" && busConfig.Spec.SQS.QueueName != 
newCR.Status.BusConfiguration.SQS.QueueName) || - (busConfig.Spec.Type != "" && newCR.Status.BusConfiguration.Type != "" && busConfig.Spec.Type != newCR.Status.BusConfiguration.Type) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.BusConfiguration.SQS.QueueName)); err != nil { + if (bus.Spec.QueueName != "" && newCR.Status.Bus.QueueName != "" && bus.Spec.QueueName != newCR.Status.Bus.QueueName) || + (bus.Spec.Provider != "" && newCR.Status.Bus.Provider != "" && bus.Spec.Provider != newCR.Status.Bus.Provider) { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.QueueName)); err != nil { updateErr = err } afterDelete = true } - busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&busConfig, newCR, afterDelete) + busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&bus, &lms, newCR, afterDelete) for _, pbVal := range busChangedFields { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName), [][]string{pbVal}); err != nil { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName), [][]string{pbVal}); err != nil { updateErr = err } } @@ -358,12 +374,21 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n } // getChangedBusFieldsForIngestor returns a list of changed bus and pipeline fields for ingestor pods -func getChangedBusFieldsForIngestor(busConfig *enterpriseApi.BusConfiguration, busConfigIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool) (busChangedFields, pipelineChangedFields [][]string) { - oldPB := &busConfigIngestorStatus.Status.BusConfiguration - newPB := &busConfig.Spec +func getChangedBusFieldsForIngestor(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIngestorStatus *enterpriseApi.IngestorCluster, 
afterDelete bool) (busChangedFields, pipelineChangedFields [][]string) { + oldPB := busIngestorStatus.Status.Bus + if oldPB == nil { + oldPB = &enterpriseApi.BusSpec{} + } + newPB := &bus.Spec + + oldLMS := busIngestorStatus.Status.LargeMessageStore + if oldLMS == nil { + oldLMS = &enterpriseApi.LargeMessageStoreSpec{} + } + newLMS := &lms.Spec // Push changed bus fields - busChangedFields = pushBusChanged(oldPB, newPB, afterDelete) + busChangedFields = pushBusChanged(oldPB, newPB, oldLMS, newLMS, afterDelete) // Always changed pipeline fields pipelineChangedFields = pipelineConfig(false) @@ -402,31 +427,40 @@ func pipelineConfig(isIndexer bool) (output [][]string) { return output } -func pushBusChanged(oldBus, newBus *enterpriseApi.BusConfigurationSpec, afterDelete bool) (output [][]string) { - if oldBus.Type != newBus.Type || afterDelete { - output = append(output, []string{"remote_queue.type", newBus.Type}) +func pushBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (output [][]string) { + busProvider := "" + if newBus.Provider == "sqs" { + busProvider = "sqs_smartbus" + } + lmsProvider := "" + if newLMS.Provider == "s3" { + lmsProvider = "sqs_smartbus" + } + + if oldBus.Provider != newBus.Provider || afterDelete { + output = append(output, []string{"remote_queue.type", busProvider}) } - if oldBus.SQS.AuthRegion != newBus.SQS.AuthRegion || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", newBus.Type), newBus.SQS.AuthRegion}) + if oldBus.Region != newBus.Region || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.Region}) } if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", newBus.Type), newBus.SQS.Endpoint}) + output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", busProvider), 
newBus.SQS.Endpoint}) } - if oldBus.SQS.LargeMessageStoreEndpoint != newBus.SQS.LargeMessageStoreEndpoint || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", newBus.Type), newBus.SQS.LargeMessageStoreEndpoint}) + if oldLMS.S3.Endpoint != newLMS.S3.Endpoint || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", lmsProvider), newLMS.S3.Endpoint}) } - if oldBus.SQS.LargeMessageStorePath != newBus.SQS.LargeMessageStorePath || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", newBus.Type), newBus.SQS.LargeMessageStorePath}) + if oldLMS.S3.Path != newLMS.S3.Path || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", lmsProvider), newLMS.S3.Path}) } - if oldBus.SQS.DeadLetterQueueName != newBus.SQS.DeadLetterQueueName || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", newBus.Type), newBus.SQS.DeadLetterQueueName}) + if oldBus.SQS.DLQ != newBus.SQS.DLQ || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busProvider), newBus.SQS.DLQ}) } output = append(output, - []string{fmt.Sprintf("remote_queue.%s.encoding_format", newBus.Type), "s2s"}, - []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", newBus.Type), "4"}, - []string{fmt.Sprintf("remote_queue.%s.retry_policy", newBus.Type), "max_count"}, - []string{fmt.Sprintf("remote_queue.%s.send_interval", newBus.Type), "5s"}) + []string{fmt.Sprintf("remote_queue.%s.encoding_format", busProvider), "s2s"}, + []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busProvider), "4"}, + []string{fmt.Sprintf("remote_queue.%s.retry_policy", busProvider), "max_count"}, + []string{fmt.Sprintf("remote_queue.%s.send_interval", busProvider), "5s"}) return output } diff --git 
a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index bee3df4d6..d7a1604cd 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -63,28 +63,47 @@ func TestApplyIngestorCluster(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions - busConfig := &enterpriseApi.BusConfiguration{ + provider := "sqs_smartbus" + + bus := &enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Bus", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "bus", Namespace: "test", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } - c.Create(ctx, busConfig) + c.Create(ctx, bus) + + lms := enterpriseApi.LargeMessageStore{ + TypeMeta: metav1.TypeMeta{ + Kind: "LargeMessageStore", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "lms", + Namespace: "test", + }, + Spec: enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", + }, + }, + } + c.Create(ctx, &lms) cr := &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ @@ -100,9 +119,13 @@ func TestApplyIngestorCluster(t *testing.T) { CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Mock: true, }, - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, - Namespace: 
busConfig.Namespace, + BusRef: corev1.ObjectReference{ + Name: bus.Name, + Namespace: bus.Namespace, + }, + LargeMessageStoreRef: corev1.ObjectReference{ + Name: lms.Name, + Namespace: lms.Namespace, }, }, } @@ -261,19 +284,19 @@ func TestApplyIngestorCluster(t *testing.T) { defer func() { newIngestorClusterPodManager = origNew }() propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, - {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, + {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, } body := 
buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, busConfig, cr.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, bus, cr.Status.ReadyReplicas, "conf-outputs", body) // default-mode.conf propertyKVList = [][]string{ @@ -310,23 +333,21 @@ func TestGetIngestorStatefulSet(t *testing.T) { // Object definitions os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") - busConfig := enterpriseApi.BusConfiguration{ + bus := enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Bus", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "bus", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -341,8 +362,8 @@ func TestGetIngestorStatefulSet(t *testing.T) { }, Spec: enterpriseApi.IngestorClusterSpec{ Replicas: 2, - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + BusRef: corev1.ObjectReference{ + Name: bus.Name, }, }, } @@ -396,50 +417,70 @@ func TestGetIngestorStatefulSet(t *testing.T) { } func TestGetChangedBusFieldsForIngestor(t *testing.T) { - busConfig := enterpriseApi.BusConfiguration{ + provider := "sqs_smartbus" + + bus := enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Bus", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "bus", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: 
"sqs_smartbus", + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + }, + }, + } + + lms := enterpriseApi.LargeMessageStore{ + TypeMeta: metav1.TypeMeta{ + Kind: "LargeMessageStore", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "lms", + }, + Spec: enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", }, }, } newCR := &enterpriseApi.IngestorCluster{ Spec: enterpriseApi.IngestorClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + BusRef: corev1.ObjectReference{ + Name: bus.Name, + }, + LargeMessageStoreRef: corev1.ObjectReference{ + Name: lms.Name, }, }, Status: enterpriseApi.IngestorClusterStatus{}, } - busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&busConfig, newCR, false) + busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&bus, &lms, newCR, false) assert.Equal(t, 10, len(busChangedFields)) assert.Equal(t, [][]string{ - {"remote_queue.type", busConfig.Spec.Type}, - {fmt.Sprintf("remote_queue.%s.auth_region", busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - 
{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, - {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, - {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, + {"remote_queue.type", provider}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, }, busChangedFields) assert.Equal(t, 6, len(pipelineChangedFields)) @@ -455,23 +496,40 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { func TestHandlePushBusChange(t *testing.T) { // Object definitions - busConfig := enterpriseApi.BusConfiguration{ + provider := "sqs_smartbus" + + bus := enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Bus", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "busConfig", + Name: "bus", }, - Spec: enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + Spec: enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: 
"https://sqs.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://ingestion/smartbus-test", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - DeadLetterQueueName: "sqs-dlq-test", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", + }, + }, + } + + lms := enterpriseApi.LargeMessageStore{ + TypeMeta: metav1.TypeMeta{ + Kind: "LargeMessageStore", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "lms", + }, + Spec: enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", }, }, } @@ -485,13 +543,18 @@ func TestHandlePushBusChange(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IngestorClusterSpec{ - BusConfigurationRef: corev1.ObjectReference{ - Name: busConfig.Name, + BusRef: corev1.ObjectReference{ + Name: bus.Name, + }, + LargeMessageStoreRef: corev1.ObjectReference{ + Name: lms.Name, }, }, Status: enterpriseApi.IngestorClusterStatus{ - Replicas: 3, - ReadyReplicas: 3, + Replicas: 3, + ReadyReplicas: 3, + Bus: &enterpriseApi.BusSpec{}, + LargeMessageStore: &enterpriseApi.LargeMessageStoreSpec{}, }, } @@ -555,7 +618,7 @@ func TestHandlePushBusChange(t *testing.T) { // Negative test case: secret not found mgr := &ingestorClusterPodManager{} - err := mgr.handlePushBusChange(ctx, newCR, busConfig, c) + err := mgr.handlePushBusChange(ctx, newCR, bus, lms, c) assert.NotNil(t, err) // Mock secret @@ -566,29 +629,29 @@ func TestHandlePushBusChange(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestPushBusPipelineManager(mockHTTPClient) - err = mgr.handlePushBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePushBusChange(ctx, newCR, bus, lms, c) assert.NotNil(t, err) // outputs.conf propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.encoding_format", busConfig.Spec.Type), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", 
busConfig.Spec.Type), busConfig.Spec.SQS.AuthRegion}, - {fmt.Sprintf("remote_queue.%s.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStoreEndpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", busConfig.Spec.Type), busConfig.Spec.SQS.LargeMessageStorePath}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busConfig.Spec.Type), busConfig.Spec.SQS.DeadLetterQueueName}, - {fmt.Sprintf("remote_queue.max_count.%s.max_retries_per_part", busConfig.Spec.Type), "4"}, - {fmt.Sprintf("remote_queue.%s.retry_policy", busConfig.Spec.Type), "max_count"}, - {fmt.Sprintf("remote_queue.%s.send_interval", busConfig.Spec.Type), "5s"}, + {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.max_count.%s.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, newCR, &busConfig, newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, newCR, &bus, newCR.Status.ReadyReplicas, "conf-outputs", body) // Negative test case: failure in creating remote queue stanza mgr = newTestPushBusPipelineManager(mockHTTPClient) - err = mgr.handlePushBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePushBusChange(ctx, newCR, bus, lms, c) 
assert.NotNil(t, err) // default-mode.conf @@ -617,11 +680,11 @@ func TestHandlePushBusChange(t *testing.T) { mgr = newTestPushBusPipelineManager(mockHTTPClient) - err = mgr.handlePushBusChange(ctx, newCR, busConfig, c) + err = mgr.handlePushBusChange(ctx, newCR, bus, lms, c) assert.Nil(t, err) } -func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, busConfig *enterpriseApi.BusConfiguration, replicas int32, confName, body string) { +func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, bus *enterpriseApi.Bus, replicas int32, confName, body string) { for i := 0; i < int(replicas); i++ { podName := fmt.Sprintf("splunk-%s-ingestor-%d", cr.GetName(), i) baseURL := fmt.Sprintf( @@ -629,11 +692,11 @@ func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, c podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", busConfig.Spec.SQS.QueueName)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } diff --git a/pkg/splunk/enterprise/largemessagestore.go b/pkg/splunk/enterprise/largemessagestore.go new file mode 100644 index 000000000..8e6ff93f5 --- /dev/null +++ b/pkg/splunk/enterprise/largemessagestore.go @@ -0,0 +1,75 @@ +/* +Copyright 2025. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package enterprise + +import ( + "context" + "time" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" + splctrl "github.com/splunk/splunk-operator/pkg/splunk/splkcontroller" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// ApplyLargeMessageStore reconciles the state of an IngestorCluster custom resource +func ApplyLargeMessageStore(ctx context.Context, client client.Client, cr *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + var err error + + // Unless modified, reconcile for this object will be requeued after 5 seconds + result := reconcile.Result{ + Requeue: true, + RequeueAfter: time.Second * 5, + } + + if cr.Status.ResourceRevMap == nil { + cr.Status.ResourceRevMap = make(map[string]string) + } + + eventPublisher, _ := newK8EventPublisher(client, cr) + ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) + + cr.Kind = "LargeMessageStore" + + // Initialize phase + cr.Status.Phase = enterpriseApi.PhaseError + + // Update the CR Status + defer updateCRStatus(ctx, client, cr, &err) + + // Check if deletion has been requested + if cr.ObjectMeta.DeletionTimestamp != nil { + terminating, err := splctrl.CheckForDeletion(ctx, cr, client) + if terminating && err != nil { + cr.Status.Phase = enterpriseApi.PhaseTerminating + } else { + result.Requeue = false + } + return 
result, err + } + + cr.Status.Phase = enterpriseApi.PhaseReady + + // RequeueAfter if greater than 0, tells the Controller to requeue the reconcile key after the Duration. + // Implies that Requeue is true, there is no need to set Requeue to true at the same time as RequeueAfter. + if !result.Requeue { + result.RequeueAfter = 0 + } + + return result, nil +} diff --git a/pkg/splunk/enterprise/largemessagestore_test.go b/pkg/splunk/enterprise/largemessagestore_test.go new file mode 100644 index 000000000..0f627383c --- /dev/null +++ b/pkg/splunk/enterprise/largemessagestore_test.go @@ -0,0 +1,83 @@ +/* +Copyright 2025. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package enterprise + +import ( + "context" + "os" + "path/filepath" + "testing" + + enterpriseApi "github.com/splunk/splunk-operator/api/v4" + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func init() { + GetReadinessScriptLocation = func() string { + fileLocation, _ := filepath.Abs("../../../" + readinessScriptLocation) + return fileLocation + } + GetLivenessScriptLocation = func() string { + fileLocation, _ := filepath.Abs("../../../" + livenessScriptLocation) + return fileLocation + } + GetStartupScriptLocation = func() string { + fileLocation, _ := filepath.Abs("../../../" + startupScriptLocation) + return fileLocation + } +} + +func TestApplyLargeMessageStore(t *testing.T) { + os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") + + ctx := context.TODO() + + scheme := runtime.NewScheme() + _ = enterpriseApi.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + c := fake.NewClientBuilder().WithScheme(scheme).Build() + + // Object definitions + lms := &enterpriseApi.LargeMessageStore{ + TypeMeta: metav1.TypeMeta{ + Kind: "LargeMessageStore", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "lms", + Namespace: "test", + }, + Spec: enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", + }, + }, + } + c.Create(ctx, lms) + + // ApplyLargeMessageStore + result, err := ApplyLargeMessageStore(ctx, c, lms) + assert.NoError(t, err) + assert.True(t, result.Requeue) + assert.NotEqual(t, enterpriseApi.PhaseError, lms.Status.Phase) + assert.Equal(t, enterpriseApi.PhaseReady, lms.Status.Phase) +} diff --git a/pkg/splunk/enterprise/types.go b/pkg/splunk/enterprise/types.go index 
6ebd3df34..180659498 100644 --- a/pkg/splunk/enterprise/types.go +++ b/pkg/splunk/enterprise/types.go @@ -63,8 +63,11 @@ const ( // SplunkIngestor may be a standalone or clustered ingestion peer SplunkIngestor InstanceType = "ingestor" - // SplunkBusConfiguration is the bus configuration instance - SplunkBusConfiguration InstanceType = "busconfiguration" + // SplunkBus is the bus instance + SplunkBus InstanceType = "bus" + + // SplunkLargeMessageStore is the large message store instance + SplunkLargeMessageStore InstanceType = "large-message-store" // SplunkDeployer is an instance that distributes baseline configurations and apps to search head cluster members SplunkDeployer InstanceType = "deployer" @@ -294,8 +297,10 @@ func KindToInstanceString(kind string) string { return SplunkIndexer.ToString() case "IngestorCluster": return SplunkIngestor.ToString() - case "BusConfiguration": - return SplunkBusConfiguration.ToString() + case "Bus": + return SplunkBus.ToString() + case "LargeMessageStore": + return SplunkLargeMessageStore.ToString() case "LicenseManager": return SplunkLicenseManager.ToString() case "LicenseMaster": diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index 38853aab0..e8f0736b3 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2291,20 +2291,34 @@ func fetchCurrentCRWithStatusUpdate(ctx context.Context, client splcommon.Contro origCR.(*enterpriseApi.IngestorCluster).Status.DeepCopyInto(&latestIngCR.Status) return latestIngCR, nil - case "BusConfiguration": - latestBusCR := &enterpriseApi.BusConfiguration{} + case "Bus": + latestBusCR := &enterpriseApi.Bus{} err = client.Get(ctx, namespacedName, latestBusCR) if err != nil { return nil, err } - origCR.(*enterpriseApi.BusConfiguration).Status.Message = "" + origCR.(*enterpriseApi.Bus).Status.Message = "" if (crError != nil) && ((*crError) != nil) { - origCR.(*enterpriseApi.BusConfiguration).Status.Message = (*crError).Error() + 
origCR.(*enterpriseApi.Bus).Status.Message = (*crError).Error() } - origCR.(*enterpriseApi.BusConfiguration).Status.DeepCopyInto(&latestBusCR.Status) + origCR.(*enterpriseApi.Bus).Status.DeepCopyInto(&latestBusCR.Status) return latestBusCR, nil + case "LargeMessageStore": + latestLmsCR := &enterpriseApi.LargeMessageStore{} + err = client.Get(ctx, namespacedName, latestLmsCR) + if err != nil { + return nil, err + } + + origCR.(*enterpriseApi.LargeMessageStore).Status.Message = "" + if (crError != nil) && ((*crError) != nil) { + origCR.(*enterpriseApi.LargeMessageStore).Status.Message = (*crError).Error() + } + origCR.(*enterpriseApi.LargeMessageStore).Status.DeepCopyInto(&latestLmsCR.Status) + return latestLmsCR, nil + case "LicenseMaster": latestLmCR := &enterpriseApiV3.LicenseMaster{} err = client.Get(ctx, namespacedName, latestLmCR) diff --git a/test/appframework_aws/c3/appframework_aws_test.go b/test/appframework_aws/c3/appframework_aws_test.go index ba0162ffa..2d150f5ac 100644 --- a/test/appframework_aws/c3/appframework_aws_test.go +++ b/test/appframework_aws/c3/appframework_aws_test.go @@ -3182,7 +3182,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_aws/c3/manager_appframework_test.go b/test/appframework_aws/c3/manager_appframework_test.go index afc7abae6..904433195 100644 --- a/test/appframework_aws/c3/manager_appframework_test.go +++ 
b/test/appframework_aws/c3/manager_appframework_test.go @@ -355,7 +355,7 @@ var _ = Describe("c3appfw test", func() { shcName := fmt.Sprintf("%s-shc", deployment.GetName()) idxName := fmt.Sprintf("%s-idxc", deployment.GetName()) shc, err := deployment.DeploySearchHeadCluster(ctx, shcName, cm.GetName(), lm.GetName(), "", mcName) - idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, "") + idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") // Wait for License Manager to be in READY phase testenv.LicenseManagerReady(ctx, deployment, testcaseEnvInst) @@ -3324,7 +3324,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_az/c3/appframework_azure_test.go b/test/appframework_az/c3/appframework_azure_test.go index 0622700a4..c7fea6ff3 100644 --- a/test/appframework_az/c3/appframework_azure_test.go +++ b/test/appframework_az/c3/appframework_azure_test.go @@ -993,7 +993,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, 
deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_az/c3/manager_appframework_azure_test.go b/test/appframework_az/c3/manager_appframework_azure_test.go index 2a0af0b3b..4412efe43 100644 --- a/test/appframework_az/c3/manager_appframework_azure_test.go +++ b/test/appframework_az/c3/manager_appframework_azure_test.go @@ -991,7 +991,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/appframework_gcp/c3/manager_appframework_test.go b/test/appframework_gcp/c3/manager_appframework_test.go index 02ad17cfb..66c553e47 100644 --- a/test/appframework_gcp/c3/manager_appframework_test.go +++ b/test/appframework_gcp/c3/manager_appframework_test.go @@ -361,7 +361,7 @@ var _ = Describe("c3appfw test", func() { shcName := fmt.Sprintf("%s-shc", deployment.GetName()) idxName := fmt.Sprintf("%s-idxc", deployment.GetName()) shc, err := deployment.DeploySearchHeadCluster(ctx, shcName, cm.GetName(), lm.GetName(), "", mcName) - idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, "") + idxc, err := deployment.DeployIndexerCluster(ctx, idxName, lm.GetName(), 3, cm.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") // Wait for 
License Manager to be in READY phase testenv.LicenseManagerReady(ctx, deployment, testcaseEnvInst) @@ -3327,7 +3327,7 @@ var _ = Describe("c3appfw test", func() { // Deploy the Indexer Cluster testcaseEnvInst.Log.Info("Deploy Single Site Indexer Cluster") indexerReplicas := 3 - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, "") + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", indexerReplicas, deployment.GetName(), "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") Expect(err).To(Succeed(), "Unable to deploy Single Site Indexer Cluster") // Deploy the Search Head Cluster diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index c040802f8..c99112617 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -39,15 +39,20 @@ var ( testenvInstance *testenv.TestEnv testSuiteName = "indingsep-" + testenv.RandomDNSName(3) - bus = enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + bus = enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://test-bucket/smartbus-test", - DeadLetterQueueName: "test-dead-letter-queue", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "test-dead-letter-queue", + }, + } + lms = enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://test-bucket/smartbus-test", }, } serviceAccountName = 
"index-ingest-sa" @@ -80,15 +85,13 @@ var ( "AWS_STS_REGIONAL_ENDPOINTS=regional", } - updateBus = enterpriseApi.BusConfigurationSpec{ - Type: "sqs_smartbus", + updateBus = enterpriseApi.BusSpec{ + Provider: "sqs", + QueueName: "test-queue-updated", + Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - QueueName: "test-queue-updated", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - LargeMessageStoreEndpoint: "https://s3.us-west-2.amazonaws.com", - LargeMessageStorePath: "s3://test-bucket-updated/smartbus-test", - DeadLetterQueueName: "test-dead-letter-queue-updated", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "test-dead-letter-queue-updated", }, } diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index 8bccddb47..1b3d27c70 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -79,14 +79,19 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus Configuration - testcaseEnvInst.Log.Info("Deploy Bus Configuration") - bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + // Deploy Bus + testcaseEnvInst.Log.Info("Deploy Bus") + b, err := deployment.DeployBus(ctx, "bus", bus) + Expect(err).To(Succeed(), "Unable to deploy Bus") + + // Deploy LargeMessageStore + testcaseEnvInst.Log.Info("Deploy LargeMessageStore") + lm, err := deployment.DeployLargeMessageStore(ctx, "lms", lms) + Expect(err).To(Succeed(), "Unable to deploy LargeMessageStore") // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, 
v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: b.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -96,7 +101,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: b.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -125,12 +130,19 @@ var _ = Describe("indingsep test", func() { err = deployment.DeleteCR(ctx, ingest) Expect(err).To(Succeed(), "Unable to delete Ingestor Cluster instance", "Ingestor Cluster Name", ingest) - // Delete the Bus Configuration - busConfiguration := &enterpriseApi.BusConfiguration{} - err = deployment.GetInstance(ctx, "bus-config", busConfiguration) - Expect(err).To(Succeed(), "Unable to get Bus Configuration instance", "Bus Configuration Name", busConfiguration) - err = deployment.DeleteCR(ctx, busConfiguration) - Expect(err).To(Succeed(), "Unable to delete Bus Configuration", "Bus Configuration Name", busConfiguration) + // Delete the Bus + bus := &enterpriseApi.Bus{} + err = deployment.GetInstance(ctx, "bus", bus) + Expect(err).To(Succeed(), "Unable to get Bus instance", "Bus Name", bus) + err = deployment.DeleteCR(ctx, bus) + Expect(err).To(Succeed(), "Unable to delete Bus", "Bus Name", bus) + + // Delete the LargeMessageStore + lm = &enterpriseApi.LargeMessageStore{} + err = deployment.GetInstance(ctx, "lms", lm) + 
Expect(err).To(Succeed(), "Unable to get LargeMessageStore instance", "LargeMessageStore Name", lm) + err = deployment.DeleteCR(ctx, lm) + Expect(err).To(Succeed(), "Unable to delete LargeMessageStore", "LargeMessageStore Name", lm) }) }) @@ -140,10 +152,15 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus Configuration - testcaseEnvInst.Log.Info("Deploy Bus Configuration") - bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + // Deploy Bus + testcaseEnvInst.Log.Info("Deploy Bus") + bc, err := deployment.DeployBus(ctx, "bus", bus) + Expect(err).To(Succeed(), "Unable to deploy Bus") + + // Deploy LargeMessageStore + testcaseEnvInst.Log.Info("Deploy LargeMessageStore") + lm, err := deployment.DeployLargeMessageStore(ctx, "lms", lms) + Expect(err).To(Succeed(), "Unable to deploy LargeMessageStore") // Upload apps to S3 testcaseEnvInst.Log.Info("Upload apps to S3") @@ -188,9 +205,10 @@ var _ = Describe("indingsep test", func() { Image: testcaseEnvInst.GetSplunkImage(), }, }, - BusConfigurationRef: v1.ObjectReference{Name: bc.Name}, - Replicas: 3, - AppFrameworkConfig: appFrameworkSpec, + BusRef: v1.ObjectReference{Name: bc.Name}, + LargeMessageStoreRef: v1.ObjectReference{Name: lm.Name}, + Replicas: 3, + AppFrameworkConfig: appFrameworkSpec, }, } @@ -238,14 +256,19 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus Configuration - testcaseEnvInst.Log.Info("Deploy Bus Configuration") - bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + // Deploy Bus + testcaseEnvInst.Log.Info("Deploy Bus") + bc, err := deployment.DeployBus(ctx, "bus", bus) + 
Expect(err).To(Succeed(), "Unable to deploy Bus") + + // Deploy LargeMessageStore + testcaseEnvInst.Log.Info("Deploy LargeMessageStore") + lm, err := deployment.DeployLargeMessageStore(ctx, "lms", lms) + Expect(err).To(Succeed(), "Unable to deploy LargeMessageStore") // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -255,7 +278,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -278,7 +301,7 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(ingest.Status.BusConfiguration).To(Equal(bus), "Ingestor bus configuration status is not the same as provided as input") + Expect(ingest.Status.Bus).To(Equal(bus), "Ingestor bus status is not the same as provided as input") // Get instance of current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -288,7 +311,7 @@ var _ = Describe("indingsep test", func() { // Verify Indexer 
Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(index.Status.BusConfiguration).To(Equal(bus), "Indexer bus configuration status is not the same as provided as input") + Expect(index.Status.Bus).To(Equal(bus), "Indexer bus status is not the same as provided as input") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files") @@ -340,14 +363,19 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus Configuration - testcaseEnvInst.Log.Info("Deploy Bus Configuration") - bc, err := deployment.DeployBusConfiguration(ctx, "bus-config", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus Configuration") + // Deploy Bus + testcaseEnvInst.Log.Info("Deploy Bus") + bc, err := deployment.DeployBus(ctx, "bus", bus) + Expect(err).To(Succeed(), "Unable to deploy Bus") + + // Deploy LargeMessageStore + testcaseEnvInst.Log.Info("Deploy LargeMessageStore") + lm, err := deployment.DeployLargeMessageStore(ctx, "lms", lms) + Expect(err).To(Succeed(), "Unable to deploy LargeMessageStore") // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -357,7 +385,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, 
deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -372,17 +400,17 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) - // Get instance of current Bus Configuration CR with latest config - testcaseEnvInst.Log.Info("Get instance of current Bus Configuration CR with latest config") - bus := &enterpriseApi.BusConfiguration{} + // Get instance of current Bus CR with latest config + testcaseEnvInst.Log.Info("Get instance of current Bus CR with latest config") + bus := &enterpriseApi.Bus{} err = deployment.GetInstance(ctx, bc.Name, bus) - Expect(err).To(Succeed(), "Failed to get instance of Bus Configuration") + Expect(err).To(Succeed(), "Failed to get instance of Bus") - // Update instance of BusConfiguration CR with new bus configuration - testcaseEnvInst.Log.Info("Update instance of BusConfiguration CR with new bus configuration") + // Update instance of Bus CR with new bus + testcaseEnvInst.Log.Info("Update instance of Bus CR with new bus") bus.Spec = updateBus err = deployment.UpdateCR(ctx, bus) - Expect(err).To(Succeed(), "Unable to deploy Bus Configuration with updated CR") + Expect(err).To(Succeed(), "Unable to deploy Bus with updated CR") // Ensure that Ingestor Cluster has not been restarted testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster has not been restarted") @@ -400,7 +428,7 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(ingest.Status.BusConfiguration).To(Equal(updateBus), "Ingestor bus configuration status is not the same as provided as input") + Expect(ingest.Status.Bus).To(Equal(updateBus), "Ingestor 
bus status is not the same as provided as input") // Get instance of current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -410,7 +438,7 @@ var _ = Describe("indingsep test", func() { // Verify Indexer Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(index.Status.BusConfiguration).To(Equal(updateBus), "Indexer bus configuration status is not the same as provided as input") + Expect(index.Status.Bus).To(Equal(updateBus), "Indexer bus status is not the same as provided as input") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files") diff --git a/test/testenv/deployment.go b/test/testenv/deployment.go index 2e312c652..3a7ba21d2 100644 --- a/test/testenv/deployment.go +++ b/test/testenv/deployment.go @@ -431,9 +431,9 @@ func (d *Deployment) DeployClusterMasterWithSmartStoreIndexes(ctx context.Contex } // DeployIndexerCluster deploys the indexer cluster -func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, busConfig corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { +func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, bus, lms corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { d.testenv.Log.Info("Deploying indexer cluster", "name", name, "CM", clusterManagerRef) - indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, busConfig, serviceAccountName) + indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, bus, lms, serviceAccountName) pdata, _ := json.Marshal(indexer) 
d.testenv.Log.Info("indexer cluster spec", "cr", string(pdata)) deployed, err := d.deployCR(ctx, name, indexer) @@ -445,10 +445,10 @@ func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseMana } // DeployIngestorCluster deploys the ingestor cluster -func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, busConfig corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { +func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, bus, lms corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { d.testenv.Log.Info("Deploying ingestor cluster", "name", name) - ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, busConfig, serviceAccountName) + ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, bus, lms, serviceAccountName) pdata, _ := json.Marshal(ingestor) d.testenv.Log.Info("ingestor cluster spec", "cr", string(pdata)) @@ -460,20 +460,36 @@ func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, cou return deployed.(*enterpriseApi.IngestorCluster), err } -// DeployBusConfiguration deploys the bus configuration -func (d *Deployment) DeployBusConfiguration(ctx context.Context, name string, busConfig enterpriseApi.BusConfigurationSpec) (*enterpriseApi.BusConfiguration, error) { - d.testenv.Log.Info("Deploying bus configuration", "name", name) +// DeployBus deploys the bus +func (d *Deployment) DeployBus(ctx context.Context, name string, bus enterpriseApi.BusSpec) (*enterpriseApi.Bus, error) { + d.testenv.Log.Info("Deploying bus", "name", name) - busCfg := newBusConfiguration(name, d.testenv.namespace, busConfig) + busCfg := newBus(name, d.testenv.namespace, bus) pdata, _ := json.Marshal(busCfg) - d.testenv.Log.Info("bus configuration spec", "cr", string(pdata)) + d.testenv.Log.Info("bus spec", "cr", string(pdata)) deployed, 
err := d.deployCR(ctx, name, busCfg) if err != nil { return nil, err } - return deployed.(*enterpriseApi.BusConfiguration), err + return deployed.(*enterpriseApi.Bus), err +} + +// DeployLargeMessageStore deploys the large message store +func (d *Deployment) DeployLargeMessageStore(ctx context.Context, name string, lms enterpriseApi.LargeMessageStoreSpec) (*enterpriseApi.LargeMessageStore, error) { + d.testenv.Log.Info("Deploying large message store", "name", name) + + lmsCfg := newLargeMessageStore(name, d.testenv.namespace, lms) + pdata, _ := json.Marshal(lmsCfg) + + d.testenv.Log.Info("large message store spec", "cr", string(pdata)) + deployed, err := d.deployCR(ctx, name, lmsCfg) + if err != nil { + return nil, err + } + + return deployed.(*enterpriseApi.LargeMessageStore), err } // DeployIngestorClusterWithAdditionalConfiguration deploys the ingestor cluster with additional configuration @@ -632,13 +648,22 @@ func (d *Deployment) UpdateCR(ctx context.Context, cr client.Object) error { ucr := cr.(*enterpriseApi.IngestorCluster) current.Spec = ucr.Spec cobject = current - case "BusConfiguration": - current := &enterpriseApi.BusConfiguration{} + case "Bus": + current := &enterpriseApi.Bus{} + err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current) + if err != nil { + return err + } + ucr := cr.(*enterpriseApi.Bus) + current.Spec = ucr.Spec + cobject = current + case "LargeMessageStore": + current := &enterpriseApi.LargeMessageStore{} err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current) if err != nil { return err } - ucr := cr.(*enterpriseApi.BusConfiguration) + ucr := cr.(*enterpriseApi.LargeMessageStore) current.Spec = ucr.Spec cobject = current case "ClusterMaster": @@ -740,7 +765,7 @@ func (d *Deployment) DeploySingleSiteCluster(ctx context.Context, name string, i } // Deploy the indexer cluster - _, err := d.DeployIndexerCluster(ctx, name+"-idxc", LicenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, "") + _, err := 
d.DeployIndexerCluster(ctx, name+"-idxc", LicenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -798,7 +823,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHead(ctx context.Cont multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -870,7 +895,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHead(ctx context.Context, n multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -931,7 +956,7 @@ func (d *Deployment) DeployMultisiteCluster(ctx context.Context, name string, in multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1067,7 +1092,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndIndexes(ctx context. 
multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1122,7 +1147,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHeadAndIndexes(ctx co multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, LicenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1227,7 +1252,7 @@ func (d *Deployment) DeploySingleSiteClusterWithGivenAppFrameworkSpec(ctx contex } // Deploy the indexer cluster - idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, "") + idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1305,7 +1330,7 @@ func (d *Deployment) DeploySingleSiteClusterMasterWithGivenAppFrameworkSpec(ctx } // Deploy the indexer cluster - idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, "") + idxc, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1405,7 +1430,7 @@ func (d *Deployment) DeployMultisiteClusterWithSearchHeadAndAppFramework(ctx con multisite_master: 
splunk-%s-%s-service site: %s `, name, "cluster-manager", siteName) - idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1509,7 +1534,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithSearchHeadAndAppFramework(c multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + idxc, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return cm, idxc, sh, err } @@ -1590,7 +1615,7 @@ func (d *Deployment) DeploySingleSiteClusterWithGivenMonitoringConsole(ctx conte } // Deploy the indexer cluster - _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, "") + _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseManager, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1662,7 +1687,7 @@ func (d *Deployment) DeploySingleSiteClusterMasterWithGivenMonitoringConsole(ctx } // Deploy the indexer cluster - _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, "") + _, err = d.DeployIndexerCluster(ctx, name+"-idxc", licenseMaster, indexerReplicas, name, "", corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1756,7 +1781,7 @@ func (d *Deployment) DeployMultisiteClusterWithMonitoringConsole(ctx context.Con multisite_master: splunk-%s-%s-service 
site: %s `, name, "cluster-manager", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseManager, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } @@ -1856,7 +1881,7 @@ func (d *Deployment) DeployMultisiteClusterMasterWithMonitoringConsole(ctx conte multisite_master: splunk-%s-%s-service site: %s `, name, "cluster-master", siteName) - _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, "") + _, err := d.DeployIndexerCluster(ctx, name+"-"+siteName, licenseMaster, indexerReplicas, name, siteDefaults, corev1.ObjectReference{}, corev1.ObjectReference{}, "") if err != nil { return err } diff --git a/test/testenv/util.go b/test/testenv/util.go index b779ab3c3..28bd67a13 100644 --- a/test/testenv/util.go +++ b/test/testenv/util.go @@ -359,7 +359,7 @@ func newClusterMasterWithGivenIndexes(name, ns, licenseManagerName, ansibleConfi } // newIndexerCluster creates and initialize the CR for IndexerCluster Kind -func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, busConfig corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster { +func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, bus, lms corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster { licenseMasterRef, licenseManagerRef := swapLicenseManager(name, licenseManagerName) clusterMasterRef, clusterManagerRef := swapClusterManager(name, clusterManagerRef) @@ -396,8 +396,9 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste }, Defaults: ansibleConfig, }, - Replicas: 
int32(replicas), - BusConfigurationRef: busConfig, + Replicas: int32(replicas), + BusRef: bus, + LargeMessageStoreRef: lms, }, } @@ -405,7 +406,7 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste } // newIngestorCluster creates and initialize the CR for IngestorCluster Kind -func newIngestorCluster(name, ns string, replicas int, splunkImage string, busConfig corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IngestorCluster { +func newIngestorCluster(name, ns string, replicas int, splunkImage string, bus, lms corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IngestorCluster { return &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IngestorCluster", @@ -425,24 +426,38 @@ func newIngestorCluster(name, ns string, replicas int, splunkImage string, busCo Image: splunkImage, }, }, - Replicas: int32(replicas), - BusConfigurationRef: busConfig, + Replicas: int32(replicas), + BusRef: bus, + LargeMessageStoreRef: lms, }, } } -// newBusConfiguration creates and initializes the CR for BusConfiguration Kind -func newBusConfiguration(name, ns string, busConfig enterpriseApi.BusConfigurationSpec) *enterpriseApi.BusConfiguration { - return &enterpriseApi.BusConfiguration{ +// newBus creates and initializes the CR for Bus Kind +func newBus(name, ns string, bus enterpriseApi.BusSpec) *enterpriseApi.Bus { + return &enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ - Kind: "BusConfiguration", + Kind: "Bus", }, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: ns, }, - Spec: busConfig, + Spec: bus, + } +} +// newLargeMessageStore creates and initializes the CR for LargeMessageStore Kind +func newLargeMessageStore(name, ns string, lms enterpriseApi.LargeMessageStoreSpec) *enterpriseApi.LargeMessageStore { + return &enterpriseApi.LargeMessageStore{ + TypeMeta: metav1.TypeMeta{ + Kind: "LargeMessageStore", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + }, + Spec: lms, } } From 
cb8daf2a967460b238cad848093c7962bf731c6d Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Fri, 12 Dec 2025 13:00:28 +0100 Subject: [PATCH 02/25] CSPL-4358 Update docs --- docs/CustomResources.md | 69 +++++++++++ docs/IndexIngestionSeparation.md | 195 +++++++++++++++++++++++-------- 2 files changed, 214 insertions(+), 50 deletions(-) diff --git a/docs/CustomResources.md b/docs/CustomResources.md index 384153add..95ca6c1d9 100644 --- a/docs/CustomResources.md +++ b/docs/CustomResources.md @@ -18,9 +18,11 @@ you can use to manage Splunk Enterprise deployments in your Kubernetes cluster. - [LicenseManager Resource Spec Parameters](#licensemanager-resource-spec-parameters) - [Standalone Resource Spec Parameters](#standalone-resource-spec-parameters) - [SearchHeadCluster Resource Spec Parameters](#searchheadcluster-resource-spec-parameters) + - [Bus Resource Spec Parameters](#bus-resource-spec-parameters) - [ClusterManager Resource Spec Parameters](#clustermanager-resource-spec-parameters) - [IndexerCluster Resource Spec Parameters](#indexercluster-resource-spec-parameters) - [IngestorCluster Resource Spec Parameters](#ingestorcluster-resource-spec-parameters) + - [LargeMessageStore Resource Spec Parameters](#largemessagestore-resource-spec-parameters) - [MonitoringConsole Resource Spec Parameters](#monitoringconsole-resource-spec-parameters) - [Examples of Guaranteed and Burstable QoS](#examples-of-guaranteed-and-burstable-qos) - [A Guaranteed QoS Class example:](#a-guaranteed-qos-class-example) @@ -279,6 +281,41 @@ spec: cpu: "4" ``` +## Bus Resource Spec Parameters + +```yaml +apiVersion: enterprise.splunk.com/v4 +kind: Bus +metadata: + name: bus +spec: + replicas: 3 + provider: sqs + sqs: + name: sqs-test + region: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + dlq: sqs-dlq-test +``` + +Bus inputs can be found in the table below. As of now, only SQS provider of message bus is supported. 
+ +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| provider | string | [Required] Provider of message bus (Allowed values: sqs) | +| sqs | SQS | [Required if provider=sqs] SQS message bus inputs | + +SQS message bus inputs can be found in the table below. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| name | string | [Required] Name of the queue | +| region | string | [Required] Region where the queue is located | +| endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint +| dlq | string | [Required] Name of the dead letter queue | + +Change of any of the bus inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. + ## ClusterManager Resource Spec Parameters ClusterManager resource does not have a required spec parameter, but to configure SmartStore, you can specify indexes and volume configuration as below - ```yaml @@ -353,6 +390,36 @@ the `IngestorCluster` resource provides the following `Spec` configuration param | ---------- | ------- | ----------------------------------------------------- | | replicas | integer | The number of ingestor peers (minimum of 3 which is the default) | +## LargeMessageStore Resource Spec Parameters + +```yaml +apiVersion: enterprise.splunk.com/v4 +kind: LargeMessageStore +metadata: + name: lms +spec: + provider: s3 + s3: + path: s3://ingestion/smartbus-test + endpoint: https://s3.us-west-2.amazonaws.com +``` + +LargeMessageStore inputs can be found in the table below. As of now, only S3 provider of large message store is supported. 
+ +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| provider | string | [Required] Provider of large message store (Allowed values: s3) | +| s3 | S3 | [Required if provider=s3] S3 large message store inputs | + +S3 large message store inputs can be found in the table below. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | +| endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint + +Change of any of the large message bus inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. + ## MonitoringConsole Resource Spec Parameters ```yaml @@ -464,10 +531,12 @@ The Splunk Operator controller reconciles every Splunk Enterprise CR. However, t | Customer Resource Definition | Annotation | | ----------- | --------- | +| bus.enterprise.splunk.com | "bus.enterprise.splunk.com/paused" | | clustermaster.enterprise.splunk.com | "clustermaster.enterprise.splunk.com/paused" | | clustermanager.enterprise.splunk.com | "clustermanager.enterprise.splunk.com/paused" | | indexercluster.enterprise.splunk.com | "indexercluster.enterprise.splunk.com/paused" | | ingestorcluster.enterprise.splunk.com | "ingestorcluster.enterprise.splunk.com/paused" | +| largemessagestore.enterprise.splunk.com | "largemessagestore.enterprise.splunk.com/paused" | | licensemaster.enterprise.splunk.com | "licensemaster.enterprise.splunk.com/paused" | | monitoringconsole.enterprise.splunk.com | "monitoringconsole.enterprise.splunk.com/paused" | | searchheadcluster.enterprise.splunk.com | "searchheadcluster.enterprise.splunk.com/paused" | diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index 3b151cc4d..e8c6211d7 100644 --- 
a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -18,29 +18,27 @@ This separation enables: # Bus -Bus is introduced to store message bus to be shared among IngestorCluster and IndexerCluster. +Bus is introduced to store message bus information to be shared among IngestorCluster and IndexerCluster. ## Spec -Bus inputs can be found in the table below. As of now, only SQS type of message bus is supported. +Bus inputs can be found in the table below. As of now, only SQS provider of message bus is supported. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| type | string | Type of message bus (Only sqs_smartbus as of now) | -| sqs | SQS | SQS message bus inputs | +| provider | string | [Required] Provider of message bus (Allowed values: sqs) | +| sqs | SQS | [Required if provider=sqs] SQS message bus inputs | SQS message bus inputs can be found in the table below. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| queueName | string | Name of the SQS queue | -| authRegion | string | Region where the SQS queue is located | -| endpoint | string | AWS SQS endpoint -| largeMessageStoreEndpoint | string | AWS S3 Large Message Store endpoint | -| largeMessageStorePath | string | S3 path for Large Message Store | -| deadLetterQueueName | string | Name of the SQS dead letter queue | +| name | string | [Required] Name of the queue | +| region | string | [Required] Region where the queue is located | +| endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint +| dlq | string | [Required] Name of the dead letter queue | -Change of any of the bus inputs does not restart Splunk. It just updates the config values with no disruptions. +Change of any of the bus inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. 
## Example ``` @@ -49,14 +47,47 @@ kind: Bus metadata: name: bus spec: - type: sqs_smartbus + provider: sqs sqs: - queueName: sqs-test - authRegion: us-west-2 + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test +``` + +# LargeMessageStore + +LargeMessageStore is introduced to store large message (messages that exceed the size of messages that can be stored in SQS) store information to be shared among IngestorCluster and IndexerCluster. + +## Spec + +LargeMessageStore inputs can be found in the table below. As of now, only S3 provider of large message store is supported. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| provider | string | [Required] Provider of large message store (Allowed values: s3) | +| s3 | S3 | [Required if provider=s3] S3 large message store inputs | + +S3 large message store inputs can be found in the table below. + +| Key | Type | Description | +| ---------- | ------- | ------------------------------------------------- | +| path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | +| endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint + +Change of any of the large message bus inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. 
+ +## Example +``` +apiVersion: enterprise.splunk.com/v4 +kind: LargeMessageStore +metadata: + name: lms +spec: + provider: s3 + s3: + path: s3://ingestion/smartbus-test + endpoint: https://s3.us-west-2.amazonaws.com ``` # IngestorCluster @@ -75,7 +106,7 @@ In addition to common spec inputs, the IngestorCluster resource provides the fol ## Example -The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Push Bus reference allows the user to specify queue and bucket settings for the ingestion process. +The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Bus and LargeMessageStore references allow the user to specify queue and bucket settings for the ingestion process. In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. @@ -112,7 +143,7 @@ In addition to common spec inputs, the IndexerCluster resource provides the foll ## Example -The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. 
This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Pull Bus reference allows the user to specify queue and bucket settings for the indexing process. +The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Bus and LargeMessageStore references allow the user to specify queue and bucket settings for the indexing process. In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. @@ -151,24 +182,32 @@ Common spec values for all SOK Custom Resources can be found in [CustomResources # Helm Charts -An IngestorCluster template has been added to the splunk/splunk-enterprise Helm chart. The IndexerCluster template has also been enhanced to support new inputs. +Bus, LargeMessageStore and IngestorCluster have been added to the splunk/splunk-enterprise Helm chart. IndexerCluster has also been enhanced to support new inputs. ## Example -Below examples describe how to define values for Bus, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. +Below examples describe how to define values for Bus, LargeMessageStoe, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. 
``` bus: enabled: true name: bus - type: sqs_smartbus + provider: sqs sqs: - queueName: sqs-test - authRegion: us-west-2 + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test +``` + +``` +largeMessageStore: + enabled: true + name: lms + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test ``` ``` @@ -513,14 +552,12 @@ metadata: finalizers: - enterprise.splunk.com/delete-pvc spec: - type: sqs_smartbus + provider: sqs sqs: - queueName: sqs-test - authRegion: us-west-2 + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - largeMessageStoreEndpoint: https://s3.us-west-2.amazonaws.com - largeMessageStorePath: s3://ingestion/smartbus-test - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test ``` ``` @@ -550,13 +587,11 @@ Metadata: UID: 12345678-1234-5678-1234-012345678911 Spec: Sqs: - Auth Region: us-west-2 - Dead Letter Queue Name: sqs-dlq-test + Region: us-west-2 + DLQ: sqs-dlq-test Endpoint: https://sqs.us-west-2.amazonaws.com - Large Message Store Endpoint: https://s3.us-west-2.amazonaws.com - Large Message Store Path: s3://ingestion/smartbus-test - Queue Name: sqs-test - Type: sqs_smartbus + Name: sqs-test + Provider: sqs Status: Message: Phase: Ready @@ -564,7 +599,61 @@ Status: Events: ``` -4. Install IngestorCluster resource. +4. Install LargeMessageStore resource. 
+ +``` +$ cat lms.yaml +apiVersion: enterprise.splunk.com/v4 +kind: LargeMessageStore +metadata: + name: lms + finalizers: + - enterprise.splunk.com/delete-pvc +spec: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test +``` + +``` +$ kubectl apply -f lms.yaml +``` + +``` +$ kubectl get lms +NAME PHASE AGE MESSAGE +lms Ready 20s +``` + +``` +kubectl describe lms +Name: lms +Namespace: default +Labels: +Annotations: +API Version: enterprise.splunk.com/v4 +Kind: LargeMessageStore +Metadata: + Creation Timestamp: 2025-10-27T10:25:53Z + Finalizers: + enterprise.splunk.com/delete-pvc + Generation: 1 + Resource Version: 12345678 + UID: 12345678-1234-5678-1234-012345678911 +Spec: + S3: + Endpoint: https://s3.us-west-2.amazonaws.com + Path: s3://ingestion/smartbus-test + Provider: s3 +Status: + Message: + Phase: Ready + Resource Rev Map: +Events: +``` + +5. Install IngestorCluster resource. ``` $ cat ingestor.yaml @@ -614,6 +703,9 @@ Spec: Name: bus Namespace: default Image: splunk/splunk:${SPLUNK_IMAGE_VERSION} + Large Message Store Ref: + Name: lms + Namespace: default Replicas: 3 Service Account: ingestor-sa Status: @@ -630,13 +722,16 @@ Status: Version: 0 Bus: Sqs: - Auth Region: us-west-2 - Dead Letter Queue Name: sqs-dlq-test - Endpoint: https://sqs.us-west-2.amazonaws.com - Large Message Store Endpoint: https://s3.us-west-2.amazonaws.com - Large Message Store Path: s3://ingestion/smartbus-test - Queue Name: sqs-test - Type: sqs_smartbus + Region: us-west-2 + DLQ: sqs-dlq-test + Endpoint: https://sqs.us-west-2.amazonaws.com + Name: sqs-test + Provider: sqs + Large Message Store: + S3: + Endpoint: https://s3.us-west-2.amazonaws.com + Path: s3://ingestion/smartbus-test + Provider: s3 Message: Phase: Ready Ready Replicas: 3 @@ -690,7 +785,7 @@ remote_queue.sqs_smartbus.send_interval = 5s remote_queue.type = sqs_smartbus ``` -5. Install IndexerCluster resource. +6. Install IndexerCluster resource. 
``` $ cat idxc.yaml @@ -791,7 +886,7 @@ disabled = false disabled = true ``` -6. Install Horizontal Pod Autoscaler for IngestorCluster. +7. Install Horizontal Pod Autoscaler for IngestorCluster. ``` $ cat hpa-ing.yaml @@ -874,7 +969,7 @@ NAME REFERENCE TARGETS MINPODS MAXPODS REPLICA ing-hpa IngestorCluster/ingestor cpu: 115%/50% 3 10 10 8m54s ``` -7. Generate fake load. +8. Generate fake load. - HEC_TOKEN: HEC token for making fake calls From 61c0387ce7de0b81859c24eb32a4b96b3ee029f4 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Fri, 12 Dec 2025 13:33:11 +0100 Subject: [PATCH 03/25] CSPL-4358 Addressing comments --- api/v4/bus_types.go | 12 +++--- .../bases/enterprise.splunk.com_buses.yaml | 20 +++++----- ...enterprise.splunk.com_indexerclusters.yaml | 20 +++++----- ...nterprise.splunk.com_ingestorclusters.yaml | 20 +++++----- .../templates/enterprise_v4_buses.yaml | 8 +++- internal/controller/bus_controller_test.go | 18 ++++----- .../ingestorcluster_controller_test.go | 12 +++--- .../01-assert.yaml | 18 ++++----- .../02-assert.yaml | 6 +-- .../splunk_index_ingest_sep.yaml | 6 +-- pkg/splunk/enterprise/bus_test.go | 6 +-- pkg/splunk/enterprise/indexercluster.go | 14 +++---- pkg/splunk/enterprise/indexercluster_test.go | 38 +++++++++---------- pkg/splunk/enterprise/ingestorcluster.go | 10 ++--- pkg/splunk/enterprise/ingestorcluster_test.go | 34 ++++++++--------- ...dex_and_ingestion_separation_suite_test.go | 12 +++--- 16 files changed, 129 insertions(+), 125 deletions(-) diff --git a/api/v4/bus_types.go b/api/v4/bus_types.go index 10958f56b..a4930c1fa 100644 --- a/api/v4/bus_types.go +++ b/api/v4/bus_types.go @@ -36,21 +36,21 @@ type BusSpec struct { // Provider of queue resources Provider string `json:"provider"` + // sqs specific inputs + SQS SQSSpec `json:"sqs"` +} + +type SQSSpec struct { // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // Name of the queue - QueueName string `json:"queueName"` + Name string `json:"name"` // 
+kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$` // Region of the resources Region string `json:"region"` - // sqs specific inputs - SQS SQSSpec `json:"sqs"` -} - -type SQSSpec struct { // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 // Name of the dead letter queue resource diff --git a/config/crd/bases/enterprise.splunk.com_buses.yaml b/config/crd/bases/enterprise.splunk.com_buses.yaml index 6a98483a5..6f4f8fac8 100644 --- a/config/crd/bases/enterprise.splunk.com_buses.yaml +++ b/config/crd/bases/enterprise.splunk.com_buses.yaml @@ -59,14 +59,6 @@ spec: enum: - sqs type: string - queueName: - description: Name of the queue - minLength: 1 - type: string - region: - description: Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string sqs: description: sqs specific inputs properties: @@ -78,13 +70,21 @@ spec: description: Amazon SQS Service endpoint pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ type: string + name: + description: Name of the queue + minLength: 1 + type: string + region: + description: Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string required: - dlq + - name + - region type: object required: - provider - - queueName - - region type: object x-kubernetes-validations: - message: sqs must be provided when provider is sqs diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 3563c678f..c9c19edfb 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -8345,14 +8345,6 @@ spec: enum: - sqs type: string - queueName: - description: Name of the queue - minLength: 1 - type: string - region: - description: Region 
of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string sqs: description: sqs specific inputs properties: @@ -8364,13 +8356,21 @@ spec: description: Amazon SQS Service endpoint pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ type: string + name: + description: Name of the queue + minLength: 1 + type: string + region: + description: Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string required: - dlq + - name + - region type: object required: - provider - - queueName - - region type: object x-kubernetes-validations: - message: sqs must be provided when provider is sqs diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 8ada99079..bdd6fb096 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -4596,14 +4596,6 @@ spec: enum: - sqs type: string - queueName: - description: Name of the queue - minLength: 1 - type: string - region: - description: Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string sqs: description: sqs specific inputs properties: @@ -4615,13 +4607,21 @@ spec: description: Amazon SQS Service endpoint pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ type: string + name: + description: Name of the queue + minLength: 1 + type: string + region: + description: Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string required: - dlq + - name + - region type: object required: - provider - - queueName - - region type: object x-kubernetes-validations: - message: sqs must be provided when provider is sqs diff --git 
a/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml index ce1c1e7a9..bbf162332 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml @@ -15,8 +15,6 @@ metadata: {{- end }} spec: provider: {{ .Values.bus.provider | quote }} - queueName: {{ .Values.bus.queueName | quote }} - region: {{ .Values.bus.region | quote }} {{- with .Values.bus.sqs }} sqs: {{- if .endpoint }} @@ -25,6 +23,12 @@ spec: {{- if .dlq }} dlq: {{ .dlq | quote }} {{- end }} + {{- if .name }} + name: {{ .name | quote }} + {{- end }} + {{- if .region }} + region: {{ .region | quote }} + {{- end }} {{- end }} {{- end }} {{- end }} \ No newline at end of file diff --git a/internal/controller/bus_controller_test.go b/internal/controller/bus_controller_test.go index 300af1879..c45c66420 100644 --- a/internal/controller/bus_controller_test.go +++ b/internal/controller/bus_controller_test.go @@ -72,10 +72,10 @@ var _ = Describe("Bus Controller", func() { spec := enterpriseApi.BusSpec{ Provider: "sqs", - QueueName: "smartbus-queue", - Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - DLQ: "smartbus-dlq", + Name: "smartbus-queue", + Region: "us-west-2", + DLQ: "smartbus-dlq", Endpoint: "https://sqs.us-west-2.amazonaws.com", }, } @@ -101,10 +101,10 @@ var _ = Describe("Bus Controller", func() { annotations := make(map[string]string) spec := enterpriseApi.BusSpec{ Provider: "sqs", - QueueName: "smartbus-queue", - Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - DLQ: "smartbus-dlq", + Name: "smartbus-queue", + Region: "us-west-2", + DLQ: "smartbus-dlq", Endpoint: "https://sqs.us-west-2.amazonaws.com", }, } @@ -140,10 +140,10 @@ var _ = Describe("Bus Controller", func() { spec := enterpriseApi.BusSpec{ Provider: "sqs", - QueueName: "smartbus-queue", - Region: "us-west-2", SQS: enterpriseApi.SQSSpec{ - DLQ: "smartbus-dlq", + Name: 
"smartbus-queue", + Region: "us-west-2", + DLQ: "smartbus-dlq", Endpoint: "https://sqs.us-west-2.amazonaws.com", }, } diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go index 811ca930a..053195d44 100644 --- a/internal/controller/ingestorcluster_controller_test.go +++ b/internal/controller/ingestorcluster_controller_test.go @@ -77,10 +77,10 @@ var _ = Describe("IngestorCluster Controller", func() { Namespace: nsSpecs.Name, }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "smartbus-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "smartbus-queue", + Region: "us-west-2", DLQ: "smartbus-dlq", Endpoint: "https://sqs.us-west-2.amazonaws.com", }, @@ -125,10 +125,10 @@ var _ = Describe("IngestorCluster Controller", func() { Namespace: nsSpecs.Name, }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "smartbus-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "smartbus-queue", + Region: "us-west-2", DLQ: "smartbus-dlq", Endpoint: "https://sqs.us-west-2.amazonaws.com", }, diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml index 001a78ee4..f34dd2e6c 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml @@ -6,11 +6,11 @@ metadata: name: bus spec: provider: sqs - queueName: sqs-test - region: us-west-2 sqs: + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test status: phase: Ready @@ -67,11 +67,11 @@ status: phase: Ready bus: provider: sqs - queueName: sqs-test - region: us-west-2 sqs: + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test largeMessageStore: provider: s3 s3: 
@@ -108,11 +108,11 @@ status: phase: Ready bus: provider: sqs - queueName: sqs-test - region: us-west-2 sqs: + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test largeMessageStore: provider: s3 s3: diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index 86a2df8a8..291eddeba 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -12,11 +12,11 @@ status: phase: Ready bus: provider: sqs - queueName: sqs-test - region: us-west-2 sqs: + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test largeMessageStore: provider: s3 s3: diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml index d832c5253..a73c51ac2 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -9,11 +9,11 @@ bus: enabled: true name: bus provider: sqs - queueName: sqs-test - region: us-west-2 sqs: + name: sqs-test + region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - deadLetterQueueName: sqs-dlq-test + dlq: sqs-dlq-test largeMessageStore: enabled: true diff --git a/pkg/splunk/enterprise/bus_test.go b/pkg/splunk/enterprise/bus_test.go index ac8ce8a8e..6e5bf1aa7 100644 --- a/pkg/splunk/enterprise/bus_test.go +++ b/pkg/splunk/enterprise/bus_test.go @@ -49,10 +49,10 @@ func TestApplyBus(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: 
"https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", }, diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 7b8009cdd..e71a19efd 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -1285,12 +1285,12 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne splunkClient := newSplunkClientForBusPipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) afterDelete := false - if (bus.Spec.QueueName != "" && newCR.Status.Bus.QueueName != "" && bus.Spec.QueueName != newCR.Status.Bus.QueueName) || + if (bus.Spec.SQS.Name != "" && newCR.Status.Bus.SQS.Name != "" && bus.Spec.SQS.Name != newCR.Status.Bus.SQS.Name) || (bus.Spec.Provider != "" && newCR.Status.Bus.Provider != "" && bus.Spec.Provider != newCR.Status.Bus.Provider) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.QueueName)); err != nil { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.SQS.Name)); err != nil { updateErr = err } - if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.QueueName)); err != nil { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.SQS.Name)); err != nil { updateErr = err } afterDelete = true @@ -1299,13 +1299,13 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&bus, &lms, newCR, afterDelete) for _, pbVal := range busChangedFieldsOutputs { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName), [][]string{pbVal}); err != nil { + if err := splunkClient.UpdateConfFile(scopedLog, 
"outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name), [][]string{pbVal}); err != nil { updateErr = err } } for _, pbVal := range busChangedFieldsInputs { - if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName), [][]string{pbVal}); err != nil { + if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name), [][]string{pbVal}); err != nil { updateErr = err } } @@ -1368,8 +1368,8 @@ func pullBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enter if oldBus.Provider != newBus.Provider || afterDelete { inputs = append(inputs, []string{"remote_queue.type", busProvider}) } - if oldBus.Region != newBus.Region || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.Region}) + if oldBus.SQS.Region != newBus.SQS.Region || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.SQS.Region}) } if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete { inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", busProvider), newBus.SQS.Endpoint}) diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index 9df4b2f75..ff10e453d 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -1353,10 +1353,10 @@ func TestGetIndexerStatefulSet(t *testing.T) { Name: "bus", }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", }, @@ -2057,10 +2057,10 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { Name: "bus", }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + 
Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", }, @@ -2099,7 +2099,7 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { assert.Equal(t, 8, len(busChangedFieldsInputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, @@ -2111,7 +2111,7 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { assert.Equal(t, 10, len(busChangedFieldsOutputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, @@ -2146,10 +2146,10 @@ func TestHandlePullBusChange(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", }, @@ -2192,8 +2192,8 @@ func TestHandlePullBusChange(t *testing.T) { }, }, Status: enterpriseApi.IndexerClusterStatus{ - ReadyReplicas: 3, - Bus: &enterpriseApi.BusSpec{}, + ReadyReplicas: 3, + Bus: &enterpriseApi.BusSpec{}, LargeMessageStore: &enterpriseApi.LargeMessageStoreSpec{}, }, 
} @@ -2276,7 +2276,7 @@ func TestHandlePullBusChange(t *testing.T) { // outputs.conf propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, @@ -2359,11 +2359,11 @@ func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } @@ -2405,10 +2405,10 @@ func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", }, diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 6ca721b6a..9e6c6ce17 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -346,9 +346,9 @@ func (mgr *ingestorClusterPodManager) 
handlePushBusChange(ctx context.Context, n splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) afterDelete := false - if (bus.Spec.QueueName != "" && newCR.Status.Bus.QueueName != "" && bus.Spec.QueueName != newCR.Status.Bus.QueueName) || + if (bus.Spec.SQS.Name != "" && newCR.Status.Bus.SQS.Name != "" && bus.Spec.SQS.Name != newCR.Status.Bus.SQS.Name) || (bus.Spec.Provider != "" && newCR.Status.Bus.Provider != "" && bus.Spec.Provider != newCR.Status.Bus.Provider) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.QueueName)); err != nil { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.SQS.Name)); err != nil { updateErr = err } afterDelete = true @@ -357,7 +357,7 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&bus, &lms, newCR, afterDelete) for _, pbVal := range busChangedFields { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName), [][]string{pbVal}); err != nil { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name), [][]string{pbVal}); err != nil { updateErr = err } } @@ -440,8 +440,8 @@ func pushBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enter if oldBus.Provider != newBus.Provider || afterDelete { output = append(output, []string{"remote_queue.type", busProvider}) } - if oldBus.Region != newBus.Region || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.Region}) + if oldBus.SQS.Region != newBus.SQS.Region || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.SQS.Region}) } if oldBus.SQS.Endpoint != 
newBus.SQS.Endpoint || afterDelete { output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", busProvider), newBus.SQS.Endpoint}) diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index d7a1604cd..75cc14ec5 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -75,10 +75,10 @@ func TestApplyIngestorCluster(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", }, @@ -285,7 +285,7 @@ func TestApplyIngestorCluster(t *testing.T) { propertyKVList := [][]string{ {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, @@ -342,10 +342,10 @@ func TestGetIngestorStatefulSet(t *testing.T) { Name: "bus", }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", }, @@ -428,10 +428,10 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { Name: "bus", }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", 
DLQ: "sqs-dlq-test", }, @@ -472,7 +472,7 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { assert.Equal(t, 10, len(busChangedFields)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, @@ -507,10 +507,10 @@ func TestHandlePushBusChange(t *testing.T) { Name: "bus", }, Spec: enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", }, @@ -635,7 +635,7 @@ func TestHandlePushBusChange(t *testing.T) { // outputs.conf propertyKVList := [][]string{ {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, @@ -692,11 +692,11 @@ func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, c podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) 
mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", bus.Spec.QueueName)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index c99112617..711580d99 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -40,10 +40,10 @@ var ( testSuiteName = "indingsep-" + testenv.RandomDNSName(3) bus = enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "test-dead-letter-queue", }, @@ -86,10 +86,10 @@ var ( } updateBus = enterpriseApi.BusSpec{ - Provider: "sqs", - QueueName: "test-queue-updated", - Region: "us-west-2", + Provider: "sqs", SQS: enterpriseApi.SQSSpec{ + Name: "test-queue-updated", + Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "test-dead-letter-queue-updated", }, From 3eb98f747eca7c6e5475f53ff1e4e5c0172a7c4f Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Mon, 15 Dec 2025 10:47:47 +0100 Subject: [PATCH 04/25] CSPL-4358 Adding more validations --- api/v4/bus_types.go | 1 + api/v4/indexercluster_types.go | 1 + api/v4/ingestorcluster_types.go | 2 ++ api/v4/largemessagestore.go | 1 + .../bases/enterprise.splunk.com_buses.yaml | 1 + ...enterprise.splunk.com_indexerclusters.yaml | 6 ++++ ...nterprise.splunk.com_ingestorclusters.yaml | 5 +++ ...erprise.splunk.com_largemessagestores.yaml | 1 + 
pkg/splunk/enterprise/indexercluster.go | 36 +++++++++++++++++-- pkg/splunk/enterprise/ingestorcluster.go | 20 +++++++++-- 10 files changed, 70 insertions(+), 4 deletions(-) diff --git a/api/v4/bus_types.go b/api/v4/bus_types.go index a4930c1fa..4d9cd3a42 100644 --- a/api/v4/bus_types.go +++ b/api/v4/bus_types.go @@ -36,6 +36,7 @@ type BusSpec struct { // Provider of queue resources Provider string `json:"provider"` + // +kubebuilder:validation:Required // sqs specific inputs SQS SQSSpec `json:"sqs"` } diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 0ec425240..1f096ccdd 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -34,6 +34,7 @@ const ( IndexerClusterPausedAnnotation = "indexercluster.enterprise.splunk.com/paused" ) +// +kubebuilder:validation:XValidation:rule="has(self.busRef) == has(self.largeMessageStoreRef)",message="busRef and largeMessageStoreRef must both be set or both be empty" // IndexerClusterSpec defines the desired state of a Splunk Enterprise indexer cluster type IndexerClusterSpec struct { CommonSplunkSpec `json:",inline"` diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index 27fa5d1e0..811f780a4 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -39,9 +39,11 @@ type IngestorClusterSpec struct { // Splunk Enterprise app repository that specifies remote app location and scope for Splunk app management AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` + // +kubebuilder:validation:Required // Bus reference BusRef corev1.ObjectReference `json:"busRef"` + // +kubebuilder:validation:Required // Large Message Store reference LargeMessageStoreRef corev1.ObjectReference `json:"largeMessageStoreRef"` } diff --git a/api/v4/largemessagestore.go b/api/v4/largemessagestore.go index 3e9f4b62b..26c986f2d 100644 --- a/api/v4/largemessagestore.go +++ b/api/v4/largemessagestore.go @@ -36,6 +36,7 @@ type 
LargeMessageStoreSpec struct { // Provider of queue resources Provider string `json:"provider"` + // +kubebuilder:validation:Required // s3 specific inputs S3 S3Spec `json:"s3"` } diff --git a/config/crd/bases/enterprise.splunk.com_buses.yaml b/config/crd/bases/enterprise.splunk.com_buses.yaml index 6f4f8fac8..54d498834 100644 --- a/config/crd/bases/enterprise.splunk.com_buses.yaml +++ b/config/crd/bases/enterprise.splunk.com_buses.yaml @@ -85,6 +85,7 @@ spec: type: object required: - provider + - sqs type: object x-kubernetes-validations: - message: sqs must be provided when provider is sqs diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index c9c19edfb..67e1021f6 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -8328,6 +8328,10 @@ spec: type: object type: array type: object + x-kubernetes-validations: + - message: busRef and largeMessageStoreRef must both be set or both be + empty + rule: has(self.busRef) == has(self.largeMessageStoreRef) status: description: IndexerClusterStatus defines the observed state of a Splunk Enterprise indexer cluster @@ -8371,6 +8375,7 @@ spec: type: object required: - provider + - sqs type: object x-kubernetes-validations: - message: sqs must be provided when provider is sqs @@ -8433,6 +8438,7 @@ spec: type: object required: - provider + - s3 type: object x-kubernetes-validations: - message: s3 must be provided when provider is s3 diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index bdd6fb096..4ecaa8d32 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -4302,6 +4302,9 @@ spec: - name type: object type: array + required: + - busRef + - largeMessageStoreRef type: object status: 
description: IngestorClusterStatus defines the observed state of Ingestor @@ -4622,6 +4625,7 @@ spec: type: object required: - provider + - sqs type: object x-kubernetes-validations: - message: sqs must be provided when provider is sqs @@ -4650,6 +4654,7 @@ spec: type: object required: - provider + - s3 type: object x-kubernetes-validations: - message: s3 must be provided when provider is s3 diff --git a/config/crd/bases/enterprise.splunk.com_largemessagestores.yaml b/config/crd/bases/enterprise.splunk.com_largemessagestores.yaml index 20cd26906..562cd773c 100644 --- a/config/crd/bases/enterprise.splunk.com_largemessagestores.yaml +++ b/config/crd/bases/enterprise.splunk.com_largemessagestores.yaml @@ -75,6 +75,7 @@ spec: type: object required: - provider + - s3 type: object x-kubernetes-validations: - message: s3 must be provided when provider is s3 diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index e71a19efd..2170e914a 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -261,6 +261,14 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } } + // Can not override original bus spec due to comparison in the later code + busCopy := bus + if busCopy.Spec.Provider == "sqs" { + if busCopy.Spec.SQS.Endpoint == "" { + busCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", busCopy.Spec.SQS.Region) + } + } + // Large Message Store lms := enterpriseApi.LargeMessageStore{} if cr.Spec.LargeMessageStoreRef.Name != "" { @@ -277,12 +285,20 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } } + // Can not override original large message store spec due to comparison in the later code + lmsCopy := lms + if lmsCopy.Spec.Provider == "s3" { + if lmsCopy.Spec.S3.Endpoint == "" { + lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", busCopy.Spec.SQS.Region) + } + } + // If bus is updated if 
cr.Spec.BusRef.Name != "" { if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullBusChange(ctx, cr, bus, lms, client) + err = mgr.handlePullBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") @@ -568,6 +584,14 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } } + // Can not override original bus spec due to comparison in the later code + busCopy := bus + if busCopy.Spec.Provider == "sqs" { + if busCopy.Spec.SQS.Endpoint == "" { + busCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", busCopy.Spec.SQS.Region) + } + } + // Large Message Store lms := enterpriseApi.LargeMessageStore{} if cr.Spec.LargeMessageStoreRef.Name != "" { @@ -584,12 +608,20 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } } + // Can not override original large message store spec due to comparison in the later code + lmsCopy := lms + if lmsCopy.Spec.Provider == "s3" { + if lmsCopy.Spec.S3.Endpoint == "" { + lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", busCopy.Spec.SQS.Region) + } + } + // If bus is updated if cr.Spec.BusRef.Name != "" { if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullBusChange(ctx, cr, bus, lms, client) + err = mgr.handlePullBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, 
"Failed to update conf file for Bus/Pipeline config change after pod creation") diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 9e6c6ce17..524f183b5 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -226,6 +226,14 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } } + // Can not override original bus spec due to comparison in the later code + busCopy := bus + if busCopy.Spec.Provider == "sqs" { + if busCopy.Spec.SQS.Endpoint == "" { + busCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", busCopy.Spec.SQS.Region) + } + } + // Large Message Store lms := enterpriseApi.LargeMessageStore{} if cr.Spec.LargeMessageStoreRef.Name != "" { @@ -242,11 +250,19 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } } + // Can not override original large message store spec due to comparison in the later code + lmsCopy := lms + if lmsCopy.Spec.Provider == "s3" { + if lmsCopy.Spec.S3.Endpoint == "" { + lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", bus.Spec.SQS.Region) + } + } + // If bus is updated if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePushBusChange(ctx, cr, bus, lms, client) + err = mgr.handlePushBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") @@ -377,7 +393,7 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n func getChangedBusFieldsForIngestor(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIngestorStatus 
*enterpriseApi.IngestorCluster, afterDelete bool) (busChangedFields, pipelineChangedFields [][]string) { oldPB := busIngestorStatus.Status.Bus if oldPB == nil { - oldPB = &enterpriseApi.BusSpec{} + oldPB = &enterpriseApi.BusSpec{} } newPB := &bus.Spec From 254cbf045eb625aaa7108742eef8935dee17860e Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Tue, 16 Dec 2025 11:05:26 +0100 Subject: [PATCH 05/25] CSPL-4360 Secret reference added for Bus CR --- api/v4/bus_types.go | 4 + api/v4/zz_generated.deepcopy.go | 13 +- .../bases/enterprise.splunk.com_buses.yaml | 33 +++++ ...enterprise.splunk.com_indexerclusters.yaml | 34 +++++ ...nterprise.splunk.com_ingestorclusters.yaml | 34 +++++ .../templates/enterprise_v4_buses.yaml | 4 + .../01-assert.yaml | 133 +----------------- .../01-create-se-secret.yaml | 7 + .../02-assert.yaml | 111 ++++++++++++++- ...stall-setup.yaml => 02-install-setup.yaml} | 0 .../03-assert.yaml | 33 +++++ ...ingestor.yaml => 03-scaleup-ingestor.yaml} | 0 ...all-setup.yaml => 04-uninstall-setup.yaml} | 0 .../splunk_index_ingest_sep.yaml | 3 + pkg/splunk/enterprise/indexercluster.go | 32 +++-- pkg/splunk/enterprise/indexercluster_test.go | 15 +- pkg/splunk/enterprise/ingestorcluster.go | 29 +++- pkg/splunk/enterprise/ingestorcluster_test.go | 11 +- pkg/splunk/enterprise/util.go | 19 +++ .../index_and_ingestion_separation_test.go | 20 +++ test/testenv/remote_index_utils.go | 8 ++ 21 files changed, 384 insertions(+), 159 deletions(-) create mode 100644 kuttl/tests/helm/index-and-ingest-separation/01-create-se-secret.yaml rename kuttl/tests/helm/index-and-ingest-separation/{01-install-setup.yaml => 02-install-setup.yaml} (100%) create mode 100644 kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml rename kuttl/tests/helm/index-and-ingest-separation/{02-scaleup-ingestor.yaml => 03-scaleup-ingestor.yaml} (100%) rename kuttl/tests/helm/index-and-ingest-separation/{03-uninstall-setup.yaml => 04-uninstall-setup.yaml} (100%) diff --git a/api/v4/bus_types.go 
b/api/v4/bus_types.go index 4d9cd3a42..a45be59d6 100644 --- a/api/v4/bus_types.go +++ b/api/v4/bus_types.go @@ -61,6 +61,10 @@ type SQSSpec struct { // +kubebuilder:validation:Pattern=`^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$` // Amazon SQS Service endpoint Endpoint string `json:"endpoint"` + + // +optional + // List of remote storage volumes + VolList []VolumeSpec `json:"volumes,omitempty"` } // BusStatus defines the observed state of Bus diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index dc19b7f10..eb142f146 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -185,7 +185,7 @@ func (in *Bus) DeepCopyInto(out *Bus) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } @@ -234,7 +234,7 @@ func (in *BusList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *BusSpec) DeepCopyInto(out *BusSpec) { *out = *in - out.SQS = in.SQS + in.SQS.DeepCopyInto(&out.SQS) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusSpec. 
@@ -637,7 +637,7 @@ func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { if in.Bus != nil { in, out := &in.Bus, &out.Bus *out = new(BusSpec) - **out = **in + (*in).DeepCopyInto(*out) } if in.LargeMessageStore != nil { in, out := &in.LargeMessageStore, &out.LargeMessageStore @@ -740,7 +740,7 @@ func (in *IngestorClusterStatus) DeepCopyInto(out *IngestorClusterStatus) { if in.Bus != nil { in, out := &in.Bus, &out.Bus *out = new(BusSpec) - **out = **in + (*in).DeepCopyInto(*out) } if in.LargeMessageStore != nil { in, out := &in.LargeMessageStore, &out.LargeMessageStore @@ -1104,6 +1104,11 @@ func (in *S3Spec) DeepCopy() *S3Spec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SQSSpec) DeepCopyInto(out *SQSSpec) { *out = *in + if in.VolList != nil { + in, out := &in.VolList, &out.VolList + *out = make([]VolumeSpec, len(*in)) + copy(*out, *in) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SQSSpec. diff --git a/config/crd/bases/enterprise.splunk.com_buses.yaml b/config/crd/bases/enterprise.splunk.com_buses.yaml index 54d498834..db62f351c 100644 --- a/config/crd/bases/enterprise.splunk.com_buses.yaml +++ b/config/crd/bases/enterprise.splunk.com_buses.yaml @@ -78,6 +78,39 @@ spec: description: Region of the resources pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ type: string + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio, azure, gcp.' + type: string + region: + description: Region of the remote storage volume where apps + reside. Used for aws, if provided. 
Not used for minio + and azure. + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: s3, + blob, gcs. s3 works with aws or minio providers, whereas + blob works with azure provider, gcs works for gcp.' + type: string + type: object + type: array required: - dlq - name diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 67e1021f6..3389a98d5 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -8368,6 +8368,40 @@ spec: description: Region of the resources pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ type: string + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio, azure, gcp.' + type: string + region: + description: Region of the remote storage volume where + apps reside. Used for aws, if provided. Not used for + minio and azure. + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: + s3, blob, gcs. s3 works with aws or minio providers, + whereas blob works with azure provider, gcs works + for gcp.' 
+ type: string + type: object + type: array required: - dlq - name diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 4ecaa8d32..5b065baa5 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -4618,6 +4618,40 @@ spec: description: Region of the resources pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ type: string + volumes: + description: List of remote storage volumes + items: + description: VolumeSpec defines remote volume config + properties: + endpoint: + description: Remote volume URI + type: string + name: + description: Remote volume name + type: string + path: + description: Remote volume path + type: string + provider: + description: 'App Package Remote Store provider. Supported + values: aws, minio, azure, gcp.' + type: string + region: + description: Region of the remote storage volume where + apps reside. Used for aws, if provided. Not used for + minio and azure. + type: string + secretRef: + description: Secret object name + type: string + storageType: + description: 'Remote Storage type. Supported values: + s3, blob, gcs. s3 works with aws or minio providers, + whereas blob works with azure provider, gcs works + for gcp.' + type: string + type: object + type: array required: - dlq - name diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml index bbf162332..e5b881717 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml @@ -29,6 +29,10 @@ spec: {{- if .region }} region: {{ .region | quote }} {{- end }} + {{- if .volumes }} + volumes: + {{ toYaml .volumes
| indent 4 }} + {{- end }} {{- end }} {{- end }} {{- end }} \ No newline at end of file diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml index f34dd2e6c..1a4e4a60a 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml @@ -1,136 +1,5 @@ --- -# assert for bus custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: Bus -metadata: - name: bus -spec: - provider: sqs - sqs: - name: sqs-test - region: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - dlq: sqs-dlq-test -status: - phase: Ready - ---- -# assert for large message store custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: LargeMessageStore -metadata: - name: lms -spec: - provider: s3 - s3: - endpoint: https://s3.us-west-2.amazonaws.com - path: s3://ingestion/smartbus-test -status: - phase: Ready - ---- -# assert for cluster manager custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: ClusterManager -metadata: - name: cm -status: - phase: Ready - ---- -# check if stateful sets are created -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-cm-cluster-manager -status: - replicas: 1 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-cm-cluster-manager-secret-v1 - ---- -# assert for indexer cluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: IndexerCluster -metadata: - name: indexer -spec: - replicas: 3 - busRef: - name: bus -status: - phase: Ready - bus: - provider: sqs - sqs: - name: sqs-test - region: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - dlq: sqs-dlq-test - largeMessageStore: - provider: s3 - s3: - endpoint: https://s3.us-west-2.amazonaws.com - path: s3://ingestion/smartbus-test - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 
-kind: StatefulSet -metadata: - name: splunk-indexer-indexer -status: - replicas: 3 - ---- -# check if secret object are created -apiVersion: v1 -kind: Secret -metadata: - name: splunk-indexer-indexer-secret-v1 - ---- -# assert for indexer cluster custom resource to be ready -apiVersion: enterprise.splunk.com/v4 -kind: IngestorCluster -metadata: - name: ingestor -spec: - replicas: 3 - busRef: - name: bus -status: - phase: Ready - bus: - provider: sqs - sqs: - name: sqs-test - region: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - dlq: sqs-dlq-test - largeMessageStore: - provider: s3 - s3: - endpoint: https://s3.us-west-2.amazonaws.com - path: s3://ingestion/smartbus-test - ---- -# check for stateful set and replicas as configured -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: splunk-ingestor-ingestor -status: - replicas: 3 - ---- -# check if secret object are created apiVersion: v1 kind: Secret metadata: - name: splunk-ingestor-ingestor-secret-v1 \ No newline at end of file + name: s3-secret \ No newline at end of file diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-create-se-secret.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-create-se-secret.yaml new file mode 100644 index 000000000..8f1b1b95f --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/01-create-se-secret.yaml @@ -0,0 +1,7 @@ +--- +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: kubectl create secret generic s3-secret --from-literal=s3_access_key=$AWS_ACCESS_KEY_ID --from-literal=s3_secret_key=$AWS_SECRET_ACCESS_KEY --namespace $NAMESPACE + background: false + skipLogOutput: true \ No newline at end of file diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index 291eddeba..f34dd2e6c 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -1,11 +1,107 @@ 
--- -# assert for ingestor cluster custom resource to be ready +# assert for bus custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: Bus +metadata: + name: bus +spec: + provider: sqs + sqs: + name: sqs-test + region: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + dlq: sqs-dlq-test +status: + phase: Ready + +--- +# assert for large message store custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: LargeMessageStore +metadata: + name: lms +spec: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test +status: + phase: Ready + +--- +# assert for cluster manager custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: ClusterManager +metadata: + name: cm +status: + phase: Ready + +--- +# check if stateful sets are created +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-cm-cluster-manager +status: + replicas: 1 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-cm-cluster-manager-secret-v1 + +--- +# assert for indexer cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IndexerCluster +metadata: + name: indexer +spec: + replicas: 3 + busRef: + name: bus +status: + phase: Ready + bus: + provider: sqs + sqs: + name: sqs-test + region: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + dlq: sqs-dlq-test + largeMessageStore: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test + +--- +# check for stateful set and replicas as configured +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-indexer-indexer +status: + replicas: 3 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-indexer-indexer-secret-v1 + +--- +# assert for indexer cluster custom resource to be ready apiVersion: enterprise.splunk.com/v4 kind: IngestorCluster metadata: name: 
ingestor spec: - replicas: 4 + replicas: 3 busRef: name: bus status: @@ -24,10 +120,17 @@ status: path: s3://ingestion/smartbus-test --- -# check for stateful sets and replicas updated +# check for stateful set and replicas as configured apiVersion: apps/v1 kind: StatefulSet metadata: name: splunk-ingestor-ingestor status: - replicas: 4 + replicas: 3 + +--- +# check if secret object are created +apiVersion: v1 +kind: Secret +metadata: + name: splunk-ingestor-ingestor-secret-v1 \ No newline at end of file diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-install-setup.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-install-setup.yaml similarity index 100% rename from kuttl/tests/helm/index-and-ingest-separation/01-install-setup.yaml rename to kuttl/tests/helm/index-and-ingest-separation/02-install-setup.yaml diff --git a/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml new file mode 100644 index 000000000..291eddeba --- /dev/null +++ b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml @@ -0,0 +1,33 @@ +--- +# assert for ingestor cluster custom resource to be ready +apiVersion: enterprise.splunk.com/v4 +kind: IngestorCluster +metadata: + name: ingestor +spec: + replicas: 4 + busRef: + name: bus +status: + phase: Ready + bus: + provider: sqs + sqs: + name: sqs-test + region: us-west-2 + endpoint: https://sqs.us-west-2.amazonaws.com + dlq: sqs-dlq-test + largeMessageStore: + provider: s3 + s3: + endpoint: https://s3.us-west-2.amazonaws.com + path: s3://ingestion/smartbus-test + +--- +# check for stateful sets and replicas updated +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: splunk-ingestor-ingestor +status: + replicas: 4 diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-scaleup-ingestor.yaml b/kuttl/tests/helm/index-and-ingest-separation/03-scaleup-ingestor.yaml similarity index 100% rename from 
kuttl/tests/helm/index-and-ingest-separation/02-scaleup-ingestor.yaml rename to kuttl/tests/helm/index-and-ingest-separation/03-scaleup-ingestor.yaml diff --git a/kuttl/tests/helm/index-and-ingest-separation/03-uninstall-setup.yaml b/kuttl/tests/helm/index-and-ingest-separation/04-uninstall-setup.yaml similarity index 100% rename from kuttl/tests/helm/index-and-ingest-separation/03-uninstall-setup.yaml rename to kuttl/tests/helm/index-and-ingest-separation/04-uninstall-setup.yaml diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml index a73c51ac2..f75668cf1 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -14,6 +14,9 @@ bus: region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com dlq: sqs-dlq-test + volumes: + - name: helm-bus-secret-ref-test + secretRef: s3-secret largeMessageStore: enabled: true diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 2170e914a..88b75af70 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -295,9 +295,8 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // If bus is updated if cr.Spec.BusRef.Name != "" { - if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { + if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(cr.Status.LargeMessageStore, lms.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) @@ -618,9 +617,8 @@ func ApplyIndexerCluster(ctx 
context.Context, client splcommon.ControllerClient, // If bus is updated if cr.Spec.BusRef.Name != "" { - if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { + if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(cr.Status.LargeMessageStore, lms.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) @@ -1328,7 +1326,21 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne afterDelete = true } - busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&bus, &lms, newCR, afterDelete) + // Secret reference + s3AccessKey, s3SecretKey := "", "" + if bus.Spec.Provider == "sqs" { + for _, vol := range bus.Spec.SQS.VolList { + if vol.SecretRef != "" { + s3AccessKey, s3SecretKey, err = GetBusRemoteVolumeSecrets(ctx, vol, k8s, newCR) + if err != nil { + scopedLog.Error(err, "Failed to get bus remote volume secrets") + return err + } + } + } + } + + busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&bus, &lms, newCR, afterDelete, s3AccessKey, s3SecretKey) for _, pbVal := range busChangedFieldsOutputs { if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name), [][]string{pbVal}); err != nil { @@ -1354,7 +1366,7 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne } // getChangedBusFieldsForIndexer returns a list of changed bus and pipeline fields for indexer pods -func getChangedBusFieldsForIndexer(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (busChangedFieldsInputs, 
busChangedFieldsOutputs, pipelineChangedFields [][]string) { +func getChangedBusFieldsForIndexer(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool, s3AccessKey, s3SecretKey string) (busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields [][]string) { // Compare bus fields oldPB := busIndexerStatus.Status.Bus if oldPB == nil { @@ -1369,7 +1381,7 @@ func getChangedBusFieldsForIndexer(bus *enterpriseApi.Bus, lms *enterpriseApi.La newLMS := lms.Spec // Push all bus fields - busChangedFieldsInputs, busChangedFieldsOutputs = pullBusChanged(oldPB, &newPB, oldLMS, &newLMS, afterDelete) + busChangedFieldsInputs, busChangedFieldsOutputs = pullBusChanged(oldPB, &newPB, oldLMS, &newLMS, afterDelete, s3AccessKey, s3SecretKey) // Always set all pipeline fields, not just changed ones pipelineChangedFields = pipelineConfig(true) @@ -1387,7 +1399,7 @@ func imageUpdatedTo9(previousImage string, currentImage string) bool { return strings.HasPrefix(previousVersion, "8") && strings.HasPrefix(currentVersion, "9") } -func pullBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (inputs, outputs [][]string) { +func pullBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (inputs, outputs [][]string) { busProvider := "" if newBus.Provider == "sqs" { busProvider = "sqs_smartbus" @@ -1400,6 +1412,10 @@ func pullBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enter if oldBus.Provider != newBus.Provider || afterDelete { inputs = append(inputs, []string{"remote_queue.type", busProvider}) } + if !reflect.DeepEqual(oldBus.SQS.VolList, newBus.SQS.VolList) || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.access_key", busProvider), s3AccessKey}) + inputs = append(inputs, 
[]string{fmt.Sprintf("remote_queue.%s.secret_key", busProvider), s3SecretKey}) + } if oldBus.SQS.Region != newBus.SQS.Region || afterDelete { inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.SQS.Region}) } diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index ff10e453d..da3f1dfe2 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -2063,6 +2063,9 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", + VolList: []enterpriseApi.VolumeSpec{ + {SecretRef: "secret"}, + }, }, }, } @@ -2095,10 +2098,14 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { }, } - busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&bus, &lms, newCR, false) - assert.Equal(t, 8, len(busChangedFieldsInputs)) + key := "key" + secret := "secret" + busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&bus, &lms, newCR, false, key, secret) + assert.Equal(t, 10, len(busChangedFieldsInputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, + {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, + {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, @@ -2108,9 +2115,11 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, }, busChangedFieldsInputs) - assert.Equal(t, 10, len(busChangedFieldsOutputs)) + assert.Equal(t, 12, len(busChangedFieldsOutputs)) assert.Equal(t, [][]string{ 
{"remote_queue.type", provider}, + {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, + {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 524f183b5..5582166b9 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -259,9 +259,8 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } // If bus is updated - if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { + if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(cr.Status.LargeMessageStore, lms.Spec) { mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePushBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) @@ -370,7 +369,21 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n afterDelete = true } - busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&bus, &lms, newCR, afterDelete) + // Secret reference + s3AccessKey, s3SecretKey := "", "" + if bus.Spec.Provider == "sqs" { + for _, vol := range bus.Spec.SQS.VolList { + if vol.SecretRef != "" { + s3AccessKey, s3SecretKey, err = GetBusRemoteVolumeSecrets(ctx, vol, k8s, newCR) + if err != nil { + scopedLog.Error(err, "Failed to get bus remote volume secrets") + return err + } + } + } + } + + busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&bus, &lms, newCR, afterDelete, s3AccessKey, s3SecretKey) for _, 
pbVal := range busChangedFields { if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name), [][]string{pbVal}); err != nil { @@ -390,7 +403,7 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n } // getChangedBusFieldsForIngestor returns a list of changed bus and pipeline fields for ingestor pods -func getChangedBusFieldsForIngestor(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool) (busChangedFields, pipelineChangedFields [][]string) { +func getChangedBusFieldsForIngestor(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool, s3AccessKey, s3SecretKey string) (busChangedFields, pipelineChangedFields [][]string) { oldPB := busIngestorStatus.Status.Bus if oldPB == nil { oldPB = &enterpriseApi.BusSpec{} @@ -404,7 +417,7 @@ func getChangedBusFieldsForIngestor(bus *enterpriseApi.Bus, lms *enterpriseApi.L newLMS := &lms.Spec // Push changed bus fields - busChangedFields = pushBusChanged(oldPB, newPB, oldLMS, newLMS, afterDelete) + busChangedFields = pushBusChanged(oldPB, newPB, oldLMS, newLMS, afterDelete, s3AccessKey, s3SecretKey) // Always changed pipeline fields pipelineChangedFields = pipelineConfig(false) @@ -443,7 +456,7 @@ func pipelineConfig(isIndexer bool) (output [][]string) { return output } -func pushBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (output [][]string) { +func pushBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (output [][]string) { busProvider := "" if newBus.Provider == "sqs" { busProvider = "sqs_smartbus" @@ -456,6 +469,10 @@ func pushBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enter if oldBus.Provider != 
newBus.Provider || afterDelete { output = append(output, []string{"remote_queue.type", busProvider}) } + if !reflect.DeepEqual(oldBus.SQS.VolList, newBus.SQS.VolList) || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.access_key", busProvider), s3AccessKey}) + output = append(output, []string{fmt.Sprintf("remote_queue.%s.secret_key", busProvider), s3SecretKey}) + } if oldBus.SQS.Region != newBus.SQS.Region || afterDelete { output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.SQS.Region}) } diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index 75cc14ec5..6136b3f2f 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -434,6 +434,9 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { Region: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", DLQ: "sqs-dlq-test", + VolList: []enterpriseApi.VolumeSpec{ + {SecretRef: "secret"}, + }, }, }, } @@ -467,11 +470,15 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { Status: enterpriseApi.IngestorClusterStatus{}, } - busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&bus, &lms, newCR, false) + key := "key" + secret := "secret" + busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&bus, &lms, newCR, false, key, secret) - assert.Equal(t, 10, len(busChangedFields)) + assert.Equal(t, 12, len(busChangedFields)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, + {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, + {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, diff --git 
a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index e8f0736b3..c68b2ca71 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -417,6 +417,25 @@ func GetSmartstoreRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi. return accessKey, secretKey, namespaceScopedSecret.ResourceVersion, nil } +// GetBusRemoteVolumeSecrets is used to retrieve the access key and secret key for Index & Ingestion separation +func GetBusRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.VolumeSpec, client splcommon.ControllerClient, cr splcommon.MetaObject) (string, string, error) { + namespaceScopedSecret, err := splutil.GetSecretByName(ctx, client, cr.GetNamespace(), cr.GetName(), volume.SecretRef) + if err != nil { + return "", "", err + } + + accessKey := string(namespaceScopedSecret.Data[s3AccessKey]) + secretKey := string(namespaceScopedSecret.Data[s3SecretKey]) + + if accessKey == "" { + return "", "", errors.New("access Key is missing") + } else if secretKey == "" { + return "", "", errors.New("secret Key is missing") + } + + return accessKey, secretKey, nil +} + // getLocalAppFileName generates the local app file name // For e.g., if the app package name is sample_app.tgz // and etag is "abcd1234", then it will be downloaded locally as sample_app.tgz_abcd1234 diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index 1b3d27c70..6868dd168 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -79,6 +79,11 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // TODO: Remove secret reference once IRSA fixed for Splunk and EKS 1.34+ + // Secret reference + volumeSpec :=
[]enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + bus.SQS.VolList = volumeSpec + // Deploy Bus testcaseEnvInst.Log.Info("Deploy Bus") b, err := deployment.DeployBus(ctx, "bus", bus) @@ -152,6 +157,11 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // TODO: Remove secret reference once IRSA fixed for Splunk and EKS 1.34+ + // Secret reference + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + bus.SQS.VolList = volumeSpec + // Deploy Bus testcaseEnvInst.Log.Info("Deploy Bus") bc, err := deployment.DeployBus(ctx, "bus", bus) @@ -256,6 +266,11 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // TODO: Remove secret reference once IRSA fixed for Splunk and EKS 1.34+ + // Secret reference + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + bus.SQS.VolList = volumeSpec + // Deploy Bus testcaseEnvInst.Log.Info("Deploy Bus") bc, err := deployment.DeployBus(ctx, "bus", bus) @@ -363,6 +378,11 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // TODO: Remove secret reference once IRSA fixed for Splunk and EKS 1.34+ + // Secret reference + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + bus.SQS.VolList = volumeSpec + // Deploy Bus testcaseEnvInst.Log.Info("Deploy Bus") bc, err := deployment.DeployBus(ctx, "bus", bus) diff --git a/test/testenv/remote_index_utils.go b/test/testenv/remote_index_utils.go index 0eb2b485c..84e5c0709 
100644 --- a/test/testenv/remote_index_utils.go +++ b/test/testenv/remote_index_utils.go @@ -86,6 +86,14 @@ func RollHotToWarm(ctx context.Context, deployment *Deployment, podName string, return true } +// GenerateBusVolumeSpec returns a VolumeSpec struct with the given values +func GenerateBusVolumeSpec(name, secretRef string) enterpriseApi.VolumeSpec { + return enterpriseApi.VolumeSpec{ + Name: name, + SecretRef: secretRef, + } +} + // GenerateIndexVolumeSpec return VolumeSpec struct with given values func GenerateIndexVolumeSpec(volumeName string, endpoint string, secretRef string, provider string, storageType string, region string) enterpriseApi.VolumeSpec { return enterpriseApi.VolumeSpec{ From f992c40483f60cb3426d2639a5e84c71973ca2e6 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Tue, 16 Dec 2025 12:44:42 +0100 Subject: [PATCH 06/25] CSPL-4360 Fix failing tests --- .../templates/enterprise_v4_indexercluster.yaml | 1 + .../index_and_ingestion_separation_test.go | 1 + 2 files changed, 2 insertions(+) diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml index 0e6a96673..62497d0e6 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml @@ -169,6 +169,7 @@ items: {{- if .namespace }} namespace: {{ .namespace }} {{- end }} + {{- end }} {{- with $.Values.indexerCluster.largeMessageStoreRef }} largeMessageStoreRef: name: {{ .name }} diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index 6868dd168..17ab5903b 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -382,6 +382,7 @@ var _ = Describe("indingsep test", func() { // Secret
reference volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} bus.SQS.VolList = volumeSpec + updateBus.SQS.VolList = volumeSpec // Deploy Bus testcaseEnvInst.Log.Info("Deploy Bus") From 143dbe0917e34256ce09fbf51a060e15e3f13f19 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Tue, 16 Dec 2025 12:54:28 +0100 Subject: [PATCH 07/25] CSPL-4360 Add Splunk restart --- docs/IndexIngestionSeparation.md | 18 ++++++++-- pkg/splunk/enterprise/indexercluster.go | 18 ++++++++++ pkg/splunk/enterprise/ingestorcluster.go | 36 +++++++++++++++++-- pkg/splunk/enterprise/ingestorcluster_test.go | 12 +++---- pkg/splunk/enterprise/util_test.go | 5 +++ .../index_and_ingestion_separation_test.go | 8 ----- 6 files changed, 77 insertions(+), 20 deletions(-) diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index e8c6211d7..195338c7d 100644 --- a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -1,3 +1,9 @@ +--- +title: Index and Ingestion Separation +parent: Deploy & Configure +nav_order: 6 +--- + # Background Separation between ingestion and indexing services within Splunk Operator for Kubernetes enables the operator to independently manage the ingestion service while maintaining seamless integration with the indexing service. @@ -10,7 +16,7 @@ This separation enables: # Important Note > [!WARNING] -> **As of now, only brand new deployments are supported for Index and Ingestion Separation. No migration path is implemented, described or tested for existing deployments to move from a standard model to Index & Ingestion separation model.** +> **For customers deploying SmartBus on CMP, the Splunk Operator for Kubernetes (SOK) manages the configuration and lifecycle of the ingestor tier. The following SOK guide provides implementation details for setting up ingestion separation and integrating with existing indexers. 
This reference is primarily intended for CMP users leveraging SOK-managed ingestors.** # Document Variables @@ -38,7 +44,7 @@ SQS message bus inputs can be found in the table below. | endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint | dlq | string | [Required] Name of the dead letter queue | -Change of any of the bus inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. +**First provisioning or update of any of the bus inputs requires Ingestor Cluster and Indexer Cluster Splunkd restart, but this restart is implemented automatically and done by SOK.** ## Example ``` @@ -425,6 +431,14 @@ In the following example, the dashboard presents ingestion and indexing data in - [kube-prometheus-stack](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-prometheus-stack) +# App Installation for Ingestor Cluster Instances + +Application installation is supported for Ingestor Cluster instances. However, as of now, applications are installed using local scope and if any application requires Splunk restart, there is no automated way to detect it and trigger automatically via Splunk Operator. + +Therefore, to be able to enforce Splunk restart for each of the Ingestor Cluster pods, it is recommended to add/update IngestorCluster CR annotations/labels and apply the new configuration which will trigger the rolling restart of Splunk pods for Ingestor Cluster. + +We are under the investigation on how to make it fully automated. What is more, ideally, update of annotations and labels should not trigger pod restart at all and we are investigating on how to fix this behaviour eventually. + # Example 1. Install CRDs and Splunk Operator for Kubernetes. 
diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 88b75af70..d22b7008e 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -305,6 +305,15 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } cr.Status.Bus = &bus.Spec + + for i := int32(0); i < cr.Spec.Replicas; i++ { + idxcClient := mgr.getClient(ctx, i) + err = idxcClient.RestartSplunk() + if err != nil { + return result, err + } + scopedLog.Info("Restarted splunk", "indexer", i) + } } } @@ -627,6 +636,15 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } cr.Status.Bus = &bus.Spec + + for i := int32(0); i < cr.Spec.Replicas; i++ { + idxcClient := mgr.getClient(ctx, i) + err = idxcClient.RestartSplunk() + if err != nil { + return result, err + } + scopedLog.Info("Restarted splunk", "indexer", i) + } } } diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 5582166b9..94d51a8f7 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -260,7 +260,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // If bus is updated if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(cr.Status.LargeMessageStore, lms.Spec) { - mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) err = mgr.handlePushBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) @@ -269,6 +269,15 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } cr.Status.Bus = &bus.Spec + + for i := int32(0); 
i < cr.Spec.Replicas; i++ { + ingClient := mgr.getClient(ctx, i) + err = ingClient.RestartSplunk() + if err != nil { + return result, err + } + scopedLog.Info("Restarted splunk", "ingestor", i) + } } // Upgrade fron automated MC to MC CRD @@ -311,6 +320,27 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr return result, nil } +// getClient for ingestorClusterPodManager returns a SplunkClient for the member n +func (mgr *ingestorClusterPodManager) getClient(ctx context.Context, n int32) *splclient.SplunkClient { + reqLogger := log.FromContext(ctx) + scopedLog := reqLogger.WithName("ingestorClusterPodManager.getClient").WithValues("name", mgr.cr.GetName(), "namespace", mgr.cr.GetNamespace()) + + // Get Pod Name + memberName := GetSplunkStatefulsetPodName(SplunkIngestor, mgr.cr.GetName(), n) + + // Get Fully Qualified Domain Name + fqdnName := splcommon.GetServiceFQDN(mgr.cr.GetNamespace(), + fmt.Sprintf("%s.%s", memberName, GetSplunkServiceName(SplunkIngestor, mgr.cr.GetName(), true))) + + // Retrieve admin password from Pod + adminPwd, err := splutil.GetSpecificSecretTokenFromPod(ctx, mgr.c, memberName, mgr.cr.GetNamespace(), "password") + if err != nil { + scopedLog.Error(err, "Couldn't retrieve the admin password from pod") + } + + return mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", adminPwd) +} + // validateIngestorClusterSpec checks validity and makes default updates to a IngestorClusterSpec and returns error if something is wrong func validateIngestorClusterSpec(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.IngestorCluster) error { // We cannot have 0 replicas in IngestorCluster spec since this refers to number of ingestion pods in an ingestor cluster @@ -426,6 +456,7 @@ func getChangedBusFieldsForIngestor(bus *enterpriseApi.Bus, lms *enterpriseApi.L } type ingestorClusterPodManager struct { + c splcommon.ControllerClient log logr.Logger cr *enterpriseApi.IngestorCluster secrets 
*corev1.Secret @@ -433,12 +464,13 @@ type ingestorClusterPodManager struct { } // newIngestorClusterPodManager function to create pod manager this is added to write unit test case -var newIngestorClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc) ingestorClusterPodManager { +var newIngestorClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc, c splcommon.ControllerClient) ingestorClusterPodManager { return ingestorClusterPodManager{ log: log, cr: cr, secrets: secret, newSplunkClient: newSplunkClient, + c: c, } } diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index 6136b3f2f..a72179453 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -25,15 +25,14 @@ import ( "github.com/go-logr/logr" enterpriseApi "github.com/splunk/splunk-operator/api/v4" splclient "github.com/splunk/splunk-operator/pkg/splunk/client" + splcommon "github.com/splunk/splunk-operator/pkg/splunk/common" spltest "github.com/splunk/splunk-operator/pkg/splunk/test" splutil "github.com/splunk/splunk-operator/pkg/splunk/util" "github.com/stretchr/testify/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func init() { @@ -56,11 +55,7 @@ func TestApplyIngestorCluster(t *testing.T) { ctx := context.TODO() - scheme := runtime.NewScheme() - _ = enterpriseApi.AddToScheme(scheme) - _ = corev1.AddToScheme(scheme) - _ = appsv1.AddToScheme(scheme) - c := fake.NewClientBuilder().WithScheme(scheme).Build() + c := spltest.NewMockClient() // Object definitions provider := "sqs_smartbus" @@ -273,8 +268,9 @@ func TestApplyIngestorCluster(t *testing.T) { // 
outputs.conf origNew := newIngestorClusterPodManager mockHTTPClient := &spltest.MockHTTPClient{} - newIngestorClusterPodManager = func(l logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, _ NewSplunkClientFunc) ingestorClusterPodManager { + newIngestorClusterPodManager = func(l logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, _ NewSplunkClientFunc, c splcommon.ControllerClient) ingestorClusterPodManager { return ingestorClusterPodManager{ + c: c, log: l, cr: cr, secrets: secret, newSplunkClient: func(uri, user, pass string) *splclient.SplunkClient { return &splclient.SplunkClient{ManagementURI: uri, Username: user, Password: pass, Client: mockHTTPClient} diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go index f5405b2cf..6ea7b021e 100644 --- a/pkg/splunk/enterprise/util_test.go +++ b/pkg/splunk/enterprise/util_test.go @@ -2624,6 +2624,9 @@ func TestUpdateCRStatus(t *testing.T) { WithStatusSubresource(&enterpriseApi.Standalone{}). WithStatusSubresource(&enterpriseApi.MonitoringConsole{}). WithStatusSubresource(&enterpriseApi.IndexerCluster{}). + WithStatusSubresource(&enterpriseApi.Bus{}). + WithStatusSubresource(&enterpriseApi.LargeMessageStore{}). + WithStatusSubresource(&enterpriseApi.IngestorCluster{}). WithStatusSubresource(&enterpriseApi.SearchHeadCluster{}) c := builder.Build() ctx := context.TODO() @@ -3304,6 +3307,8 @@ func TestGetCurrentImage(t *testing.T) { WithStatusSubresource(&enterpriseApi.MonitoringConsole{}). WithStatusSubresource(&enterpriseApi.IndexerCluster{}). WithStatusSubresource(&enterpriseApi.SearchHeadCluster{}). + WithStatusSubresource(&enterpriseApi.Bus{}). + WithStatusSubresource(&enterpriseApi.LargeMessageStore{}). 
WithStatusSubresource(&enterpriseApi.IngestorCluster{}) client := builder.Build() client.Create(ctx, ¤t) diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index 17ab5903b..a21146e11 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -433,14 +433,6 @@ var _ = Describe("indingsep test", func() { err = deployment.UpdateCR(ctx, bus) Expect(err).To(Succeed(), "Unable to deploy Bus with updated CR") - // Ensure that Ingestor Cluster has not been restarted - testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster has not been restarted") - testenv.IngestorReady(ctx, deployment, testcaseEnvInst) - - // Ensure that Indexer Cluster has not been restarted - testcaseEnvInst.Log.Info("Ensure that Indexer Cluster has not been restarted") - testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) - // Get instance of current Ingestor Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Ingestor Cluster CR with latest config") ingest := &enterpriseApi.IngestorCluster{} From 3c7b2d7c2ae00e126903579ab7d797ab853e4c6f Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Tue, 16 Dec 2025 16:05:02 +0100 Subject: [PATCH 08/25] CSPL-4360 Fix failing tests --- pkg/splunk/enterprise/indexercluster.go | 8 ++-- pkg/splunk/enterprise/indexercluster_test.go | 27 ++++++++++- pkg/splunk/enterprise/ingestorcluster.go | 8 ++-- pkg/splunk/enterprise/ingestorcluster_test.go | 47 ++++++++----------- 4 files changed, 54 insertions(+), 36 deletions(-) diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index d22b7008e..a5ebdbaa1 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -1346,7 +1346,7 @@ func (mgr *indexerClusterPodManager) 
handlePullBusChange(ctx context.Context, ne // Secret reference s3AccessKey, s3SecretKey := "", "" - if bus.Spec.Provider == "sqs" { + if bus.Spec.Provider == "sqs" && newCR.Spec.ServiceAccount == "" { for _, vol := range bus.Spec.SQS.VolList { if vol.SecretRef != "" { s3AccessKey, s3SecretKey, err = GetBusRemoteVolumeSecrets(ctx, vol, k8s, newCR) @@ -1431,8 +1431,10 @@ func pullBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enter inputs = append(inputs, []string{"remote_queue.type", busProvider}) } if !reflect.DeepEqual(oldBus.SQS.VolList, newBus.SQS.VolList) || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.access_key", busProvider), s3AccessKey}) - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.secret_key", busProvider), s3SecretKey}) + if s3AccessKey != "" && s3SecretKey != "" { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.access_key", busProvider), s3AccessKey}) + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.secret_key", busProvider), s3SecretKey}) + } } if oldBus.SQS.Region != newBus.SQS.Region || afterDelete { inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.SQS.Region}) diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index da3f1dfe2..00f20656f 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -2404,7 +2404,7 @@ func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions - bus := enterpriseApi.Bus{ + bus := &enterpriseApi.Bus{ TypeMeta: metav1.TypeMeta{ Kind: "Bus", APIVersion: "enterprise.splunk.com/v4", @@ -2423,7 +2423,26 @@ func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { }, }, } - c.Create(ctx, &bus) + c.Create(ctx, bus) + + lms := &enterpriseApi.LargeMessageStore{ + TypeMeta: metav1.TypeMeta{ + 
Kind: "LargeMessageStore", + APIVersion: "enterprise.splunk.com/v4", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "lms", + Namespace: "test", + }, + Spec: enterpriseApi.LargeMessageStoreSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://bucket/key", + }, + }, + } + c.Create(ctx, lms) cm := &enterpriseApi.ClusterManager{ TypeMeta: metav1.TypeMeta{Kind: "ClusterManager"}, @@ -2449,6 +2468,10 @@ func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { Name: bus.Name, Namespace: bus.Namespace, }, + LargeMessageStoreRef: corev1.ObjectReference{ + Name: lms.Name, + Namespace: lms.Namespace, + }, CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ ClusterManagerRef: corev1.ObjectReference{ Name: "cm", diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 94d51a8f7..90c067494 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -401,7 +401,7 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n // Secret reference s3AccessKey, s3SecretKey := "", "" - if bus.Spec.Provider == "sqs" { + if bus.Spec.Provider == "sqs" && newCR.Spec.ServiceAccount == "" { for _, vol := range bus.Spec.SQS.VolList { if vol.SecretRef != "" { s3AccessKey, s3SecretKey, err = GetBusRemoteVolumeSecrets(ctx, vol, k8s, newCR) @@ -502,8 +502,10 @@ func pushBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enter output = append(output, []string{"remote_queue.type", busProvider}) } if !reflect.DeepEqual(oldBus.SQS.VolList, newBus.SQS.VolList) || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.access_key", busProvider), s3AccessKey}) - output = append(output, []string{fmt.Sprintf("remote_queue.%s.secret_key", busProvider), s3SecretKey}) + if s3AccessKey != "" && s3SecretKey != "" { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.access_key", 
busProvider), s3AccessKey}) + output = append(output, []string{fmt.Sprintf("remote_queue.%s.secret_key", busProvider), s3SecretKey}) + } } if oldBus.SQS.Region != newBus.SQS.Region || afterDelete { output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.SQS.Region}) diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index a72179453..0f5fae8fa 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -32,7 +32,8 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func init() { @@ -55,7 +56,11 @@ func TestApplyIngestorCluster(t *testing.T) { ctx := context.TODO() - c := spltest.NewMockClient() + scheme := runtime.NewScheme() + _ = enterpriseApi.AddToScheme(scheme) + _ = corev1.AddToScheme(scheme) + _ = appsv1.AddToScheme(scheme) + c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions provider := "sqs_smartbus" @@ -81,7 +86,7 @@ func TestApplyIngestorCluster(t *testing.T) { } c.Create(ctx, bus) - lms := enterpriseApi.LargeMessageStore{ + lms := &enterpriseApi.LargeMessageStore{ TypeMeta: metav1.TypeMeta{ Kind: "LargeMessageStore", APIVersion: "enterprise.splunk.com/v4", @@ -98,7 +103,7 @@ func TestApplyIngestorCluster(t *testing.T) { }, }, } - c.Create(ctx, &lms) + c.Create(ctx, lms) cr := &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ @@ -112,7 +117,8 @@ func TestApplyIngestorCluster(t *testing.T) { Spec: enterpriseApi.IngestorClusterSpec{ Replicas: 3, CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - Mock: true, + Mock: true, + ServiceAccount: "sa", }, BusRef: corev1.ObjectReference{ Name: bus.Name, @@ -242,29 +248,6 @@ func TestApplyIngestorCluster(t *testing.T) { assert.True(t, result.Requeue) 
assert.NotEqual(t, enterpriseApi.PhaseError, cr.Status.Phase) - // Ensure stored StatefulSet status reflects readiness after any reconcile modifications - fetched := &appsv1.StatefulSet{} - _ = c.Get(ctx, types.NamespacedName{Name: "splunk-test-ingestor", Namespace: "test"}, fetched) - fetched.Status.Replicas = replicas - fetched.Status.ReadyReplicas = replicas - fetched.Status.UpdatedReplicas = replicas - if fetched.Status.UpdateRevision == "" { - fetched.Status.UpdateRevision = "v1" - } - c.Update(ctx, fetched) - - // Guarantee all pods have matching revision label - for _, pn := range []string{"splunk-test-ingestor-0", "splunk-test-ingestor-1", "splunk-test-ingestor-2"} { - p := &corev1.Pod{} - if err := c.Get(ctx, types.NamespacedName{Name: pn, Namespace: "test"}, p); err == nil { - if p.Labels == nil { - p.Labels = map[string]string{} - } - p.Labels["controller-revision-hash"] = fetched.Status.UpdateRevision - c.Update(ctx, p) - } - } - // outputs.conf origNew := newIngestorClusterPodManager mockHTTPClient := &spltest.MockHTTPClient{} @@ -280,6 +263,7 @@ func TestApplyIngestorCluster(t *testing.T) { defer func() { newIngestorClusterPodManager = origNew }() propertyKVList := [][]string{ + {"remote_queue.type", provider}, {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, @@ -318,6 +302,13 @@ func TestApplyIngestorCluster(t *testing.T) { } } + for i := 0; i < int(cr.Status.ReadyReplicas); i++ { + podName := fmt.Sprintf("splunk-test-ingestor-%d", i) + baseURL := fmt.Sprintf("https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/services/server/control/restart", podName, cr.GetName(), cr.GetNamespace()) + req, _ := http.NewRequest("POST", baseURL, nil) + mockHTTPClient.AddHandler(req, 200, "", nil) + } + // Second reconcile should now yield Ready cr.Status.TelAppInstalled = true result, 
err = ApplyIngestorCluster(ctx, c, cr) From e4e083a981061529ca4d948105997901879a1355 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Wed, 17 Dec 2025 11:37:38 +0100 Subject: [PATCH 09/25] CSPL-4360 Fix failing tests --- .../enterprise_v4_largemessagestores.yaml | 16 ++++++++-------- ...se-secret.yaml => 01-create-s3-secret.yaml} | 0 .../index-and-ingest-separation/02-assert.yaml | 4 ++++ .../index-and-ingest-separation/03-assert.yaml | 2 ++ pkg/splunk/enterprise/indexercluster.go | 4 ++++ pkg/splunk/enterprise/ingestorcluster.go | 2 ++ .../index_and_ingestion_separation_test.go | 18 ++++++++++++++---- 7 files changed, 34 insertions(+), 12 deletions(-) rename kuttl/tests/helm/index-and-ingest-separation/{01-create-se-secret.yaml => 01-create-s3-secret.yaml} (100%) diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml index 77ef09e69..1e4e9b5db 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml @@ -1,21 +1,21 @@ -{{- if .Values.largemessagestore }} -{{- if .Values.largemessagestore.enabled }} +{{- if .Values.largeMessageStore }} +{{- if .Values.largeMessageStore.enabled }} apiVersion: enterprise.splunk.com/v4 kind: LargeMessageStore metadata: - name: {{ .Values.largemessagestore.name }} - namespace: {{ default .Release.Namespace .Values.largemessagestore.namespaceOverride }} - {{- with .Values.largemessagestore.additionalLabels }} + name: {{ .Values.largeMessageStore.name }} + namespace: {{ default .Release.Namespace .Values.largeMessageStore.namespaceOverride }} + {{- with .Values.largeMessageStore.additionalLabels }} labels: {{ toYaml . | nindent 4 }} {{- end }} - {{- with .Values.largemessagestore.additionalAnnotations }} + {{- with .Values.largeMessageStore.additionalAnnotations }} annotations: {{ toYaml . 
| nindent 4 }} {{- end }} spec: - provider: {{ .Values.largemessagestore.provider | quote }} - {{- with .Values.largemessagestore.s3 }} + provider: {{ .Values.largeMessageStore.provider | quote }} + {{- with .Values.largeMessageStore.s3 }} s3: {{- if .endpoint }} endpoint: {{ .endpoint | quote }} diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-create-se-secret.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml similarity index 100% rename from kuttl/tests/helm/index-and-ingest-separation/01-create-se-secret.yaml rename to kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index f34dd2e6c..42e003418 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -63,6 +63,8 @@ spec: replicas: 3 busRef: name: bus + largeMessageStoreRef: + name: lms status: phase: Ready bus: @@ -104,6 +106,8 @@ spec: replicas: 3 busRef: name: bus + largeMessageStoreRef: + name: lms status: phase: Ready bus: diff --git a/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml index 291eddeba..819620baa 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml @@ -8,6 +8,8 @@ spec: replicas: 4 busRef: name: bus + largeMessageStoreRef: + name: lms status: phase: Ready bus: diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index a5ebdbaa1..4acbc3d11 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -79,6 +79,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError if cr.Status.Replicas < 
cr.Spec.Replicas { cr.Status.Bus = &enterpriseApi.BusSpec{} + cr.Status.LargeMessageStore = &enterpriseApi.LargeMessageStoreSpec{} } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -305,6 +306,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } cr.Status.Bus = &bus.Spec + cr.Status.LargeMessageStore = &lms.Spec for i := int32(0); i < cr.Spec.Replicas; i++ { idxcClient := mgr.getClient(ctx, i) @@ -407,6 +409,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { cr.Status.Bus = &enterpriseApi.BusSpec{} + cr.Status.LargeMessageStore = &enterpriseApi.LargeMessageStoreSpec{} } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -636,6 +639,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } cr.Status.Bus = &bus.Spec + cr.Status.LargeMessageStore = &lms.Spec for i := int32(0); i < cr.Spec.Replicas; i++ { idxcClient := mgr.getClient(ctx, i) diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 90c067494..1a1dcd428 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -74,6 +74,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr if cr.Status.Replicas < cr.Spec.Replicas { cr.Status.Bus = &enterpriseApi.BusSpec{} + cr.Status.LargeMessageStore = &enterpriseApi.LargeMessageStoreSpec{} } cr.Status.Replicas = cr.Spec.Replicas @@ -269,6 +270,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } cr.Status.Bus = &bus.Spec + cr.Status.LargeMessageStore = &lms.Spec for i := int32(0); i < cr.Spec.Replicas; i++ { ingClient := mgr.getClient(ctx, i) diff 
--git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index a21146e11..4b90db6bd 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -83,6 +83,7 @@ var _ = Describe("indingsep test", func() { // Secret reference volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} bus.SQS.VolList = volumeSpec + updateBus.SQS.VolList = volumeSpec // Deploy Bus testcaseEnvInst.Log.Info("Deploy Bus") @@ -161,6 +162,7 @@ var _ = Describe("indingsep test", func() { // Secret reference volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} bus.SQS.VolList = volumeSpec + updateBus.SQS.VolList = volumeSpec // Deploy Bus testcaseEnvInst.Log.Info("Deploy Bus") @@ -316,7 +318,7 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(ingest.Status.Bus).To(Equal(bus), "Ingestor bus status is not the same as provided as input") + Expect(*ingest.Status.Bus).To(Equal(bus), "Ingestor bus status is not the same as provided as input") // Get instance of current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -326,7 +328,7 @@ var _ = Describe("indingsep test", func() { // Verify Indexer Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(index.Status.Bus).To(Equal(bus), "Indexer bus status is not the same as provided as input") + Expect(*index.Status.Bus).To(Equal(bus), "Indexer bus status is not the same as provided as input") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files") @@ -433,6 +435,10 @@ var 
_ = Describe("indingsep test", func() { err = deployment.UpdateCR(ctx, bus) Expect(err).To(Succeed(), "Unable to deploy Bus with updated CR") + // Ensure that Ingestor Cluster is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") + testenv.IngestorReady(ctx, deployment, testcaseEnvInst) + // Get instance of current Ingestor Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Ingestor Cluster CR with latest config") ingest := &enterpriseApi.IngestorCluster{} @@ -441,7 +447,11 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(ingest.Status.Bus).To(Equal(updateBus), "Ingestor bus status is not the same as provided as input") + Expect(*ingest.Status.Bus).To(Equal(updateBus), "Ingestor bus status is not the same as provided as input") + + // Ensure that Indexer Cluster is in Ready phase + testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") + testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) // Get instance of current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -451,7 +461,7 @@ var _ = Describe("indingsep test", func() { // Verify Indexer Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(index.Status.Bus).To(Equal(updateBus), "Indexer bus status is not the same as provided as input") + Expect(*index.Status.Bus).To(Equal(updateBus), "Indexer bus status is not the same as provided as input") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files") From 3cb9148536edda1fbfd229c009bb6b7dd1ef9ba4 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Wed, 17 Dec 2025 13:25:54 +0100 Subject: [PATCH 10/25] CSPL-4360 Fix errors with failing validation on status --- pkg/splunk/enterprise/indexercluster.go | 31 +++++++++---------- 
pkg/splunk/enterprise/ingestorcluster.go | 25 +++++++-------- pkg/splunk/enterprise/ingestorcluster_test.go | 5 ++- 3 files changed, 30 insertions(+), 31 deletions(-) diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 4acbc3d11..b9b644599 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -77,10 +77,6 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // updates status after function completes cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError - if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.Bus = &enterpriseApi.BusSpec{} - cr.Status.LargeMessageStore = &enterpriseApi.LargeMessageStoreSpec{} - } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) if cr.Status.Peers == nil { @@ -296,7 +292,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // If bus is updated if cr.Spec.BusRef.Name != "" { - if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(cr.Status.LargeMessageStore, lms.Spec) { + if cr.Status.Bus == nil || cr.Status.LargeMessageStore == nil || !reflect.DeepEqual(*cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(*cr.Status.LargeMessageStore, lms.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) err = mgr.handlePullBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { @@ -305,9 +301,6 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller return result, err } - cr.Status.Bus = &bus.Spec - cr.Status.LargeMessageStore = &lms.Spec - for i := int32(0); i < cr.Spec.Replicas; i++ { idxcClient := mgr.getClient(ctx, i) err = idxcClient.RestartSplunk() @@ -316,6 +309,9 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } scopedLog.Info("Restarted splunk", "indexer", i) } + 
+ cr.Status.Bus = &bus.Spec + cr.Status.LargeMessageStore = &lms.Spec } } @@ -407,10 +403,6 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // updates status after function completes cr.Status.Phase = enterpriseApi.PhaseError cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError - if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.Bus = &enterpriseApi.BusSpec{} - cr.Status.LargeMessageStore = &enterpriseApi.LargeMessageStoreSpec{} - } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) if cr.Status.Peers == nil { @@ -629,7 +621,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // If bus is updated if cr.Spec.BusRef.Name != "" { - if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(cr.Status.LargeMessageStore, lms.Spec) { + if cr.Status.Bus == nil || cr.Status.LargeMessageStore == nil || !reflect.DeepEqual(*cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(*cr.Status.LargeMessageStore, lms.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) err = mgr.handlePullBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { @@ -638,9 +630,6 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, return result, err } - cr.Status.Bus = &bus.Spec - cr.Status.LargeMessageStore = &lms.Spec - for i := int32(0); i < cr.Spec.Replicas; i++ { idxcClient := mgr.getClient(ctx, i) err = idxcClient.RestartSplunk() @@ -649,6 +638,9 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } scopedLog.Info("Restarted splunk", "indexer", i) } + + cr.Status.Bus = &bus.Spec + cr.Status.LargeMessageStore = &lms.Spec } } @@ -1336,6 +1328,13 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne } splunkClient := newSplunkClientForBusPipeline(fmt.Sprintf("https://%s:8089", fqdnName), 
"admin", string(adminPwd)) + if newCR.Status.Bus == nil { + newCR.Status.Bus = &enterpriseApi.BusSpec{} + } + if newCR.Status.LargeMessageStore == nil { + newCR.Status.LargeMessageStore = &enterpriseApi.LargeMessageStoreSpec{} + } + afterDelete := false if (bus.Spec.SQS.Name != "" && newCR.Status.Bus.SQS.Name != "" && bus.Spec.SQS.Name != newCR.Status.Bus.SQS.Name) || (bus.Spec.Provider != "" && newCR.Status.Bus.Provider != "" && bus.Spec.Provider != newCR.Status.Bus.Provider) { diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 1a1dcd428..f87a1eaa7 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -72,10 +72,6 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // Update the CR Status defer updateCRStatus(ctx, client, cr, &err) - if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.Bus = &enterpriseApi.BusSpec{} - cr.Status.LargeMessageStore = &enterpriseApi.LargeMessageStoreSpec{} - } cr.Status.Replicas = cr.Spec.Replicas // If needed, migrate the app framework status @@ -260,7 +256,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } // If bus is updated - if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(cr.Status.LargeMessageStore, lms.Spec) { + if cr.Status.Bus == nil || cr.Status.LargeMessageStore == nil || !reflect.DeepEqual(*cr.Status.Bus, bus.Spec) || !reflect.DeepEqual(*cr.Status.LargeMessageStore, lms.Spec) { mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) err = mgr.handlePushBusChange(ctx, cr, busCopy, lmsCopy, client) if err != nil { @@ -269,9 +265,6 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr return result, err } - cr.Status.Bus = &bus.Spec - cr.Status.LargeMessageStore = &lms.Spec - for i := int32(0); i < cr.Spec.Replicas; i++ { ingClient := mgr.getClient(ctx, 
i) err = ingClient.RestartSplunk() @@ -280,6 +273,9 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } scopedLog.Info("Restarted splunk", "ingestor", i) } + + cr.Status.Bus = &bus.Spec + cr.Status.LargeMessageStore = &lms.Spec } // Upgrade fron automated MC to MC CRD @@ -392,6 +388,13 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n } splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) + if newCR.Status.Bus == nil { + newCR.Status.Bus = &enterpriseApi.BusSpec{} + } + if newCR.Status.LargeMessageStore == nil { + newCR.Status.LargeMessageStore = &enterpriseApi.LargeMessageStoreSpec{} + } + afterDelete := false if (bus.Spec.SQS.Name != "" && newCR.Status.Bus.SQS.Name != "" && bus.Spec.SQS.Name != newCR.Status.Bus.SQS.Name) || (bus.Spec.Provider != "" && newCR.Status.Bus.Provider != "" && bus.Spec.Provider != newCR.Status.Bus.Provider) { @@ -437,15 +440,9 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n // getChangedBusFieldsForIngestor returns a list of changed bus and pipeline fields for ingestor pods func getChangedBusFieldsForIngestor(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool, s3AccessKey, s3SecretKey string) (busChangedFields, pipelineChangedFields [][]string) { oldPB := busIngestorStatus.Status.Bus - if oldPB == nil { - oldPB = &enterpriseApi.BusSpec{} - } newPB := &bus.Spec oldLMS := busIngestorStatus.Status.LargeMessageStore - if oldLMS == nil { - oldLMS = &enterpriseApi.LargeMessageStoreSpec{} - } newLMS := &lms.Spec // Push changed bus fields diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index 0f5fae8fa..63d94facb 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -454,7 +454,10 @@ func 
TestGetChangedBusFieldsForIngestor(t *testing.T) { Name: lms.Name, }, }, - Status: enterpriseApi.IngestorClusterStatus{}, + Status: enterpriseApi.IngestorClusterStatus{ + Bus: &enterpriseApi.BusSpec{}, + LargeMessageStore: &enterpriseApi.LargeMessageStoreSpec{}, + }, } key := "key" From ba73a8779bec65a9230ef2f23a88f5968d8f2501 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Fri, 19 Dec 2025 08:47:17 +0100 Subject: [PATCH 11/25] CSPL-4358 Rename Bus to Queue --- PROJECT | 2 +- api/v4/indexercluster_types.go | 10 +- api/v4/ingestorcluster_types.go | 8 +- api/v4/{bus_types.go => queue_types.go} | 48 ++--- api/v4/zz_generated.deepcopy.go | 194 +++++++++--------- cmd/main.go | 4 +- ...enterprise.splunk.com_indexerclusters.yaml | 170 +++++++-------- ...nterprise.splunk.com_ingestorclusters.yaml | 166 +++++++-------- ...yaml => enterprise.splunk.com_queues.yaml} | 24 +-- config/crd/kustomization.yaml | 2 +- ...ditor_role.yaml => queue_editor_role.yaml} | 6 +- ...iewer_role.yaml => queue_viewer_role.yaml} | 6 +- config/rbac/role.yaml | 6 +- ...e_v4_bus.yaml => enterprise_v4_queue.yaml} | 4 +- config/samples/kustomization.yaml | 2 +- docs/CustomResources.md | 28 +-- docs/IndexIngestionSeparation.md | 92 ++++----- .../enterprise_v4_indexercluster.yaml | 4 +- .../enterprise_v4_ingestorcluster.yaml | 10 +- ...4_buses.yaml => enterprise_v4_queues.yaml} | 18 +- helm-chart/splunk-enterprise/values.yaml | 4 +- ...ditor_role.yaml => queue_editor_role.yaml} | 12 +- ...iewer_role.yaml => queue_viewer_role.yaml} | 12 +- .../splunk-operator/templates/rbac/role.yaml | 6 +- .../controller/indexercluster_controller.go | 8 +- .../controller/ingestorcluster_controller.go | 8 +- .../ingestorcluster_controller_test.go | 24 +-- ...{bus_controller.go => queue_controller.go} | 38 ++-- ...oller_test.go => queue_controller_test.go} | 84 ++++---- internal/controller/suite_test.go | 2 +- internal/controller/testutils/new.go | 10 +- .../01-assert.yaml | 18 +- .../02-assert.yaml | 6 +- 
.../splunk_index_ingest_sep.yaml | 12 +- pkg/splunk/enterprise/clustermanager.go | 5 +- pkg/splunk/enterprise/indexercluster.go | 169 ++++++++------- pkg/splunk/enterprise/indexercluster_test.go | 134 ++++++------ pkg/splunk/enterprise/ingestorcluster.go | 108 +++++----- pkg/splunk/enterprise/ingestorcluster_test.go | 112 +++++----- pkg/splunk/enterprise/monitoringconsole.go | 3 +- pkg/splunk/enterprise/{bus.go => queue.go} | 6 +- .../enterprise/{bus_test.go => queue_test.go} | 20 +- pkg/splunk/enterprise/types.go | 8 +- pkg/splunk/enterprise/upgrade.go | 9 +- pkg/splunk/enterprise/util.go | 16 +- ...dex_and_ingestion_separation_suite_test.go | 4 +- .../index_and_ingestion_separation_test.go | 86 ++++---- test/testenv/deployment.go | 30 +-- test/testenv/util.go | 20 +- 49 files changed, 887 insertions(+), 891 deletions(-) rename api/v4/{bus_types.go => queue_types.go} (75%) rename config/crd/bases/{enterprise.splunk.com_buses.yaml => enterprise.splunk.com_queues.yaml} (89%) rename config/rbac/{bus_editor_role.yaml => queue_editor_role.yaml} (92%) rename config/rbac/{bus_viewer_role.yaml => queue_viewer_role.yaml} (91%) rename config/samples/{enterprise_v4_bus.yaml => enterprise_v4_queue.yaml} (81%) rename helm-chart/splunk-enterprise/templates/{enterprise_v4_buses.yaml => enterprise_v4_queues.yaml} (57%) rename helm-chart/splunk-operator/templates/rbac/{bus_editor_role.yaml => queue_editor_role.yaml} (82%) rename helm-chart/splunk-operator/templates/rbac/{bus_viewer_role.yaml => queue_viewer_role.yaml} (81%) rename internal/controller/{bus_controller.go => queue_controller.go} (72%) rename internal/controller/{bus_controller_test.go => queue_controller_test.go} (68%) rename pkg/splunk/enterprise/{bus.go => queue.go} (91%) rename pkg/splunk/enterprise/{bus_test.go => queue_test.go} (81%) diff --git a/PROJECT b/PROJECT index aa4aa1078..c2f3680d3 100644 --- a/PROJECT +++ b/PROJECT @@ -128,7 +128,7 @@ resources: controller: true domain: splunk.com group: enterprise - 
kind: Bus + kind: Queue path: github.com/splunk/splunk-operator/api/v4 version: v4 - api: diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 1f096ccdd..5e76d3e57 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -34,14 +34,14 @@ const ( IndexerClusterPausedAnnotation = "indexercluster.enterprise.splunk.com/paused" ) -// +kubebuilder:validation:XValidation:rule="has(self.busRef) == has(self.largeMessageStoreRef)",message="busRef and largeMessageStoreRef must both be set or both be empty" +// +kubebuilder:validation:XValidation:rule="has(self.queueRef) == has(self.largeMessageStoreRef)",message="queueRef and largeMessageStoreRef must both be set or both be empty" // IndexerClusterSpec defines the desired state of a Splunk Enterprise indexer cluster type IndexerClusterSpec struct { CommonSplunkSpec `json:",inline"` // +optional - // Bus reference - BusRef corev1.ObjectReference `json:"busRef"` + // Queue reference + QueueRef corev1.ObjectReference `json:"queueRef"` // +optional // Large Message Store reference @@ -121,8 +121,8 @@ type IndexerClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Bus - Bus *BusSpec `json:"bus,omitempty"` + // Queue + Queue *QueueSpec `json:"queue,omitempty"` // Large Message Store LargeMessageStore *LargeMessageStoreSpec `json:"largeMessageStore,omitempty"` diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index 811f780a4..aa2281864 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -40,8 +40,8 @@ type IngestorClusterSpec struct { AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` // +kubebuilder:validation:Required - // Bus reference - BusRef corev1.ObjectReference `json:"busRef"` + // Queue reference + QueueRef corev1.ObjectReference `json:"queueRef"` // +kubebuilder:validation:Required // Large Message Store reference @@ -74,8 +74,8 @@ type 
IngestorClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Bus - Bus *BusSpec `json:"bus,omitempty"` + // Queue + Queue *QueueSpec `json:"queue,omitempty"` // Large Message Store LargeMessageStore *LargeMessageStoreSpec `json:"largeMessageStore,omitempty"` diff --git a/api/v4/bus_types.go b/api/v4/queue_types.go similarity index 75% rename from api/v4/bus_types.go rename to api/v4/queue_types.go index 4d9cd3a42..a094b76ce 100644 --- a/api/v4/bus_types.go +++ b/api/v4/queue_types.go @@ -23,14 +23,14 @@ import ( ) const ( - // BusPausedAnnotation is the annotation that pauses the reconciliation (triggers + // QueuePausedAnnotation is the annotation that pauses the reconciliation (triggers // an immediate requeue) - BusPausedAnnotation = "bus.enterprise.splunk.com/paused" + QueuePausedAnnotation = "queue.enterprise.splunk.com/paused" ) // +kubebuilder:validation:XValidation:rule="self.provider != 'sqs' || has(self.sqs)",message="sqs must be provided when provider is sqs" -// BusSpec defines the desired state of Bus -type BusSpec struct { +// QueueSpec defines the desired state of Queue +type QueueSpec struct { // +kubebuilder:validation:Required // +kubebuilder:validation:Enum=sqs // Provider of queue resources @@ -63,9 +63,9 @@ type SQSSpec struct { Endpoint string `json:"endpoint"` } -// BusStatus defines the observed state of Bus -type BusStatus struct { - // Phase of the bus +// QueueStatus defines the observed state of Queue +type QueueStatus struct { + // Phase of the queue Phase Phase `json:"phase"` // Resource revision tracker @@ -78,27 +78,27 @@ type BusStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// Bus is the Schema for a Splunk Enterprise bus +// Queue is the Schema for a Splunk Enterprise queue // +k8s:openapi-gen=true // +kubebuilder:subresource:status // 
+kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector -// +kubebuilder:resource:path=buses,scope=Namespaced,shortName=bus -// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of bus" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of bus resource" +// +kubebuilder:resource:path=queues,scope=Namespaced,shortName=queue +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of queue" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of queue resource" // +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status" // +kubebuilder:storageversion -// Bus is the Schema for the buses API -type Bus struct { +// Queue is the Schema for the queues API +type Queue struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` - Spec BusSpec `json:"spec"` - Status BusStatus `json:"status,omitempty,omitzero"` + Spec QueueSpec `json:"spec"` + Status QueueStatus `json:"status,omitempty,omitzero"` } // DeepCopyObject implements runtime.Object -func (in *Bus) DeepCopyObject() runtime.Object { +func (in *Queue) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -107,20 +107,20 @@ func (in *Bus) DeepCopyObject() runtime.Object { // +kubebuilder:object:root=true -// BusList contains a list of Bus -type BusList struct { +// QueueList contains a list of Queue +type QueueList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` - Items []Bus `json:"items"` + Items []Queue `json:"items"` } func init() { - SchemeBuilder.Register(&Bus{}, &BusList{}) + SchemeBuilder.Register(&Queue{}, &QueueList{}) } // NewEvent creates a new event associated with the object 
and ready // to be published to Kubernetes API -func (bc *Bus) NewEvent(eventType, reason, message string) corev1.Event { +func (bc *Queue) NewEvent(eventType, reason, message string) corev1.Event { t := metav1.Now() return corev1.Event{ ObjectMeta: metav1.ObjectMeta{ @@ -128,7 +128,7 @@ func (bc *Bus) NewEvent(eventType, reason, message string) corev1.Event { Namespace: bc.ObjectMeta.Namespace, }, InvolvedObject: corev1.ObjectReference{ - Kind: "Bus", + Kind: "Queue", Namespace: bc.Namespace, Name: bc.Name, UID: bc.UID, @@ -137,12 +137,12 @@ func (bc *Bus) NewEvent(eventType, reason, message string) corev1.Event { Reason: reason, Message: message, Source: corev1.EventSource{ - Component: "splunk-bus-controller", + Component: "splunk-queue-controller", }, FirstTimestamp: t, LastTimestamp: t, Count: 1, Type: eventType, - ReportingController: "enterprise.splunk.com/bus-controller", + ReportingController: "enterprise.splunk.com/queue-controller", } } diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index dc19b7f10..2fb0eebc8 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -180,95 +180,6 @@ func (in *BundlePushTracker) DeepCopy() *BundlePushTracker { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Bus) DeepCopyInto(out *Bus) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Bus. -func (in *Bus) DeepCopy() *Bus { - if in == nil { - return nil - } - out := new(Bus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *BusList) DeepCopyInto(out *BusList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Bus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusList. -func (in *BusList) DeepCopy() *BusList { - if in == nil { - return nil - } - out := new(BusList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *BusList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BusSpec) DeepCopyInto(out *BusSpec) { - *out = *in - out.SQS = in.SQS -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusSpec. -func (in *BusSpec) DeepCopy() *BusSpec { - if in == nil { - return nil - } - out := new(BusSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BusStatus) DeepCopyInto(out *BusStatus) { - *out = *in - if in.ResourceRevMap != nil { - in, out := &in.ResourceRevMap, &out.ResourceRevMap - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BusStatus. -func (in *BusStatus) DeepCopy() *BusStatus { - if in == nil { - return nil - } - out := new(BusStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CacheManagerSpec) DeepCopyInto(out *CacheManagerSpec) { *out = *in @@ -600,7 +511,7 @@ func (in *IndexerClusterMemberStatus) DeepCopy() *IndexerClusterMemberStatus { func (in *IndexerClusterSpec) DeepCopyInto(out *IndexerClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) - out.BusRef = in.BusRef + out.QueueRef = in.QueueRef out.LargeMessageStoreRef = in.LargeMessageStoreRef } @@ -634,9 +545,9 @@ func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { *out = make([]IndexerClusterMemberStatus, len(*in)) copy(*out, *in) } - if in.Bus != nil { - in, out := &in.Bus, &out.Bus - *out = new(BusSpec) + if in.Queue != nil { + in, out := &in.Queue, &out.Queue + *out = new(QueueSpec) **out = **in } if in.LargeMessageStore != nil { @@ -712,7 +623,7 @@ func (in *IngestorClusterSpec) DeepCopyInto(out *IngestorClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) - out.BusRef = in.BusRef + out.QueueRef = in.QueueRef out.LargeMessageStoreRef = in.LargeMessageStoreRef } @@ -737,9 +648,9 @@ func (in *IngestorClusterStatus) DeepCopyInto(out *IngestorClusterStatus) { } } in.AppContext.DeepCopyInto(&out.AppContext) - if in.Bus != nil { - in, out := &in.Bus, &out.Bus - *out = new(BusSpec) + if in.Queue != nil { + in, out := &in.Queue, &out.Queue + *out = new(QueueSpec) **out = **in } if in.LargeMessageStore != nil { @@ -1086,6 +997,95 @@ func (in *Probe) DeepCopy() *Probe { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Queue) DeepCopyInto(out *Queue) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Queue. 
+func (in *Queue) DeepCopy() *Queue { + if in == nil { + return nil + } + out := new(Queue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueList) DeepCopyInto(out *QueueList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Queue, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueList. +func (in *QueueList) DeepCopy() *QueueList { + if in == nil { + return nil + } + out := new(QueueList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *QueueList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueSpec) DeepCopyInto(out *QueueSpec) { + *out = *in + out.SQS = in.SQS +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueSpec. +func (in *QueueSpec) DeepCopy() *QueueSpec { + if in == nil { + return nil + } + out := new(QueueSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QueueStatus) DeepCopyInto(out *QueueStatus) { + *out = *in + if in.ResourceRevMap != nil { + in, out := &in.ResourceRevMap, &out.ResourceRevMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QueueStatus. 
+func (in *QueueStatus) DeepCopy() *QueueStatus { + if in == nil { + return nil + } + out := new(QueueStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *S3Spec) DeepCopyInto(out *S3Spec) { *out = *in diff --git a/cmd/main.go b/cmd/main.go index 0d14d691a..72a3e38c7 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -230,11 +230,11 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "IngestorCluster") os.Exit(1) } - if err := (&controller.BusReconciler{ + if err := (&controller.QueueReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Bus") + setupLog.Error(err, "unable to create controller", "controller", "Queue") os.Exit(1) } if err := (&controller.LargeMessageStoreReconciler{ diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 67e1021f6..90c266230 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -5165,49 +5165,6 @@ spec: x-kubernetes-list-type: atomic type: object type: object - busRef: - description: Bus reference - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). 
This syntax is chosen only to have some well-defined way of - referencing a part of an object. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic clusterManagerRef: description: ClusterManagerRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -5690,6 +5647,49 @@ spec: type: string type: object x-kubernetes-map-type: atomic + queueRef: + description: Queue reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). 
This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic readinessInitialDelaySeconds: description: |- ReadinessInitialDelaySeconds defines initialDelaySeconds(See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) for Readiness probe @@ -8329,9 +8329,9 @@ spec: type: array type: object x-kubernetes-validations: - - message: busRef and largeMessageStoreRef must both be set or both be - empty - rule: has(self.busRef) == has(self.largeMessageStoreRef) + - message: queueRef and largeMessageStoreRef must both be set or both + be empty + rule: has(self.queueRef) == has(self.largeMessageStoreRef) status: description: IndexerClusterStatus defines the observed state of a Splunk Enterprise indexer cluster @@ -8341,45 +8341,6 @@ spec: type: boolean description: Holds secrets whose IDXC password has changed type: object - bus: - description: Bus - properties: - provider: - description: Provider of queue resources - 
enum: - - sqs - type: string - sqs: - description: sqs specific inputs - properties: - dlq: - description: Name of the dead letter queue resource - minLength: 1 - type: string - endpoint: - description: Amazon SQS Service endpoint - pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ - type: string - name: - description: Name of the queue - minLength: 1 - type: string - region: - description: Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string - required: - - dlq - - name - - region - type: object - required: - - provider - - sqs - type: object - x-kubernetes-validations: - - message: sqs must be provided when provider is sqs - rule: self.provider != 'sqs' || has(self.sqs) clusterManagerPhase: description: current phase of the cluster manager enum: @@ -8493,6 +8454,45 @@ spec: - Terminating - Error type: string + queue: + description: Queue + properties: + provider: + description: Provider of queue resources + enum: + - sqs + type: string + sqs: + description: sqs specific inputs + properties: + dlq: + description: Name of the dead letter queue resource + minLength: 1 + type: string + endpoint: + description: Amazon SQS Service endpoint + pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + type: string + name: + description: Name of the queue + minLength: 1 + type: string + region: + description: Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string + required: + - dlq + - name + - region + type: object + required: + - provider + - sqs + type: object + x-kubernetes-validations: + - message: sqs must be provided when provider is sqs + rule: self.provider != 'sqs' || has(self.sqs) readyReplicas: description: current number of ready indexer peers format: int32 diff --git 
a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 4ecaa8d32..37c820c4c 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -1141,49 +1141,6 @@ spec: type: object type: array type: object - busRef: - description: Bus reference - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. 
- More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic clusterManagerRef: description: ClusterManagerRef refers to a Splunk Enterprise indexer cluster managed by the operator within Kubernetes @@ -1666,6 +1623,49 @@ spec: type: string type: object x-kubernetes-map-type: atomic + queueRef: + description: Queue reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. 
+ More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic readinessInitialDelaySeconds: description: |- ReadinessInitialDelaySeconds defines initialDelaySeconds(See https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-readiness-probes) for Readiness probe @@ -4303,8 +4303,8 @@ spec: type: object type: array required: - - busRef - largeMessageStoreRef + - queueRef type: object status: description: IngestorClusterStatus defines the observed state of Ingestor @@ -4591,45 +4591,6 @@ spec: description: App Framework version info for future use type: integer type: object - bus: - description: Bus - properties: - provider: - description: Provider of queue resources - enum: - - sqs - type: string - sqs: - description: sqs specific inputs - properties: - dlq: - description: Name of the dead letter queue resource - minLength: 1 - type: string - endpoint: - description: Amazon SQS Service endpoint - pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ - type: string - name: - description: Name of the queue - minLength: 1 - type: string - region: - description: Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string - required: - - dlq - - name - - region - type: object - required: - - provider - - sqs - type: object - x-kubernetes-validations: - - message: sqs must be provided when provider is sqs - rule: self.provider != 'sqs' || has(self.sqs) largeMessageStore: description: Large Message Store properties: @@ -4673,6 +4634,45 @@ spec: - Terminating - Error type: string + queue: + description: Queue + properties: + provider: + description: Provider of queue resources + enum: + - sqs + type: string + sqs: + description: sqs specific inputs + properties: + dlq: + description: Name of the dead letter queue 
resource + minLength: 1 + type: string + endpoint: + description: Amazon SQS Service endpoint + pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + type: string + name: + description: Name of the queue + minLength: 1 + type: string + region: + description: Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string + required: + - dlq + - name + - region + type: object + required: + - provider + - sqs + type: object + x-kubernetes-validations: + - message: sqs must be provided when provider is sqs + rule: self.provider != 'sqs' || has(self.sqs) readyReplicas: description: Number of ready ingestor pods format: int32 diff --git a/config/crd/bases/enterprise.splunk.com_buses.yaml b/config/crd/bases/enterprise.splunk.com_queues.yaml similarity index 89% rename from config/crd/bases/enterprise.splunk.com_buses.yaml rename to config/crd/bases/enterprise.splunk.com_queues.yaml index 54d498834..928cd34ce 100644 --- a/config/crd/bases/enterprise.splunk.com_buses.yaml +++ b/config/crd/bases/enterprise.splunk.com_queues.yaml @@ -4,24 +4,24 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.16.1 - name: buses.enterprise.splunk.com + name: queues.enterprise.splunk.com spec: group: enterprise.splunk.com names: - kind: Bus - listKind: BusList - plural: buses + kind: Queue + listKind: QueueList + plural: queues shortNames: - - bus - singular: bus + - queue + singular: queue scope: Namespaced versions: - additionalPrinterColumns: - - description: Status of bus + - description: Status of queue jsonPath: .status.phase name: Phase type: string - - description: Age of bus resource + - description: Age of queue resource jsonPath: .metadata.creationTimestamp name: Age type: date @@ -32,7 +32,7 @@ spec: name: v4 schema: openAPIV3Schema: - description: Bus is the Schema for the buses API + description: Queue is the Schema 
for the queues API properties: apiVersion: description: |- @@ -52,7 +52,7 @@ spec: metadata: type: object spec: - description: BusSpec defines the desired state of Bus + description: QueueSpec defines the desired state of Queue properties: provider: description: Provider of queue resources @@ -91,13 +91,13 @@ spec: - message: sqs must be provided when provider is sqs rule: self.provider != 'sqs' || has(self.sqs) status: - description: BusStatus defines the observed state of Bus + description: QueueStatus defines the observed state of Queue properties: message: description: Auxillary message describing CR status type: string phase: - description: Phase of the bus + description: Phase of the queue enum: - Pending - Ready diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index c8ba16418..f80dfec5e 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -11,7 +11,7 @@ resources: - bases/enterprise.splunk.com_searchheadclusters.yaml - bases/enterprise.splunk.com_standalones.yaml - bases/enterprise.splunk.com_ingestorclusters.yaml -- bases/enterprise.splunk.com_buses.yaml +- bases/enterprise.splunk.com_queues.yaml - bases/enterprise.splunk.com_largemessagestores.yaml #+kubebuilder:scaffold:crdkustomizeresource diff --git a/config/rbac/bus_editor_role.yaml b/config/rbac/queue_editor_role.yaml similarity index 92% rename from config/rbac/bus_editor_role.yaml rename to config/rbac/queue_editor_role.yaml index c08c2ce39..bf7e4d890 100644 --- a/config/rbac/bus_editor_role.yaml +++ b/config/rbac/queue_editor_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: bus-editor-role + name: queue-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - buses + - queues verbs: - create - delete @@ -25,6 +25,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses/status + - queues/status verbs: - get diff --git a/config/rbac/bus_viewer_role.yaml 
b/config/rbac/queue_viewer_role.yaml similarity index 91% rename from config/rbac/bus_viewer_role.yaml rename to config/rbac/queue_viewer_role.yaml index 6f9c42d2a..b186c8650 100644 --- a/config/rbac/bus_viewer_role.yaml +++ b/config/rbac/queue_viewer_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: bus-viewer-role + name: queue-viewer-role rules: - apiGroups: - enterprise.splunk.com resources: - - buses + - queues verbs: - get - list @@ -21,6 +21,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses/status + - queues/status verbs: - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 94ed9d59e..295e080c6 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -47,7 +47,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses - clustermanagers - clustermasters - indexerclusters @@ -56,6 +55,7 @@ rules: - licensemanagers - licensemasters - monitoringconsoles + - queues - searchheadclusters - standalones verbs: @@ -69,7 +69,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses/finalizers - clustermanagers/finalizers - clustermasters/finalizers - indexerclusters/finalizers @@ -78,6 +77,7 @@ rules: - licensemanagers/finalizers - licensemasters/finalizers - monitoringconsoles/finalizers + - queues/finalizers - searchheadclusters/finalizers - standalones/finalizers verbs: @@ -85,7 +85,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses/status - clustermanagers/status - clustermasters/status - indexerclusters/status @@ -94,6 +93,7 @@ rules: - licensemanagers/status - licensemasters/status - monitoringconsoles/status + - queues/status - searchheadclusters/status - standalones/status verbs: diff --git a/config/samples/enterprise_v4_bus.yaml b/config/samples/enterprise_v4_queue.yaml similarity index 81% rename from config/samples/enterprise_v4_bus.yaml rename to config/samples/enterprise_v4_queue.yaml index 51af9d05a..374d4adb2 
100644 --- a/config/samples/enterprise_v4_bus.yaml +++ b/config/samples/enterprise_v4_queue.yaml @@ -1,7 +1,7 @@ apiVersion: enterprise.splunk.com/v4 -kind: Bus +kind: Queue metadata: - name: bus-sample + name: queue-sample finalizers: - "enterprise.splunk.com/delete-pvc" spec: {} diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 1ea90a3ae..4de2ec89d 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -14,6 +14,6 @@ resources: - enterprise_v4_clustermanager.yaml - enterprise_v4_licensemanager.yaml - enterprise_v4_ingestorcluster.yaml -- enterprise_v4_bus.yaml +- enterprise_v4_queue.yaml - enterprise_v4_largemessagestore.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/docs/CustomResources.md b/docs/CustomResources.md index 95ca6c1d9..f69a8fa50 100644 --- a/docs/CustomResources.md +++ b/docs/CustomResources.md @@ -18,7 +18,7 @@ you can use to manage Splunk Enterprise deployments in your Kubernetes cluster. 
- [LicenseManager Resource Spec Parameters](#licensemanager-resource-spec-parameters) - [Standalone Resource Spec Parameters](#standalone-resource-spec-parameters) - [SearchHeadCluster Resource Spec Parameters](#searchheadcluster-resource-spec-parameters) - - [Bus Resource Spec Parameters](#bus-resource-spec-parameters) + - [Queue Resource Spec Parameters](#queue-resource-spec-parameters) - [ClusterManager Resource Spec Parameters](#clustermanager-resource-spec-parameters) - [IndexerCluster Resource Spec Parameters](#indexercluster-resource-spec-parameters) - [IngestorCluster Resource Spec Parameters](#ingestorcluster-resource-spec-parameters) @@ -281,13 +281,13 @@ spec: cpu: "4" ``` -## Bus Resource Spec Parameters +## Queue Resource Spec Parameters ```yaml apiVersion: enterprise.splunk.com/v4 -kind: Bus +kind: Queue metadata: - name: bus + name: queue spec: replicas: 3 provider: sqs @@ -298,14 +298,14 @@ spec: dlq: sqs-dlq-test ``` -Bus inputs can be found in the table below. As of now, only SQS provider of message bus is supported. +Queue inputs can be found in the table below. As of now, only SQS provider of message queue is supported. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| provider | string | [Required] Provider of message bus (Allowed values: sqs) | -| sqs | SQS | [Required if provider=sqs] SQS message bus inputs | +| provider | string | [Required] Provider of message queue (Allowed values: sqs) | +| sqs | SQS | [Required if provider=sqs] SQS message queue inputs | -SQS message bus inputs can be found in the table below. +SQS message queue inputs can be found in the table below. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | @@ -314,7 +314,7 @@ SQS message bus inputs can be found in the table below. 
| endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint | dlq | string | [Required] Name of the dead letter queue | -Change of any of the bus inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. +Change of any of the queue inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. ## ClusterManager Resource Spec Parameters ClusterManager resource does not have a required spec parameter, but to configure SmartStore, you can specify indexes and volume configuration as below - @@ -375,12 +375,12 @@ metadata: name: ic spec: replicas: 3 - busRef: - name: bus + queueRef: + name: queue largeMessageStoreRef: name: lms ``` -Note: `busRef` and `largeMessageStoreRef` are required fields in case of IngestorCluster resource since they will be used to connect the IngestorCluster to Bus and LargeMessageStore resources. +Note: `queueRef` and `largeMessageStoreRef` are required fields in case of IngestorCluster resource since they will be used to connect the IngestorCluster to Queue and LargeMessageStore resources. In addition to [Common Spec Parameters for All Resources](#common-spec-parameters-for-all-resources) and [Common Spec Parameters for All Splunk Enterprise Resources](#common-spec-parameters-for-all-splunk-enterprise-resources), @@ -418,7 +418,7 @@ S3 large message store inputs can be found in the table below. | path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | | endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint -Change of any of the large message bus inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. +Change of any of the large message queue inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. 
## MonitoringConsole Resource Spec Parameters @@ -531,7 +531,7 @@ The Splunk Operator controller reconciles every Splunk Enterprise CR. However, t | Customer Resource Definition | Annotation | | ----------- | --------- | -| bus.enterprise.splunk.com | "bus.enterprise.splunk.com/paused" | +| queue.enterprise.splunk.com | "queue.enterprise.splunk.com/paused" | | clustermaster.enterprise.splunk.com | "clustermaster.enterprise.splunk.com/paused" | | clustermanager.enterprise.splunk.com | "clustermanager.enterprise.splunk.com/paused" | | indexercluster.enterprise.splunk.com | "indexercluster.enterprise.splunk.com/paused" | diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index e8c6211d7..257e37400 100644 --- a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -4,7 +4,7 @@ Separation between ingestion and indexing services within Splunk Operator for Ku This separation enables: - Independent scaling: Match resource allocation to ingestion or indexing workload. -- Data durability: Off‑load buffer management and retry logic to a durable message bus. +- Data durability: Off‑load buffer management and retry logic to a durable message queue. - Operational clarity: Separate monitoring dashboards for ingestion throughput vs indexing latency. # Important Note @@ -16,20 +16,20 @@ This separation enables: - SPLUNK_IMAGE_VERSION: Splunk Enterprise Docker Image version -# Bus +# Queue -Bus is introduced to store message bus information to be shared among IngestorCluster and IndexerCluster. +Queue is introduced to store message queue information to be shared among IngestorCluster and IndexerCluster. ## Spec -Bus inputs can be found in the table below. As of now, only SQS provider of message bus is supported. +Queue inputs can be found in the table below. As of now, only SQS provider of message queue is supported. 
| Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| provider | string | [Required] Provider of message bus (Allowed values: sqs) | -| sqs | SQS | [Required if provider=sqs] SQS message bus inputs | +| provider | string | [Required] Provider of message queue (Allowed values: sqs) | +| sqs | SQS | [Required if provider=sqs] SQS message queue inputs | -SQS message bus inputs can be found in the table below. +SQS message queue inputs can be found in the table below. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | @@ -38,14 +38,14 @@ SQS message bus inputs can be found in the table below. | endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint | dlq | string | [Required] Name of the dead letter queue | -Change of any of the bus inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. +Change of any of the queue inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. ## Example ``` apiVersion: enterprise.splunk.com/v4 -kind: Bus +kind: Queue metadata: - name: bus + name: queue spec: provider: sqs sqs: @@ -75,7 +75,7 @@ S3 large message store inputs can be found in the table below. | path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | | endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint -Change of any of the large message bus inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. +Change of any of the large message queue inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. 
## Example ``` @@ -92,7 +92,7 @@ spec: # IngestorCluster -IngestorCluster is introduced for high‑throughput data ingestion into a durable message bus. Its Splunk pods are configured to receive events (outputs.conf) and publish them to a message bus. +IngestorCluster is introduced for high‑throughput data ingestion into a durable message queue. Its Splunk pods are configured to receive events (outputs.conf) and publish them to a message queue. ## Spec @@ -101,12 +101,12 @@ In addition to common spec inputs, the IngestorCluster resource provides the fol | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | -| busRef | corev1.ObjectReference | Message bus reference | +| queueRef | corev1.ObjectReference | Message queue reference | | largeMessageStoreRef | corev1.ObjectReference | Large message store reference | ## Example -The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Bus and LargeMessageStore references allow the user to specify queue and bucket settings for the ingestion process. +The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and LargeMessageStore references allow the user to specify queue and bucket settings for the ingestion process. 
In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. @@ -121,15 +121,15 @@ spec: serviceAccount: ingestor-sa replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - busRef: - name: bus + queueRef: + name: queue largeMessageStoreRef: name: lms ``` # IndexerCluster -IndexerCluster is enhanced to support index‑only mode enabling independent scaling, loss‑safe buffering, and simplified day‑0/day‑n management via Kubernetes CRDs. Its Splunk pods are configured to pull events from the bus (inputs.conf) and index them. +IndexerCluster is enhanced to support index‑only mode enabling independent scaling, loss‑safe buffering, and simplified day‑0/day‑n management via Kubernetes CRDs. Its Splunk pods are configured to pull events from the queue (inputs.conf) and index them. ## Spec @@ -138,12 +138,12 @@ In addition to common spec inputs, the IndexerCluster resource provides the foll | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | -| busRef | corev1.ObjectReference | Message bus reference | +| queueRef | corev1.ObjectReference | Message queue reference | | largeMessageStoreRef | corev1.ObjectReference | Large message store reference | ## Example -The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. 
Bus and LargeMessageStore references allow the user to specify queue and bucket settings for the indexing process. +The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and LargeMessageStore references allow the user to specify queue and bucket settings for the indexing process. In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. @@ -170,8 +170,8 @@ spec: serviceAccount: ingestor-sa replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - busRef: - name: bus + queueRef: + name: queue largeMessageStoreRef: name: lms ``` @@ -182,16 +182,16 @@ Common spec values for all SOK Custom Resources can be found in [CustomResources # Helm Charts -Bus, LargeMessageStore and IngestorCluster have been added to the splunk/splunk-enterprise Helm chart. IndexerCluster has also been enhanced to support new inputs. +Queue, LargeMessageStore and IngestorCluster have been added to the splunk/splunk-enterprise Helm chart. IndexerCluster has also been enhanced to support new inputs. ## Example -Below examples describe how to define values for Bus, LargeMessageStoe, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. +Below examples describe how to define values for Queue, LargeMessageStoe, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. 
``` -bus: +queue: enabled: true - name: bus + name: queue provider: sqs sqs: name: sqs-test @@ -216,8 +216,8 @@ ingestorCluster: name: ingestor replicaCount: 3 serviceAccount: ingestor-sa - busRef: - name: bus + queueRef: + name: queue largeMessageStoreRef: name: lms ``` @@ -236,8 +236,8 @@ indexerCluster: serviceAccount: ingestor-sa clusterManagerRef: name: cm - busRef: - name: bus + queueRef: + name: queue largeMessageStoreRef: name: lms ``` @@ -541,14 +541,14 @@ $ aws iam list-attached-role-policies --role-name eksctl-ind-ing-sep-demo-addon- } ``` -3. Install Bus resource. +3. Install Queue resource. ``` -$ cat bus.yaml +$ cat queue.yaml apiVersion: enterprise.splunk.com/v4 -kind: Bus +kind: Queue metadata: - name: bus + name: queue finalizers: - enterprise.splunk.com/delete-pvc spec: @@ -561,23 +561,23 @@ spec: ``` ``` -$ kubectl apply -f bus.yaml +$ kubectl apply -f queue.yaml ``` ``` -$ kubectl get bus +$ kubectl get queue NAME PHASE AGE MESSAGE -bus Ready 20s +queue Ready 20s ``` ``` -kubectl describe bus -Name: bus +kubectl describe queue +Name: queue Namespace: default Labels: Annotations: API Version: enterprise.splunk.com/v4 -Kind: Bus +Kind: Queue Metadata: Creation Timestamp: 2025-10-27T10:25:53Z Finalizers: @@ -667,8 +667,8 @@ spec: serviceAccount: ingestor-sa replicas: 3 image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - busRef: - name: bus + queueRef: + name: queue largeMessageStoreRef: name: lms ``` @@ -699,8 +699,8 @@ Metadata: Resource Version: 12345678 UID: 12345678-1234-1234-1234-1234567890123 Spec: - Bus Ref: - Name: bus + Queue Ref: + Name: queue Namespace: default Image: splunk/splunk:${SPLUNK_IMAGE_VERSION} Large Message Store Ref: @@ -720,7 +720,7 @@ Status: Is Deployment In Progress: false Last App Info Check Time: 0 Version: 0 - Bus: + Queue: Sqs: Region: us-west-2 DLQ: sqs-dlq-test @@ -811,8 +811,8 @@ spec: clusterManagerRef: name: cm serviceAccount: ingestor-sa - busRef: - name: bus + queueRef: + name: queue largeMessageStoreRef: name: lms 
``` diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml index 0e6a96673..536be0cd2 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml @@ -163,8 +163,8 @@ items: {{ toYaml . | indent 6 }} {{- end }} {{- end }} - {{- with $.Values.indexerCluster.busRef }} - busRef: + {{- with $.Values.indexerCluster.queueRef }} + queueRef: name: {{ .name }} {{- if .namespace }} namespace: {{ .namespace }} diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml index b6c1640ec..b9ec62107 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml @@ -95,11 +95,11 @@ spec: topologySpreadConstraints: {{- toYaml . 
| nindent 4 }} {{- end }} - {{- with $.Values.ingestorCluster.busRef }} - busRef: - name: {{ $.Values.ingestorCluster.busRef.name }} - {{- if $.Values.ingestorCluster.busRef.namespace }} - namespace: {{ $.Values.ingestorCluster.busRef.namespace }} + {{- with $.Values.ingestorCluster.queueRef }} + queueRef: + name: {{ $.Values.ingestorCluster.queueRef.name }} + {{- if $.Values.ingestorCluster.queueRef.namespace }} + namespace: {{ $.Values.ingestorCluster.queueRef.namespace }} {{- end }} {{- end }} {{- with $.Values.ingestorCluster.largeMessageStoreRef }} diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml similarity index 57% rename from helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml rename to helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml index bbf162332..b586e45da 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_buses.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml @@ -1,21 +1,21 @@ -{{- if .Values.bus }} -{{- if .Values.bus.enabled }} +{{- if .Values.queue }} +{{- if .Values.queue.enabled }} apiVersion: enterprise.splunk.com/v4 -kind: Bus +kind: Queue metadata: - name: {{ .Values.bus.name }} - namespace: {{ default .Release.Namespace .Values.bus.namespaceOverride }} - {{- with .Values.bus.additionalLabels }} + name: {{ .Values.queue.name }} + namespace: {{ default .Release.Namespace .Values.queue.namespaceOverride }} + {{- with .Values.queue.additionalLabels }} labels: {{ toYaml . | nindent 4 }} {{- end }} - {{- with .Values.bus.additionalAnnotations }} + {{- with .Values.queue.additionalAnnotations }} annotations: {{ toYaml . 
| nindent 4 }} {{- end }} spec: - provider: {{ .Values.bus.provider | quote }} - {{- with .Values.bus.sqs }} + provider: {{ .Values.queue.provider | quote }} + {{- with .Values.queue.sqs }} sqs: {{- if .endpoint }} endpoint: {{ .endpoint | quote }} diff --git a/helm-chart/splunk-enterprise/values.yaml b/helm-chart/splunk-enterprise/values.yaml index a001bbead..ea4921b52 100644 --- a/helm-chart/splunk-enterprise/values.yaml +++ b/helm-chart/splunk-enterprise/values.yaml @@ -350,7 +350,7 @@ indexerCluster: # nodeAffinityPolicy: [Honor|Ignore] # optional; beta since v1.26 # nodeTaintsPolicy: [Honor|Ignore] # optional; beta since v1.26 - busRef: {} + queueRef: {} largeMessageStoreRef: {} @@ -901,6 +901,6 @@ ingestorCluster: affinity: {} - busRef: {} + queueRef: {} largeMessageStoreRef: {} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/bus_editor_role.yaml b/helm-chart/splunk-operator/templates/rbac/queue_editor_role.yaml similarity index 82% rename from helm-chart/splunk-operator/templates/rbac/bus_editor_role.yaml rename to helm-chart/splunk-operator/templates/rbac/queue_editor_role.yaml index f285a1ca5..6c04be75b 100644 --- a/helm-chart/splunk-operator/templates/rbac/bus_editor_role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/queue_editor_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-bus-editor-role + name: {{ include "splunk-operator.operator.fullname" . }}-queue-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - buses + - queues verbs: - create - delete @@ -25,19 +25,19 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses/status + - queues/status verbs: - get {{- else }} apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-bus-editor-role + name: {{ include "splunk-operator.operator.fullname" . 
}}-queue-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - buses + - queues verbs: - create - delete @@ -49,7 +49,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses/status + - queues/status verbs: - get {{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/bus_viewer_role.yaml b/helm-chart/splunk-operator/templates/rbac/queue_viewer_role.yaml similarity index 81% rename from helm-chart/splunk-operator/templates/rbac/bus_viewer_role.yaml rename to helm-chart/splunk-operator/templates/rbac/queue_viewer_role.yaml index c4381a3cc..2c81b98fd 100644 --- a/helm-chart/splunk-operator/templates/rbac/bus_viewer_role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/queue_viewer_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-bus-viewer-role + name: {{ include "splunk-operator.operator.fullname" . }}-queue-viewer-role rules: - apiGroups: - enterprise.splunk.com resources: - - buses + - queues verbs: - get - list @@ -21,19 +21,19 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses/status + - queues/status verbs: - get {{- else }} apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: - name: {{ include "splunk-operator.operator.fullname" . }}-bus-viewer-role + name: {{ include "splunk-operator.operator.fullname" . 
}}-queue-viewer-role rules: - apiGroups: - enterprise.splunk.com resources: - - buses + - queues verbs: - get - list @@ -41,7 +41,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses/status + - queues/status verbs: - get {{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/role.yaml b/helm-chart/splunk-operator/templates/rbac/role.yaml index 61cf4ada9..26824528f 100644 --- a/helm-chart/splunk-operator/templates/rbac/role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/role.yaml @@ -251,7 +251,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses + - queues verbs: - create - delete @@ -263,13 +263,13 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - buses/finalizers + - queues/finalizers verbs: - update - apiGroups: - enterprise.splunk.com resources: - - buses/status + - queues/status verbs: - get - patch diff --git a/internal/controller/indexercluster_controller.go b/internal/controller/indexercluster_controller.go index 676f81d23..2ed4d775e 100644 --- a/internal/controller/indexercluster_controller.go +++ b/internal/controller/indexercluster_controller.go @@ -172,9 +172,9 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IndexerCluster{}, )). 
- Watches(&enterpriseApi.Bus{}, + Watches(&enterpriseApi.Queue{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { - b, ok := obj.(*enterpriseApi.Bus) + b, ok := obj.(*enterpriseApi.Queue) if !ok { return nil } @@ -184,11 +184,11 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { } var reqs []reconcile.Request for _, ic := range list.Items { - ns := ic.Spec.BusRef.Namespace + ns := ic.Spec.QueueRef.Namespace if ns == "" { ns = ic.Namespace } - if ic.Spec.BusRef.Name == b.Name && ns == b.Namespace { + if ic.Spec.QueueRef.Name == b.Name && ns == b.Namespace { reqs = append(reqs, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: ic.Name, diff --git a/internal/controller/ingestorcluster_controller.go b/internal/controller/ingestorcluster_controller.go index 1df81eb78..a46a1dcff 100644 --- a/internal/controller/ingestorcluster_controller.go +++ b/internal/controller/ingestorcluster_controller.go @@ -141,9 +141,9 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IngestorCluster{}, )). 
- Watches(&enterpriseApi.Bus{}, + Watches(&enterpriseApi.Queue{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { - b, ok := obj.(*enterpriseApi.Bus) + queue, ok := obj.(*enterpriseApi.Queue) if !ok { return nil } @@ -153,11 +153,11 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { } var reqs []reconcile.Request for _, ic := range list.Items { - ns := ic.Spec.BusRef.Namespace + ns := ic.Spec.QueueRef.Namespace if ns == "" { ns = ic.Namespace } - if ic.Spec.BusRef.Name == b.Name && ns == b.Namespace { + if ic.Spec.QueueRef.Name == queue.Name && ns == queue.Namespace { reqs = append(reqs, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: ic.Name, diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go index 053195d44..4d140e1d6 100644 --- a/internal/controller/ingestorcluster_controller_test.go +++ b/internal/controller/ingestorcluster_controller_test.go @@ -71,12 +71,12 @@ var _ = Describe("IngestorCluster Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - bus := &enterpriseApi.Bus{ + queue := &enterpriseApi.Queue{ ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", Namespace: nsSpecs.Name, }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "smartbus-queue", @@ -99,7 +99,7 @@ var _ = Describe("IngestorCluster Controller", func() { }, }, } - CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, lms, bus) + CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, lms, queue) icSpec, _ := GetIngestorCluster("test", nsSpecs.Name) annotations = map[string]string{} icSpec.Annotations = annotations @@ -119,12 +119,12 @@ var _ = Describe("IngestorCluster Controller", func() { Expect(k8sClient.Create(context.Background(), 
nsSpecs)).Should(Succeed()) annotations := make(map[string]string) - bus := &enterpriseApi.Bus{ + queue := &enterpriseApi.Queue{ ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", Namespace: nsSpecs.Name, }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "smartbus-queue", @@ -147,7 +147,7 @@ var _ = Describe("IngestorCluster Controller", func() { }, }, } - CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, lms, bus) + CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, lms, queue) DeleteIngestorCluster("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) @@ -220,7 +220,7 @@ func GetIngestorCluster(name string, namespace string) (*enterpriseApi.IngestorC return ic, err } -func CreateIngestorCluster(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, lms *enterpriseApi.LargeMessageStore, bus *enterpriseApi.Bus) *enterpriseApi.IngestorCluster { +func CreateIngestorCluster(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, lms *enterpriseApi.LargeMessageStore, queue *enterpriseApi.Queue) *enterpriseApi.IngestorCluster { By("Expecting IngestorCluster custom resource to be created successfully") key := types.NamespacedName{ @@ -240,9 +240,9 @@ func CreateIngestorCluster(name string, namespace string, annotations map[string }, }, Replicas: 3, - BusRef: corev1.ObjectReference{ - Name: bus.Name, - Namespace: bus.Namespace, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, + Namespace: queue.Namespace, }, LargeMessageStoreRef: corev1.ObjectReference{ Name: lms.Name, diff --git a/internal/controller/bus_controller.go b/internal/controller/queue_controller.go similarity index 72% rename from internal/controller/bus_controller.go rename to internal/controller/queue_controller.go index b52e91991..6fff662b9 
100644 --- a/internal/controller/bus_controller.go +++ b/internal/controller/queue_controller.go @@ -36,34 +36,34 @@ import ( enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" ) -// BusReconciler reconciles a Bus object -type BusReconciler struct { +// QueueReconciler reconciles a Queue object +type QueueReconciler struct { client.Client Scheme *runtime.Scheme } -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=buses,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=buses/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=buses/finalizers,verbs=update +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/finalizers,verbs=update // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. // TODO(user): Modify the Reconcile function to compare the state specified by -// the Bus object against the actual cluster state, and then +// the Queue object against the actual cluster state, and then // perform operations to make the cluster state reflect the state specified by // the user. 
// // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile -func (r *BusReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "Bus")).Inc() - defer recordInstrumentionData(time.Now(), req, "controller", "Bus") +func (r *QueueReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "Queue")).Inc() + defer recordInstrumentionData(time.Now(), req, "controller", "Queue") reqLogger := log.FromContext(ctx) - reqLogger = reqLogger.WithValues("bus", req.NamespacedName) + reqLogger = reqLogger.WithValues("queue", req.NamespacedName) - // Fetch the Bus - instance := &enterpriseApi.Bus{} + // Fetch the Queue + instance := &enterpriseApi.Queue{} err := r.Get(ctx, req.NamespacedName, instance) if err != nil { if k8serrors.IsNotFound(err) { @@ -74,20 +74,20 @@ func (r *BusReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.R return ctrl.Result{}, nil } // Error reading the object - requeue the request. 
- return ctrl.Result{}, errors.Wrap(err, "could not load bus data") + return ctrl.Result{}, errors.Wrap(err, "could not load queue data") } // If the reconciliation is paused, requeue annotations := instance.GetAnnotations() if annotations != nil { - if _, ok := annotations[enterpriseApi.BusPausedAnnotation]; ok { + if _, ok := annotations[enterpriseApi.QueuePausedAnnotation]; ok { return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil } } reqLogger.Info("start", "CR version", instance.GetResourceVersion()) - result, err := ApplyBus(ctx, r.Client, instance) + result, err := ApplyQueue(ctx, r.Client, instance) if result.Requeue && result.RequeueAfter != 0 { reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) } @@ -95,14 +95,14 @@ func (r *BusReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.R return result, err } -var ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { - return enterprise.ApplyBus(ctx, client, instance) +var ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { + return enterprise.ApplyQueue(ctx, client, instance) } // SetupWithManager sets up the controller with the Manager. -func (r *BusReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *QueueReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&enterpriseApi.Bus{}). + For(&enterpriseApi.Queue{}). 
WithEventFilter(predicate.Or( common.GenerationChangedPredicate(), common.AnnotationChangedPredicate(), diff --git a/internal/controller/bus_controller_test.go b/internal/controller/queue_controller_test.go similarity index 68% rename from internal/controller/bus_controller_test.go rename to internal/controller/queue_controller_test.go index c45c66420..23d40ae4c 100644 --- a/internal/controller/bus_controller_test.go +++ b/internal/controller/queue_controller_test.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -var _ = Describe("Bus Controller", func() { +var _ = Describe("Queue Controller", func() { BeforeEach(func() { time.Sleep(2 * time.Second) }) @@ -43,34 +43,34 @@ var _ = Describe("Bus Controller", func() { }) - Context("Bus Management", func() { + Context("Queue Management", func() { - It("Get Bus custom resource should fail", func() { - namespace := "ns-splunk-bus-1" - ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { + It("Get Queue custom resource should fail", func() { + namespace := "ns-splunk-queue-1" + ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - _, err := GetBus("test", nsSpecs.Name) - Expect(err.Error()).Should(Equal("buses.enterprise.splunk.com \"test\" not found")) + _, err := GetQueue("test", nsSpecs.Name) + Expect(err.Error()).Should(Equal("queues.enterprise.splunk.com \"test\" not found")) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) - It("Create Bus custom resource with annotations should pause", func() { - namespace := "ns-splunk-bus-2" + It("Create Queue custom resource with annotations should pause", func() { + namespace := "ns-splunk-queue-2" annotations 
:= make(map[string]string) - annotations[enterpriseApi.BusPausedAnnotation] = "" - ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { + annotations[enterpriseApi.QueuePausedAnnotation] = "" + ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - spec := enterpriseApi.BusSpec{ + spec := enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "smartbus-queue", @@ -79,19 +79,19 @@ var _ = Describe("Bus Controller", func() { Endpoint: "https://sqs.us-west-2.amazonaws.com", }, } - CreateBus("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) - icSpec, _ := GetBus("test", nsSpecs.Name) + CreateQueue("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + icSpec, _ := GetQueue("test", nsSpecs.Name) annotations = map[string]string{} icSpec.Annotations = annotations icSpec.Status.Phase = "Ready" - UpdateBus(icSpec, enterpriseApi.PhaseReady, spec) - DeleteBus("test", nsSpecs.Name) + UpdateQueue(icSpec, enterpriseApi.PhaseReady, spec) + DeleteQueue("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) - It("Create Bus custom resource should succeeded", func() { - namespace := "ns-splunk-bus-3" - ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { + It("Create Queue custom resource should succeeded", func() { + namespace := "ns-splunk-queue-3" + ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -99,7 +99,7 @@ var _ = Describe("Bus 
Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) annotations := make(map[string]string) - spec := enterpriseApi.BusSpec{ + spec := enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "smartbus-queue", @@ -108,14 +108,14 @@ var _ = Describe("Bus Controller", func() { Endpoint: "https://sqs.us-west-2.amazonaws.com", }, } - CreateBus("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) - DeleteBus("test", nsSpecs.Name) + CreateQueue("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + DeleteQueue("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) It("Cover Unused methods", func() { - namespace := "ns-splunk-bus-4" - ApplyBus = func(ctx context.Context, client client.Client, instance *enterpriseApi.Bus) (reconcile.Result, error) { + namespace := "ns-splunk-queue-4" + ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -125,7 +125,7 @@ var _ = Describe("Bus Controller", func() { ctx := context.TODO() builder := fake.NewClientBuilder() c := builder.Build() - instance := BusReconciler{ + instance := QueueReconciler{ Client: c, Scheme: scheme.Scheme, } @@ -138,7 +138,7 @@ var _ = Describe("Bus Controller", func() { _, err := instance.Reconcile(ctx, request) Expect(err).ToNot(HaveOccurred()) - spec := enterpriseApi.BusSpec{ + spec := enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "smartbus-queue", @@ -147,11 +147,11 @@ var _ = Describe("Bus Controller", func() { Endpoint: "https://sqs.us-west-2.amazonaws.com", }, } - bcSpec := testutils.NewBus("test", namespace, spec) + bcSpec := testutils.NewQueue("test", namespace, spec) Expect(c.Create(ctx, bcSpec)).Should(Succeed()) annotations := make(map[string]string) - 
annotations[enterpriseApi.BusPausedAnnotation] = "" + annotations[enterpriseApi.QueuePausedAnnotation] = "" bcSpec.Annotations = annotations Expect(c.Update(ctx, bcSpec)).Should(Succeed()) @@ -173,14 +173,14 @@ var _ = Describe("Bus Controller", func() { }) }) -func GetBus(name string, namespace string) (*enterpriseApi.Bus, error) { - By("Expecting Bus custom resource to be retrieved successfully") +func GetQueue(name string, namespace string) (*enterpriseApi.Queue, error) { + By("Expecting Queue custom resource to be retrieved successfully") key := types.NamespacedName{ Name: name, Namespace: namespace, } - b := &enterpriseApi.Bus{} + b := &enterpriseApi.Queue{} err := k8sClient.Get(context.Background(), key, b) if err != nil { @@ -190,14 +190,14 @@ func GetBus(name string, namespace string) (*enterpriseApi.Bus, error) { return b, err } -func CreateBus(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.BusSpec) *enterpriseApi.Bus { - By("Expecting Bus custom resource to be created successfully") +func CreateQueue(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.QueueSpec) *enterpriseApi.Queue { + By("Expecting Queue custom resource to be created successfully") key := types.NamespacedName{ Name: name, Namespace: namespace, } - ingSpec := &enterpriseApi.Bus{ + ingSpec := &enterpriseApi.Queue{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -209,7 +209,7 @@ func CreateBus(name string, namespace string, annotations map[string]string, sta Expect(k8sClient.Create(context.Background(), ingSpec)).Should(Succeed()) time.Sleep(2 * time.Second) - b := &enterpriseApi.Bus{} + b := &enterpriseApi.Queue{} Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, b) if status != "" { @@ -224,20 +224,20 @@ func CreateBus(name string, namespace string, annotations map[string]string, sta return b } -func UpdateBus(instance 
*enterpriseApi.Bus, status enterpriseApi.Phase, spec enterpriseApi.BusSpec) *enterpriseApi.Bus { - By("Expecting Bus custom resource to be updated successfully") +func UpdateQueue(instance *enterpriseApi.Queue, status enterpriseApi.Phase, spec enterpriseApi.QueueSpec) *enterpriseApi.Queue { + By("Expecting Queue custom resource to be updated successfully") key := types.NamespacedName{ Name: instance.Name, Namespace: instance.Namespace, } - bSpec := testutils.NewBus(instance.Name, instance.Namespace, spec) + bSpec := testutils.NewQueue(instance.Name, instance.Namespace, spec) bSpec.ResourceVersion = instance.ResourceVersion Expect(k8sClient.Update(context.Background(), bSpec)).Should(Succeed()) time.Sleep(2 * time.Second) - b := &enterpriseApi.Bus{} + b := &enterpriseApi.Queue{} Eventually(func() bool { _ = k8sClient.Get(context.Background(), key, b) if status != "" { @@ -252,8 +252,8 @@ func UpdateBus(instance *enterpriseApi.Bus, status enterpriseApi.Phase, spec ent return b } -func DeleteBus(name string, namespace string) { - By("Expecting Bus custom resource to be deleted successfully") +func DeleteQueue(name string, namespace string) { + By("Expecting Queue custom resource to be deleted successfully") key := types.NamespacedName{ Name: name, @@ -261,7 +261,7 @@ func DeleteBus(name string, namespace string) { } Eventually(func() error { - b := &enterpriseApi.Bus{} + b := &enterpriseApi.Queue{} _ = k8sClient.Get(context.Background(), key, b) err := k8sClient.Delete(context.Background(), b) return err diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 17ce5e760..eda9f320d 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -98,7 +98,7 @@ var _ = BeforeSuite(func(ctx context.Context) { Scheme: clientgoscheme.Scheme, }) Expect(err).ToNot(HaveOccurred()) - if err := (&BusReconciler{ + if err := (&QueueReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), 
}).SetupWithManager(k8sManager); err != nil { diff --git a/internal/controller/testutils/new.go b/internal/controller/testutils/new.go index e3e37efc2..b5b620337 100644 --- a/internal/controller/testutils/new.go +++ b/internal/controller/testutils/new.go @@ -54,16 +54,16 @@ func NewIngestorCluster(name, ns, image string) *enterpriseApi.IngestorCluster { Spec: enterpriseApi.Spec{ImagePullPolicy: string(pullPolicy)}, }, Replicas: 3, - BusRef: corev1.ObjectReference{ - Name: "bus", + QueueRef: corev1.ObjectReference{ + Name: "queue", }, }, } } -// NewBus returns new Bus instance with its config hash -func NewBus(name, ns string, spec enterpriseApi.BusSpec) *enterpriseApi.Bus { - return &enterpriseApi.Bus{ +// NewQueue returns new Queue instance with its config hash +func NewQueue(name, ns string, spec enterpriseApi.QueueSpec) *enterpriseApi.Queue { + return &enterpriseApi.Queue{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, Spec: spec, } diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml index f34dd2e6c..2b0596fdd 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml @@ -1,9 +1,9 @@ --- -# assert for bus custom resource to be ready +# assert for queue custom resource to be ready apiVersion: enterprise.splunk.com/v4 -kind: Bus +kind: Queue metadata: - name: bus + name: queue spec: provider: sqs sqs: @@ -61,11 +61,11 @@ metadata: name: indexer spec: replicas: 3 - busRef: - name: bus + queueRef: + name: queue status: phase: Ready - bus: + queue: provider: sqs sqs: name: sqs-test @@ -102,11 +102,11 @@ metadata: name: ingestor spec: replicas: 3 - busRef: - name: bus + queueRef: + name: queue status: phase: Ready - bus: + queue: provider: sqs sqs: name: sqs-test diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml 
index 291eddeba..57e6c4c68 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -6,11 +6,11 @@ metadata: name: ingestor spec: replicas: 4 - busRef: - name: bus + queueRef: + name: queue status: phase: Ready - bus: + queue: provider: sqs sqs: name: sqs-test diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml index a73c51ac2..1e8af1663 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -5,9 +5,9 @@ splunk-operator: persistentVolumeClaim: storageClassName: gp2 -bus: +queue: enabled: true - name: bus + name: queue provider: sqs sqs: name: sqs-test @@ -27,8 +27,8 @@ ingestorCluster: enabled: true name: ingestor replicaCount: 3 - busRef: - name: bus + queueRef: + name: queue largeMessageStoreRef: name: lms @@ -43,7 +43,7 @@ indexerCluster: replicaCount: 3 clusterManagerRef: name: cm - busRef: - name: bus + queueRef: + name: queue largeMessageStoreRef: name: lms diff --git a/pkg/splunk/enterprise/clustermanager.go b/pkg/splunk/enterprise/clustermanager.go index 269753c5c..150dfdbbe 100644 --- a/pkg/splunk/enterprise/clustermanager.go +++ b/pkg/splunk/enterprise/clustermanager.go @@ -22,7 +22,6 @@ import ( "time" enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "sigs.k8s.io/controller-runtime/pkg/client" rclient "sigs.k8s.io/controller-runtime/pkg/client" "github.com/go-logr/logr" @@ -427,9 +426,9 @@ func PushManagerAppsBundle(ctx context.Context, c splcommon.ControllerClient, cr return splunkClient.BundlePush(true) } - + // helper function to get the list of ClusterManager types in the current namespace -func getClusterManagerList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []client.ListOption) (int, error) { 
+func getClusterManagerList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []rclient.ListOption) (int, error) { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("getClusterManagerList").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 2170e914a..5e468196c 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -37,7 +37,6 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" rclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -78,7 +77,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // updates status after function completes cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.Bus = &enterpriseApi.BusSpec{} + cr.Status.Queue = &enterpriseApi.QueueSpec{} } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -245,27 +244,27 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // no need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - // Bus - bus := enterpriseApi.Bus{} - if cr.Spec.BusRef.Name != "" { + // Queue + queue := enterpriseApi.Queue{} + if cr.Spec.QueueRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.BusRef.Namespace != "" { - ns = cr.Spec.BusRef.Namespace + if cr.Spec.QueueRef.Namespace != "" { + ns = cr.Spec.QueueRef.Namespace } err = client.Get(context.Background(), types.NamespacedName{ - Name: cr.Spec.BusRef.Name, + Name: cr.Spec.QueueRef.Name, Namespace: ns, - }, &bus) + }, &queue) if err != nil { return 
result, err } } - // Can not override original bus spec due to comparison in the later code - busCopy := bus - if busCopy.Spec.Provider == "sqs" { - if busCopy.Spec.SQS.Endpoint == "" { - busCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", busCopy.Spec.SQS.Region) + // Can not override original queue spec due to comparison in the later code + queueCopy := queue + if queueCopy.Spec.Provider == "sqs" { + if queueCopy.Spec.SQS.Endpoint == "" { + queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.Region) } } @@ -289,23 +288,23 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller lmsCopy := lms if lmsCopy.Spec.Provider == "s3" { if lmsCopy.Spec.S3.Endpoint == "" { - lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", busCopy.Spec.SQS.Region) + lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.Region) } } - // If bus is updated - if cr.Spec.BusRef.Name != "" { - if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { + // If queue is updated + if cr.Spec.QueueRef.Name != "" { + if !reflect.DeepEqual(cr.Status.Queue, queue.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullBusChange(ctx, cr, busCopy, lmsCopy, client) + err = mgr.handlePullQueueChange(ctx, cr, queueCopy, lmsCopy, client) if err != nil { - eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) - scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") + eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) + scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") return 
result, err } - cr.Status.Bus = &bus.Spec + cr.Status.Queue = &queue.Spec } } @@ -398,7 +397,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, cr.Status.Phase = enterpriseApi.PhaseError cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.Bus = &enterpriseApi.BusSpec{} + cr.Status.Queue = &enterpriseApi.QueueSpec{} } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -568,27 +567,27 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // no need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - // Bus - bus := enterpriseApi.Bus{} - if cr.Spec.BusRef.Name != "" { + // Queue + queue := enterpriseApi.Queue{} + if cr.Spec.QueueRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.BusRef.Namespace != "" { - ns = cr.Spec.BusRef.Namespace + if cr.Spec.QueueRef.Namespace != "" { + ns = cr.Spec.QueueRef.Namespace } err = client.Get(context.Background(), types.NamespacedName{ - Name: cr.Spec.BusRef.Name, + Name: cr.Spec.QueueRef.Name, Namespace: ns, - }, &bus) + }, &queue) if err != nil { return result, err } } - // Can not override original bus spec due to comparison in the later code - busCopy := bus - if busCopy.Spec.Provider == "sqs" { - if busCopy.Spec.SQS.Endpoint == "" { - busCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", busCopy.Spec.SQS.Region) + // Can not override original queue spec due to comparison in the later code + queueCopy := queue + if queueCopy.Spec.Provider == "sqs" { + if queueCopy.Spec.SQS.Endpoint == "" { + queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.Region) } } @@ -602,33 +601,33 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, err = client.Get(context.Background(), types.NamespacedName{ Name: 
cr.Spec.LargeMessageStoreRef.Name, Namespace: ns, - }, &bus) + }, &queue) if err != nil { return result, err } } - // Can not override original bus spec due to comparison in the later code + // Can not override original queue spec due to comparison in the later code lmsCopy := lms if lmsCopy.Spec.Provider == "s3" { if lmsCopy.Spec.S3.Endpoint == "" { - lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", busCopy.Spec.SQS.Region) + lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.Region) } } - // If bus is updated - if cr.Spec.BusRef.Name != "" { - if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { + // If queue is updated + if cr.Spec.QueueRef.Name != "" { + if !reflect.DeepEqual(cr.Status.Queue, queue.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullBusChange(ctx, cr, busCopy, lmsCopy, client) + err = mgr.handlePullQueueChange(ctx, cr, queueCopy, lmsCopy, client) if err != nil { - eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) - scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") + eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) + scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") return result, err } - cr.Status.Bus = &bus.Spec + cr.Status.Queue = &queue.Spec } } @@ -1218,7 +1217,7 @@ func validateIndexerClusterSpec(ctx context.Context, c splcommon.ControllerClien } // helper function to get the list of IndexerCluster types in the current namespace -func getIndexerClusterList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []client.ListOption) 
(enterpriseApi.IndexerClusterList, error) { +func getIndexerClusterList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []rclient.ListOption) (enterpriseApi.IndexerClusterList, error) { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("getIndexerClusterList").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) @@ -1295,12 +1294,12 @@ func getSiteName(ctx context.Context, c splcommon.ControllerClient, cr *enterpri return extractedValue } -var newSplunkClientForBusPipeline = splclient.NewSplunkClient +var newSplunkClientForQueuePipeline = splclient.NewSplunkClient -// Checks if only PullBus or Pipeline config changed, and updates the conf file if so -func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, bus enterpriseApi.Bus, lms enterpriseApi.LargeMessageStore, k8s client.Client) error { +// Checks if only PullQueue or Pipeline config changed, and updates the conf file if so +func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue enterpriseApi.Queue, lms enterpriseApi.LargeMessageStore, k8s rclient.Client) error { reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("handlePullBusChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) + scopedLog := reqLogger.WithName("handlePullQueueChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) // Only update config for pods that exist readyReplicas := newCR.Status.ReadyReplicas @@ -1314,30 +1313,30 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne if err != nil { return err } - splunkClient := newSplunkClientForBusPipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) + splunkClient := newSplunkClientForQueuePipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) afterDelete := false - if 
(bus.Spec.SQS.Name != "" && newCR.Status.Bus.SQS.Name != "" && bus.Spec.SQS.Name != newCR.Status.Bus.SQS.Name) || - (bus.Spec.Provider != "" && newCR.Status.Bus.Provider != "" && bus.Spec.Provider != newCR.Status.Bus.Provider) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.SQS.Name)); err != nil { + if (queue.Spec.SQS.Name != "" && newCR.Status.Queue.SQS.Name != "" && queue.Spec.SQS.Name != newCR.Status.Queue.SQS.Name) || + (queue.Spec.Provider != "" && newCR.Status.Queue.Provider != "" && queue.Spec.Provider != newCR.Status.Queue.Provider) { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Queue.SQS.Name)); err != nil { updateErr = err } - if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.SQS.Name)); err != nil { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Queue.SQS.Name)); err != nil { updateErr = err } afterDelete = true } - busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&bus, &lms, newCR, afterDelete) + queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &lms, newCR, afterDelete) - for _, pbVal := range busChangedFieldsOutputs { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name), [][]string{pbVal}); err != nil { + for _, pbVal := range queueChangedFieldsOutputs { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { updateErr = err } } - for _, pbVal := range busChangedFieldsInputs { - if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name), [][]string{pbVal}); err != 
nil { + for _, pbVal := range queueChangedFieldsInputs { + if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { updateErr = err } } @@ -1353,23 +1352,23 @@ func (mgr *indexerClusterPodManager) handlePullBusChange(ctx context.Context, ne return updateErr } -// getChangedBusFieldsForIndexer returns a list of changed bus and pipeline fields for indexer pods -func getChangedBusFieldsForIndexer(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields [][]string) { - // Compare bus fields - oldPB := busIndexerStatus.Status.Bus +// getChangedQueueFieldsForIndexer returns a list of changed queue and pipeline fields for indexer pods +func getChangedQueueFieldsForIndexer(queue *enterpriseApi.Queue, lms *enterpriseApi.LargeMessageStore, queueIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields [][]string) { + // Compare queue fields + oldPB := queueIndexerStatus.Status.Queue if oldPB == nil { - oldPB = &enterpriseApi.BusSpec{} + oldPB = &enterpriseApi.QueueSpec{} } - newPB := bus.Spec + newPB := queue.Spec - oldLMS := busIndexerStatus.Status.LargeMessageStore + oldLMS := queueIndexerStatus.Status.LargeMessageStore if oldLMS == nil { oldLMS = &enterpriseApi.LargeMessageStoreSpec{} } newLMS := lms.Spec - // Push all bus fields - busChangedFieldsInputs, busChangedFieldsOutputs = pullBusChanged(oldPB, &newPB, oldLMS, &newLMS, afterDelete) + // Push all queue fields + queueChangedFieldsInputs, queueChangedFieldsOutputs = pullQueueChanged(oldPB, &newPB, oldLMS, &newLMS, afterDelete) // Always set all pipeline fields, not just changed ones pipelineChangedFields = pipelineConfig(true) @@ -1387,24 +1386,24 @@ func imageUpdatedTo9(previousImage string, currentImage 
string) bool { return strings.HasPrefix(previousVersion, "8") && strings.HasPrefix(currentVersion, "9") } -func pullBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (inputs, outputs [][]string) { - busProvider := "" - if newBus.Provider == "sqs" { - busProvider = "sqs_smartbus" +func pullQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (inputs, outputs [][]string) { + queueProvider := "" + if newQueue.Provider == "sqs" { + queueProvider = "sqs_smartbus" } lmsProvider := "" if newLMS.Provider == "s3" { lmsProvider = "sqs_smartbus" } - if oldBus.Provider != newBus.Provider || afterDelete { - inputs = append(inputs, []string{"remote_queue.type", busProvider}) + if oldQueue.Provider != newQueue.Provider || afterDelete { + inputs = append(inputs, []string{"remote_queue.type", queueProvider}) } - if oldBus.SQS.Region != newBus.SQS.Region || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.SQS.Region}) + if oldQueue.SQS.Region != newQueue.SQS.Region || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.Region}) } - if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", busProvider), newBus.SQS.Endpoint}) + if oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint}) } if oldLMS.S3.Endpoint != newLMS.S3.Endpoint || afterDelete { inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", lmsProvider), newLMS.S3.Endpoint}) @@ -1412,18 +1411,18 @@ func pullBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enter if oldLMS.S3.Path != newLMS.S3.Path 
|| afterDelete { inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", lmsProvider), newLMS.S3.Path}) } - if oldBus.SQS.DLQ != newBus.SQS.DLQ || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busProvider), newBus.SQS.DLQ}) + if oldQueue.SQS.DLQ != newQueue.SQS.DLQ || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), newQueue.SQS.DLQ}) } inputs = append(inputs, - []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busProvider), "4"}, - []string{fmt.Sprintf("remote_queue.%s.retry_policy", busProvider), "max_count"}, + []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", queueProvider), "4"}, + []string{fmt.Sprintf("remote_queue.%s.retry_policy", queueProvider), "max_count"}, ) outputs = inputs outputs = append(outputs, - []string{fmt.Sprintf("remote_queue.%s.send_interval", busProvider), "5s"}, - []string{fmt.Sprintf("remote_queue.%s.encoding_format", busProvider), "s2s"}, + []string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"}, + []string{fmt.Sprintf("remote_queue.%s.encoding_format", queueProvider), "s2s"}, ) return inputs, outputs diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index ff10e453d..4c166c8e0 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -1344,15 +1344,15 @@ func TestInvalidIndexerClusterSpec(t *testing.T) { func TestGetIndexerStatefulSet(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") - bus := enterpriseApi.Bus{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ 
Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -1369,8 +1369,8 @@ func TestGetIndexerStatefulSet(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IndexerClusterSpec{ - BusRef: corev1.ObjectReference{ - Name: bus.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, }, }, } @@ -2045,18 +2045,18 @@ func TestImageUpdatedTo9(t *testing.T) { } } -func TestGetChangedBusFieldsForIndexer(t *testing.T) { +func TestGetChangedQueueFieldsForIndexer(t *testing.T) { provider := "sqs_smartbus" - bus := enterpriseApi.Bus{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -2086,8 +2086,8 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { newCR := &enterpriseApi.IndexerCluster{ Spec: enterpriseApi.IndexerClusterSpec{ - BusRef: corev1.ObjectReference{ - Name: bus.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, }, LargeMessageStoreRef: corev1.ObjectReference{ Name: lms.Name, @@ -2095,32 +2095,32 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { }, } - busChangedFieldsInputs, busChangedFieldsOutputs, pipelineChangedFields := getChangedBusFieldsForIndexer(&bus, &lms, newCR, false) - assert.Equal(t, 8, len(busChangedFieldsInputs)) + queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &lms, newCR, false) + assert.Equal(t, 8, len(queueChangedFieldsInputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, - {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + 
{fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, - }, busChangedFieldsInputs) + }, queueChangedFieldsInputs) - assert.Equal(t, 10, len(busChangedFieldsOutputs)) + assert.Equal(t, 10, len(queueChangedFieldsOutputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, - {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, - }, busChangedFieldsOutputs) + }, queueChangedFieldsOutputs) assert.Equal(t, 5, len(pipelineChangedFields)) assert.Equal(t, [][]string{ @@ -2132,20 +2132,20 @@ func TestGetChangedBusFieldsForIndexer(t *testing.T) { }, 
pipelineChangedFields) } -func TestHandlePullBusChange(t *testing.T) { +func TestHandlePullQueueChange(t *testing.T) { // Object definitions provider := "sqs_smartbus" - bus := enterpriseApi.Bus{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", Namespace: "test", }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -2183,8 +2183,8 @@ func TestHandlePullBusChange(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IndexerClusterSpec{ - BusRef: corev1.ObjectReference{ - Name: bus.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, }, LargeMessageStoreRef: corev1.ObjectReference{ Name: lms.Name, @@ -2193,7 +2193,7 @@ func TestHandlePullBusChange(t *testing.T) { }, Status: enterpriseApi.IndexerClusterStatus{ ReadyReplicas: 3, - Bus: &enterpriseApi.BusSpec{}, + Queue: &enterpriseApi.QueueSpec{}, LargeMessageStore: &enterpriseApi.LargeMessageStoreSpec{}, }, } @@ -2251,7 +2251,7 @@ func TestHandlePullBusChange(t *testing.T) { // Mock pods c := spltest.NewMockClient() ctx := context.TODO() - c.Create(ctx, &bus) + c.Create(ctx, &queue) c.Create(ctx, &lms) c.Create(ctx, newCR) c.Create(ctx, pod0) @@ -2260,7 +2260,7 @@ func TestHandlePullBusChange(t *testing.T) { // Negative test case: secret not found mgr := &indexerClusterPodManager{} - err := mgr.handlePullBusChange(ctx, newCR, bus, lms, c) + err := mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) assert.NotNil(t, err) // Mock secret @@ -2269,18 +2269,18 @@ func TestHandlePullBusChange(t *testing.T) { mockHTTPClient := &spltest.MockHTTPClient{} // Negative test case: failure in creating remote queue stanza - mgr = newTestPullBusPipelineManager(mockHTTPClient) + mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, bus, lms, c) + err = 
mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) assert.NotNil(t, err) // outputs.conf propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, - {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, } @@ -2290,22 +2290,22 @@ func TestHandlePullBusChange(t *testing.T) { propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}) body := buildFormBody(propertyKVListOutputs) - addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, bus, newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, queue, newCR.Status.ReadyReplicas, "conf-outputs", body) // Negative test case: failure in creating remote queue stanza - mgr = newTestPullBusPipelineManager(mockHTTPClient) + mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, bus, lms, c) + err = mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) assert.NotNil(t, err) // inputs.conf body = buildFormBody(propertyKVList) - addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, bus, newCR.Status.ReadyReplicas, "conf-inputs", body) + addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, queue, newCR.Status.ReadyReplicas, 
"conf-inputs", body) // Negative test case: failure in updating remote queue stanza - mgr = newTestPullBusPipelineManager(mockHTTPClient) + mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, bus, lms, c) + err = mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) assert.NotNil(t, err) // default-mode.conf @@ -2331,9 +2331,9 @@ func TestHandlePullBusChange(t *testing.T) { } } - mgr = newTestPullBusPipelineManager(mockHTTPClient) + mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullBusChange(ctx, newCR, bus, lms, c) + err = mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) assert.Nil(t, err) } @@ -2351,7 +2351,7 @@ func buildFormBody(pairs [][]string) string { return b.String() } -func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, bus enterpriseApi.Bus, replicas int32, confName, body string) { +func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, queue enterpriseApi.Queue, replicas int32, confName, body string) { for i := 0; i < int(replicas); i++ { podName := fmt.Sprintf("splunk-%s-indexer-%d", cr.GetName(), i) baseURL := fmt.Sprintf( @@ -2359,18 +2359,18 @@ func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) 
mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } } -func newTestPullBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *indexerClusterPodManager { - newSplunkClientForBusPipeline = func(uri, user, pass string) *splclient.SplunkClient { +func newTestPullQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *indexerClusterPodManager { + newSplunkClientForQueuePipeline = func(uri, user, pass string) *splclient.SplunkClient { return &splclient.SplunkClient{ ManagementURI: uri, Username: user, @@ -2379,11 +2379,11 @@ func newTestPullBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *inde } } return &indexerClusterPodManager{ - newSplunkClient: newSplunkClientForBusPipeline, + newSplunkClient: newSplunkClientForQueuePipeline, } } -func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { +func TestApplyIndexerClusterManager_Queue_Success(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") ctx := context.TODO() @@ -2395,16 +2395,16 @@ func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions - bus := enterpriseApi.Bus{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", Namespace: "test", }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -2414,7 +2414,7 @@ func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { }, }, } - c.Create(ctx, &bus) + c.Create(ctx, &queue) cm := &enterpriseApi.ClusterManager{ TypeMeta: metav1.TypeMeta{Kind: "ClusterManager"}, @@ -2436,9 +2436,9 @@ func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { }, Spec: enterpriseApi.IndexerClusterSpec{ Replicas: 1, - BusRef: corev1.ObjectReference{ - Name: bus.Name, - Namespace: bus.Namespace, + QueueRef: 
corev1.ObjectReference{ + Name: queue.Name, + Namespace: queue.Namespace, }, CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ ClusterManagerRef: corev1.ObjectReference{ @@ -2552,14 +2552,14 @@ func TestApplyIndexerClusterManager_Bus_Success(t *testing.T) { mockHTTPClient := &spltest.MockHTTPClient{} base := "https://splunk-test-indexer-0.splunk-test-indexer-headless.test.svc.cluster.local:8089/servicesNS/nobody/system/configs" - queue := "remote_queue:test-queue" + q := "remote_queue:test-queue" - mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs", base), "name="+queue), 200, "", nil) - mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs/%s", base, queue), ""), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs", base), "name="+q), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-outputs/%s", base, q), ""), 200, "", nil) // inputs.conf - mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs", base), "name="+queue), 200, "", nil) - mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs/%s", base, queue), ""), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs", base), "name="+q), 200, "", nil) + mockHTTPClient.AddHandler(mustReq("POST", fmt.Sprintf("%s/conf-inputs/%s", base, q), ""), 200, "", nil) // default-mode.conf pipelineFields := []string{ diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 524f183b5..299aa8d0c 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -73,7 +73,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr defer updateCRStatus(ctx, client, cr, &err) if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.Bus = &enterpriseApi.BusSpec{} + cr.Status.Queue = &enterpriseApi.QueueSpec{} } cr.Status.Replicas = cr.Spec.Replicas @@ -210,27 +210,27 
@@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // No need to requeue if everything is ready if cr.Status.Phase == enterpriseApi.PhaseReady { - // Bus - bus := enterpriseApi.Bus{} - if cr.Spec.BusRef.Name != "" { + // Queue + queue := enterpriseApi.Queue{} + if cr.Spec.QueueRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.BusRef.Namespace != "" { - ns = cr.Spec.BusRef.Namespace + if cr.Spec.QueueRef.Namespace != "" { + ns = cr.Spec.QueueRef.Namespace } err = client.Get(ctx, types.NamespacedName{ - Name: cr.Spec.BusRef.Name, + Name: cr.Spec.QueueRef.Name, Namespace: ns, - }, &bus) + }, &queue) if err != nil { return result, err } } - // Can not override original bus spec due to comparison in the later code - busCopy := bus - if busCopy.Spec.Provider == "sqs" { - if busCopy.Spec.SQS.Endpoint == "" { - busCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", busCopy.Spec.SQS.Region) + // Can not override original queue spec due to comparison in the later code + queueCopy := queue + if queueCopy.Spec.Provider == "sqs" { + if queueCopy.Spec.SQS.Endpoint == "" { + queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.Region) } } @@ -250,26 +250,26 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } } - // Can not override original bus spec due to comparison in the later code + // Can not override original queue spec due to comparison in the later code lmsCopy := lms if lmsCopy.Spec.Provider == "s3" { if lmsCopy.Spec.S3.Endpoint == "" { - lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", bus.Spec.SQS.Region) + lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.Region) } } - // If bus is updated - if !reflect.DeepEqual(cr.Status.Bus, bus.Spec) { + // If queue is updated + if !reflect.DeepEqual(cr.Status.Queue, queue.Spec) { mgr := newIngestorClusterPodManager(scopedLog, cr, 
namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePushBusChange(ctx, cr, busCopy, lmsCopy, client) + err = mgr.handlePushQueueChange(ctx, cr, queueCopy, lmsCopy, client) if err != nil { - eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Bus/Pipeline config change after pod creation: %s", err.Error())) - scopedLog.Error(err, "Failed to update conf file for Bus/Pipeline config change after pod creation") + eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) + scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") return result, err } - cr.Status.Bus = &bus.Spec + cr.Status.Queue = &queue.Spec } // Upgrade fron automated MC to MC CRD @@ -342,10 +342,10 @@ func getIngestorStatefulSet(ctx context.Context, client splcommon.ControllerClie return ss, nil } -// Checks if only Bus or Pipeline config changed, and updates the conf file if so -func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, bus enterpriseApi.Bus, lms enterpriseApi.LargeMessageStore, k8s client.Client) error { +// Checks if only Queue or Pipeline config changed, and updates the conf file if so +func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue enterpriseApi.Queue, lms enterpriseApi.LargeMessageStore, k8s client.Client) error { reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("handlePushBusChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) + scopedLog := reqLogger.WithName("handlePushQueueChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) // Only update config for pods that exist readyReplicas := newCR.Status.Replicas @@ -362,18 +362,18 @@ func (mgr 
*ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) afterDelete := false - if (bus.Spec.SQS.Name != "" && newCR.Status.Bus.SQS.Name != "" && bus.Spec.SQS.Name != newCR.Status.Bus.SQS.Name) || - (bus.Spec.Provider != "" && newCR.Status.Bus.Provider != "" && bus.Spec.Provider != newCR.Status.Bus.Provider) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Bus.SQS.Name)); err != nil { + if (queue.Spec.SQS.Name != "" && newCR.Status.Queue.SQS.Name != "" && queue.Spec.SQS.Name != newCR.Status.Queue.SQS.Name) || + (queue.Spec.Provider != "" && newCR.Status.Queue.Provider != "" && queue.Spec.Provider != newCR.Status.Queue.Provider) { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Queue.SQS.Name)); err != nil { updateErr = err } afterDelete = true } - busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&bus, &lms, newCR, afterDelete) + queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &lms, newCR, afterDelete) - for _, pbVal := range busChangedFields { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name), [][]string{pbVal}); err != nil { + for _, pbVal := range queueChangedFields { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { updateErr = err } } @@ -389,22 +389,22 @@ func (mgr *ingestorClusterPodManager) handlePushBusChange(ctx context.Context, n return updateErr } -// getChangedBusFieldsForIngestor returns a list of changed bus and pipeline fields for ingestor pods -func getChangedBusFieldsForIngestor(bus *enterpriseApi.Bus, lms *enterpriseApi.LargeMessageStore, busIngestorStatus 
*enterpriseApi.IngestorCluster, afterDelete bool) (busChangedFields, pipelineChangedFields [][]string) { - oldPB := busIngestorStatus.Status.Bus +// getChangedQueueFieldsForIngestor returns a list of changed queue and pipeline fields for ingestor pods +func getChangedQueueFieldsForIngestor(queue *enterpriseApi.Queue, lms *enterpriseApi.LargeMessageStore, queueIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool) (queueChangedFields, pipelineChangedFields [][]string) { + oldPB := queueIngestorStatus.Status.Queue if oldPB == nil { - oldPB = &enterpriseApi.BusSpec{} + oldPB = &enterpriseApi.QueueSpec{} } - newPB := &bus.Spec + newPB := &queue.Spec - oldLMS := busIngestorStatus.Status.LargeMessageStore + oldLMS := queueIngestorStatus.Status.LargeMessageStore if oldLMS == nil { oldLMS = &enterpriseApi.LargeMessageStoreSpec{} } newLMS := &lms.Spec - // Push changed bus fields - busChangedFields = pushBusChanged(oldPB, newPB, oldLMS, newLMS, afterDelete) + // Push changed queue fields + queueChangedFields = pushQueueChanged(oldPB, newPB, oldLMS, newLMS, afterDelete) // Always changed pipeline fields pipelineChangedFields = pipelineConfig(false) @@ -443,24 +443,24 @@ func pipelineConfig(isIndexer bool) (output [][]string) { return output } -func pushBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (output [][]string) { - busProvider := "" - if newBus.Provider == "sqs" { - busProvider = "sqs_smartbus" +func pushQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (output [][]string) { + queueProvider := "" + if newQueue.Provider == "sqs" { + queueProvider = "sqs_smartbus" } lmsProvider := "" if newLMS.Provider == "s3" { lmsProvider = "sqs_smartbus" } - if oldBus.Provider != newBus.Provider || afterDelete { - output = append(output, []string{"remote_queue.type", busProvider}) + if oldQueue.Provider != newQueue.Provider 
|| afterDelete { + output = append(output, []string{"remote_queue.type", queueProvider}) } - if oldBus.SQS.Region != newBus.SQS.Region || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", busProvider), newBus.SQS.Region}) + if oldQueue.SQS.Region != newQueue.SQS.Region || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.Region}) } - if oldBus.SQS.Endpoint != newBus.SQS.Endpoint || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", busProvider), newBus.SQS.Endpoint}) + if oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint}) } if oldLMS.S3.Endpoint != newLMS.S3.Endpoint || afterDelete { output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", lmsProvider), newLMS.S3.Endpoint}) @@ -468,15 +468,15 @@ func pushBusChanged(oldBus, newBus *enterpriseApi.BusSpec, oldLMS, newLMS *enter if oldLMS.S3.Path != newLMS.S3.Path || afterDelete { output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", lmsProvider), newLMS.S3.Path}) } - if oldBus.SQS.DLQ != newBus.SQS.DLQ || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", busProvider), newBus.SQS.DLQ}) + if oldQueue.SQS.DLQ != newQueue.SQS.DLQ || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), newQueue.SQS.DLQ}) } output = append(output, - []string{fmt.Sprintf("remote_queue.%s.encoding_format", busProvider), "s2s"}, - []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", busProvider), "4"}, - []string{fmt.Sprintf("remote_queue.%s.retry_policy", busProvider), "max_count"}, - []string{fmt.Sprintf("remote_queue.%s.send_interval", busProvider), "5s"}) + 
[]string{fmt.Sprintf("remote_queue.%s.encoding_format", queueProvider), "s2s"}, + []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", queueProvider), "4"}, + []string{fmt.Sprintf("remote_queue.%s.retry_policy", queueProvider), "max_count"}, + []string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"}) return output } diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index 75cc14ec5..424806846 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -65,16 +65,16 @@ func TestApplyIngestorCluster(t *testing.T) { // Object definitions provider := "sqs_smartbus" - bus := &enterpriseApi.Bus{ + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", Namespace: "test", }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -84,7 +84,7 @@ func TestApplyIngestorCluster(t *testing.T) { }, }, } - c.Create(ctx, bus) + c.Create(ctx, queue) lms := enterpriseApi.LargeMessageStore{ TypeMeta: metav1.TypeMeta{ @@ -119,9 +119,9 @@ func TestApplyIngestorCluster(t *testing.T) { CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ Mock: true, }, - BusRef: corev1.ObjectReference{ - Name: bus.Name, - Namespace: bus.Namespace, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, + Namespace: queue.Namespace, }, LargeMessageStoreRef: corev1.ObjectReference{ Name: lms.Name, @@ -285,18 +285,18 @@ func TestApplyIngestorCluster(t *testing.T) { propertyKVList := [][]string{ {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, - {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + 
{fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, bus, cr.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, queue, cr.Status.ReadyReplicas, "conf-outputs", body) // default-mode.conf propertyKVList = [][]string{ @@ -333,15 +333,15 @@ func TestGetIngestorStatefulSet(t *testing.T) { // Object definitions os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") - bus := enterpriseApi.Bus{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -362,8 +362,8 @@ func TestGetIngestorStatefulSet(t *testing.T) { }, Spec: enterpriseApi.IngestorClusterSpec{ Replicas: 2, - BusRef: corev1.ObjectReference{ - Name: bus.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, }, }, } @@ -416,18 +416,18 @@ func TestGetIngestorStatefulSet(t *testing.T) { 
test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-test-ingestor","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"ownerReferences":[{"apiVersion":"","kind":"IngestorCluster","name":"test","uid":"","controller":true}]},"spec":{"replicas":3,"selector":{"matchLabels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-test-probe-configmap","defaultMode":365}},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-test-ingestor-secret-v1","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protocol":"TCP"},{"name":"user-defined","containerPort":32000,"protocol":"UDP"}],"env":[{"name":"TEST_ENV_VAR","value":"test_value"},{"name":"SPLUNK_HOME","value":
"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"splunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeoutSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":40,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"imagePullPolicy":"IfNotPresent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivilegeEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"serviceAccountName":"defaults","securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnRootMismatch"},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-test-ingestor
"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-test-ingestor-headless","podManagementPolicy":"Parallel","updateStrategy":{"type":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) } -func TestGetChangedBusFieldsForIngestor(t *testing.T) { +func TestGetChangedQueueFieldsForIngestor(t *testing.T) { provider := "sqs_smartbus" - bus := enterpriseApi.Bus{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -457,8 +457,8 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { newCR := &enterpriseApi.IngestorCluster{ Spec: enterpriseApi.IngestorClusterSpec{ - BusRef: corev1.ObjectReference{ - Name: bus.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, 
}, LargeMessageStoreRef: corev1.ObjectReference{ Name: lms.Name, @@ -467,21 +467,21 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { Status: enterpriseApi.IngestorClusterStatus{}, } - busChangedFields, pipelineChangedFields := getChangedBusFieldsForIngestor(&bus, &lms, newCR, false) + queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &lms, newCR, false) - assert.Equal(t, 10, len(busChangedFields)) + assert.Equal(t, 10, len(queueChangedFields)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, - {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, - }, busChangedFields) + }, queueChangedFields) assert.Equal(t, 6, len(pipelineChangedFields)) assert.Equal(t, [][]string{ @@ -494,19 +494,19 @@ func TestGetChangedBusFieldsForIngestor(t *testing.T) { }, pipelineChangedFields) } -func TestHandlePushBusChange(t *testing.T) { +func TestHandlePushQueueChange(t *testing.T) { // Object definitions provider := "sqs_smartbus" - bus := enterpriseApi.Bus{ + queue := enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: 
"Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -543,8 +543,8 @@ func TestHandlePushBusChange(t *testing.T) { Namespace: "test", }, Spec: enterpriseApi.IngestorClusterSpec{ - BusRef: corev1.ObjectReference{ - Name: bus.Name, + QueueRef: corev1.ObjectReference{ + Name: queue.Name, }, LargeMessageStoreRef: corev1.ObjectReference{ Name: lms.Name, @@ -553,7 +553,7 @@ func TestHandlePushBusChange(t *testing.T) { Status: enterpriseApi.IngestorClusterStatus{ Replicas: 3, ReadyReplicas: 3, - Bus: &enterpriseApi.BusSpec{}, + Queue: &enterpriseApi.QueueSpec{}, LargeMessageStore: &enterpriseApi.LargeMessageStoreSpec{}, }, } @@ -618,7 +618,7 @@ func TestHandlePushBusChange(t *testing.T) { // Negative test case: secret not found mgr := &ingestorClusterPodManager{} - err := mgr.handlePushBusChange(ctx, newCR, bus, lms, c) + err := mgr.handlePushQueueChange(ctx, newCR, queue, lms, c) assert.NotNil(t, err) // Mock secret @@ -627,31 +627,31 @@ func TestHandlePushBusChange(t *testing.T) { mockHTTPClient := &spltest.MockHTTPClient{} // Negative test case: failure in creating remote queue stanza - mgr = newTestPushBusPipelineManager(mockHTTPClient) + mgr = newTestPushQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushBusChange(ctx, newCR, bus, lms, c) + err = mgr.handlePushQueueChange(ctx, newCR, queue, lms, c) assert.NotNil(t, err) // outputs.conf propertyKVList := [][]string{ {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), bus.Spec.SQS.Region}, - {fmt.Sprintf("remote_queue.%s.endpoint", provider), bus.Spec.SQS.Endpoint}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, 
{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, - {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), bus.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.max_count.%s.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, newCR, &bus, newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, newCR, &queue, newCR.Status.ReadyReplicas, "conf-outputs", body) // Negative test case: failure in creating remote queue stanza - mgr = newTestPushBusPipelineManager(mockHTTPClient) + mgr = newTestPushQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushBusChange(ctx, newCR, bus, lms, c) + err = mgr.handlePushQueueChange(ctx, newCR, queue, lms, c) assert.NotNil(t, err) // default-mode.conf @@ -678,13 +678,13 @@ func TestHandlePushBusChange(t *testing.T) { } } - mgr = newTestPushBusPipelineManager(mockHTTPClient) + mgr = newTestPushQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushBusChange(ctx, newCR, bus, lms, c) + err = mgr.handlePushQueueChange(ctx, newCR, queue, lms, c) assert.Nil(t, err) } -func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, bus *enterpriseApi.Bus, replicas int32, confName, body string) { +func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, queue *enterpriseApi.Queue, replicas int32, confName, body string) { for i := 0; i < int(replicas); i++ { podName := fmt.Sprintf("splunk-%s-ingestor-%d", cr.GetName(), i) baseURL := fmt.Sprintf( @@ -692,18 
+692,18 @@ func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, c podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", bus.Spec.SQS.Name)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } } -func newTestPushBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *ingestorClusterPodManager { - newSplunkClientForPushBusPipeline := func(uri, user, pass string) *splclient.SplunkClient { +func newTestPushQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *ingestorClusterPodManager { + newSplunkClientForPushQueuePipeline := func(uri, user, pass string) *splclient.SplunkClient { return &splclient.SplunkClient{ ManagementURI: uri, Username: user, @@ -712,6 +712,6 @@ func newTestPushBusPipelineManager(mockHTTPClient *spltest.MockHTTPClient) *inge } } return &ingestorClusterPodManager{ - newSplunkClient: newSplunkClientForPushBusPipeline, + newSplunkClient: newSplunkClientForPushQueuePipeline, } } diff --git a/pkg/splunk/enterprise/monitoringconsole.go b/pkg/splunk/enterprise/monitoringconsole.go index 64de4a2de..77c58c328 100644 --- a/pkg/splunk/enterprise/monitoringconsole.go +++ b/pkg/splunk/enterprise/monitoringconsole.go @@ -33,7 +33,6 @@ import ( k8serrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" rclient "sigs.k8s.io/controller-runtime/pkg/client" 
"sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -207,7 +206,7 @@ func getMonitoringConsoleStatefulSet(ctx context.Context, client splcommon.Contr } // helper function to get the list of MonitoringConsole types in the current namespace -func getMonitoringConsoleList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []client.ListOption) (enterpriseApi.MonitoringConsoleList, error) { +func getMonitoringConsoleList(ctx context.Context, c splcommon.ControllerClient, cr splcommon.MetaObject, listOpts []rclient.ListOption) (enterpriseApi.MonitoringConsoleList, error) { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("getMonitoringConsoleList").WithValues("name", cr.GetName(), "namespace", cr.GetNamespace()) diff --git a/pkg/splunk/enterprise/bus.go b/pkg/splunk/enterprise/queue.go similarity index 91% rename from pkg/splunk/enterprise/bus.go rename to pkg/splunk/enterprise/queue.go index b6e8318ed..1f36f6bad 100644 --- a/pkg/splunk/enterprise/bus.go +++ b/pkg/splunk/enterprise/queue.go @@ -27,8 +27,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -// ApplyBus reconciles the state of an IngestorCluster custom resource -func ApplyBus(ctx context.Context, client client.Client, cr *enterpriseApi.Bus) (reconcile.Result, error) { +// ApplyQueue reconciles the state of an IngestorCluster custom resource +func ApplyQueue(ctx context.Context, client client.Client, cr *enterpriseApi.Queue) (reconcile.Result, error) { var err error // Unless modified, reconcile for this object will be requeued after 5 seconds @@ -44,7 +44,7 @@ func ApplyBus(ctx context.Context, client client.Client, cr *enterpriseApi.Bus) eventPublisher, _ := newK8EventPublisher(client, cr) ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) - cr.Kind = "Bus" + cr.Kind = "Queue" // Initialize phase cr.Status.Phase = enterpriseApi.PhaseError diff --git a/pkg/splunk/enterprise/bus_test.go 
b/pkg/splunk/enterprise/queue_test.go similarity index 81% rename from pkg/splunk/enterprise/bus_test.go rename to pkg/splunk/enterprise/queue_test.go index 6e5bf1aa7..45a813282 100644 --- a/pkg/splunk/enterprise/bus_test.go +++ b/pkg/splunk/enterprise/queue_test.go @@ -27,7 +27,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" ) -func TestApplyBus(t *testing.T) { +func TestApplyQueue(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") ctx := context.TODO() @@ -39,16 +39,16 @@ func TestApplyBus(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions - bus := &enterpriseApi.Bus{ + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "bus", + Name: "queue", Namespace: "test", }, - Spec: enterpriseApi.BusSpec{ + Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -58,12 +58,12 @@ func TestApplyBus(t *testing.T) { }, }, } - c.Create(ctx, bus) + c.Create(ctx, queue) - // ApplyBus - result, err := ApplyBus(ctx, c, bus) + // ApplyQueue + result, err := ApplyQueue(ctx, c, queue) assert.NoError(t, err) assert.True(t, result.Requeue) - assert.NotEqual(t, enterpriseApi.PhaseError, bus.Status.Phase) - assert.Equal(t, enterpriseApi.PhaseReady, bus.Status.Phase) + assert.NotEqual(t, enterpriseApi.PhaseError, queue.Status.Phase) + assert.Equal(t, enterpriseApi.PhaseReady, queue.Status.Phase) } diff --git a/pkg/splunk/enterprise/types.go b/pkg/splunk/enterprise/types.go index 180659498..b7b691415 100644 --- a/pkg/splunk/enterprise/types.go +++ b/pkg/splunk/enterprise/types.go @@ -63,8 +63,8 @@ const ( // SplunkIngestor may be a standalone or clustered ingestion peer SplunkIngestor InstanceType = "ingestor" - // SplunkBus is the bus instance - SplunkBus InstanceType = "bus" + // SplunkQueue is the queue instance + SplunkQueue 
InstanceType = "queue" // SplunkLargeMessageStore is the large message store instance SplunkLargeMessageStore InstanceType = "large-message-store" @@ -297,8 +297,8 @@ func KindToInstanceString(kind string) string { return SplunkIndexer.ToString() case "IngestorCluster": return SplunkIngestor.ToString() - case "Bus": - return SplunkBus.ToString() + case "Queue": + return SplunkQueue.ToString() case "LargeMessageStore": return SplunkLargeMessageStore.ToString() case "LicenseManager": diff --git a/pkg/splunk/enterprise/upgrade.go b/pkg/splunk/enterprise/upgrade.go index 5d50e8cec..71fc017da 100644 --- a/pkg/splunk/enterprise/upgrade.go +++ b/pkg/splunk/enterprise/upgrade.go @@ -10,7 +10,6 @@ import ( appsv1 "k8s.io/api/apps/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" - rclient "sigs.k8s.io/controller-runtime/pkg/client" runtime "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -161,8 +160,8 @@ IndexerCluster: } // check if cluster is multisite if clusterInfo.MultiSite == "true" { - opts := []rclient.ListOption{ - rclient.InNamespace(cr.GetNamespace()), + opts := []runtime.ListOption{ + runtime.InNamespace(cr.GetNamespace()), } indexerList, err := getIndexerClusterList(ctx, c, cr, opts) if err != nil { @@ -220,8 +219,8 @@ SearchHeadCluster: // check if a search head cluster exists with the same ClusterManager instance attached searchHeadClusterInstance := enterpriseApi.SearchHeadCluster{} - opts := []rclient.ListOption{ - rclient.InNamespace(cr.GetNamespace()), + opts := []runtime.ListOption{ + runtime.InNamespace(cr.GetNamespace()), } searchHeadList, err := getSearchHeadClusterList(ctx, c, cr, opts) if err != nil { diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index e8f0736b3..01b304c12 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2291,19 +2291,19 @@ func fetchCurrentCRWithStatusUpdate(ctx context.Context, client 
splcommon.Contro origCR.(*enterpriseApi.IngestorCluster).Status.DeepCopyInto(&latestIngCR.Status) return latestIngCR, nil - case "Bus": - latestBusCR := &enterpriseApi.Bus{} - err = client.Get(ctx, namespacedName, latestBusCR) + case "Queue": + latestQueueCR := &enterpriseApi.Queue{} + err = client.Get(ctx, namespacedName, latestQueueCR) if err != nil { return nil, err } - origCR.(*enterpriseApi.Bus).Status.Message = "" + origCR.(*enterpriseApi.Queue).Status.Message = "" if (crError != nil) && ((*crError) != nil) { - origCR.(*enterpriseApi.Bus).Status.Message = (*crError).Error() + origCR.(*enterpriseApi.Queue).Status.Message = (*crError).Error() } - origCR.(*enterpriseApi.Bus).Status.DeepCopyInto(&latestBusCR.Status) - return latestBusCR, nil + origCR.(*enterpriseApi.Queue).Status.DeepCopyInto(&latestQueueCR.Status) + return latestQueueCR, nil case "LargeMessageStore": latestLmsCR := &enterpriseApi.LargeMessageStore{} @@ -2547,7 +2547,7 @@ func loadFixture(t *testing.T, filename string) string { if err != nil { t.Fatalf("Failed to load fixture %s: %v", filename, err) } - + // Compact the JSON to match the output from json.Marshal var compactJSON bytes.Buffer if err := json.Compact(&compactJSON, data); err != nil { diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index 711580d99..687473bc0 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -39,7 +39,7 @@ var ( testenvInstance *testenv.TestEnv testSuiteName = "indingsep-" + testenv.RandomDNSName(3) - bus = enterpriseApi.BusSpec{ + queue = enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue", @@ -85,7 +85,7 @@ var ( "AWS_STS_REGIONAL_ENDPOINTS=regional", } - updateBus = enterpriseApi.BusSpec{ + updateQueue = 
enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ Name: "test-queue-updated", diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index 1b3d27c70..a27269889 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -79,10 +79,10 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus - testcaseEnvInst.Log.Info("Deploy Bus") - b, err := deployment.DeployBus(ctx, "bus", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus") + // Deploy Queue + testcaseEnvInst.Log.Info("Deploy Queue") + q, err := deployment.DeployQueue(ctx, "queue", queue) + Expect(err).To(Succeed(), "Unable to deploy Queue") // Deploy LargeMessageStore testcaseEnvInst.Log.Info("Deploy LargeMessageStore") @@ -91,7 +91,7 @@ var _ = Describe("indingsep test", func() { // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: b.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -101,7 +101,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: b.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = 
deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -130,12 +130,12 @@ var _ = Describe("indingsep test", func() { err = deployment.DeleteCR(ctx, ingest) Expect(err).To(Succeed(), "Unable to delete Ingestor Cluster instance", "Ingestor Cluster Name", ingest) - // Delete the Bus - bus := &enterpriseApi.Bus{} - err = deployment.GetInstance(ctx, "bus", bus) - Expect(err).To(Succeed(), "Unable to get Bus instance", "Bus Name", bus) - err = deployment.DeleteCR(ctx, bus) - Expect(err).To(Succeed(), "Unable to delete Bus", "Bus Name", bus) + // Delete the Queue + queue := &enterpriseApi.Queue{} + err = deployment.GetInstance(ctx, "queue", queue) + Expect(err).To(Succeed(), "Unable to get Queue instance", "Queue Name", queue) + err = deployment.DeleteCR(ctx, queue) + Expect(err).To(Succeed(), "Unable to delete Queue", "Queue Name", queue) // Delete the LargeMessageStore lm = &enterpriseApi.LargeMessageStore{} @@ -152,10 +152,10 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus - testcaseEnvInst.Log.Info("Deploy Bus") - bc, err := deployment.DeployBus(ctx, "bus", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus") + // Deploy Queue + testcaseEnvInst.Log.Info("Deploy Queue") + q, err := deployment.DeployQueue(ctx, "queue", queue) + Expect(err).To(Succeed(), "Unable to deploy Queue") // Deploy LargeMessageStore testcaseEnvInst.Log.Info("Deploy LargeMessageStore") @@ -205,7 +205,7 @@ var _ = Describe("indingsep test", func() { Image: testcaseEnvInst.GetSplunkImage(), }, }, - BusRef: v1.ObjectReference{Name: bc.Name}, + QueueRef: v1.ObjectReference{Name: q.Name}, LargeMessageStoreRef: 
v1.ObjectReference{Name: lm.Name}, Replicas: 3, AppFrameworkConfig: appFrameworkSpec, @@ -256,10 +256,10 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus - testcaseEnvInst.Log.Info("Deploy Bus") - bc, err := deployment.DeployBus(ctx, "bus", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus") + // Deploy Queue + testcaseEnvInst.Log.Info("Deploy Queue") + q, err := deployment.DeployQueue(ctx, "queue", queue) + Expect(err).To(Succeed(), "Unable to deploy Queue") // Deploy LargeMessageStore testcaseEnvInst.Log.Info("Deploy LargeMessageStore") @@ -268,7 +268,7 @@ var _ = Describe("indingsep test", func() { // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -278,7 +278,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -301,7 +301,7 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster 
Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(ingest.Status.Bus).To(Equal(bus), "Ingestor bus status is not the same as provided as input") + Expect(ingest.Status.Queue).To(Equal(queue), "Ingestor queue status is not the same as provided as input") // Get instance of current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -311,7 +311,7 @@ var _ = Describe("indingsep test", func() { // Verify Indexer Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(index.Status.Bus).To(Equal(bus), "Indexer bus status is not the same as provided as input") + Expect(index.Status.Queue).To(Equal(queue), "Indexer queue status is not the same as provided as input") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files") @@ -363,10 +363,10 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Create Service Account") testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // Deploy Bus - testcaseEnvInst.Log.Info("Deploy Bus") - bc, err := deployment.DeployBus(ctx, "bus", bus) - Expect(err).To(Succeed(), "Unable to deploy Bus") + // Deploy Queue + testcaseEnvInst.Log.Info("Deploy Queue") + q, err := deployment.DeployQueue(ctx, "queue", queue) + Expect(err).To(Succeed(), "Unable to deploy Queue") // Deploy LargeMessageStore testcaseEnvInst.Log.Info("Deploy LargeMessageStore") @@ -375,7 +375,7 @@ var _ = Describe("indingsep test", func() { // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: bc.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor 
Cluster") // Deploy Cluster Manager @@ -385,7 +385,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: bc.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -400,17 +400,17 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) - // Get instance of current Bus CR with latest config - testcaseEnvInst.Log.Info("Get instance of current Bus CR with latest config") - bus := &enterpriseApi.Bus{} - err = deployment.GetInstance(ctx, bc.Name, bus) - Expect(err).To(Succeed(), "Failed to get instance of Bus") + // Get instance of current Queue CR with latest config + testcaseEnvInst.Log.Info("Get instance of current Queue CR with latest config") + queue := &enterpriseApi.Queue{} + err = deployment.GetInstance(ctx, q.Name, queue) + Expect(err).To(Succeed(), "Failed to get instance of Queue") - // Update instance of Bus CR with new bus - testcaseEnvInst.Log.Info("Update instance of Bus CR with new bus") - bus.Spec = updateBus - err = deployment.UpdateCR(ctx, bus) - Expect(err).To(Succeed(), "Unable to deploy Bus with updated CR") + // Update instance of Queue CR with new queue + testcaseEnvInst.Log.Info("Update instance of Queue CR with new queue") + queue.Spec = updateQueue + err = deployment.UpdateCR(ctx, queue) + Expect(err).To(Succeed(), "Unable to deploy Queue with updated CR") // Ensure that Ingestor Cluster has not 
been restarted testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster has not been restarted") @@ -428,7 +428,7 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(ingest.Status.Bus).To(Equal(updateBus), "Ingestor bus status is not the same as provided as input") + Expect(ingest.Status.Queue).To(Equal(updateQueue), "Ingestor queue status is not the same as provided as input") // Get instance of current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -438,7 +438,7 @@ var _ = Describe("indingsep test", func() { // Verify Indexer Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(index.Status.Bus).To(Equal(updateBus), "Indexer bus status is not the same as provided as input") + Expect(index.Status.Queue).To(Equal(updateQueue), "Indexer queue status is not the same as provided as input") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files") diff --git a/test/testenv/deployment.go b/test/testenv/deployment.go index 3a7ba21d2..00d8f1e95 100644 --- a/test/testenv/deployment.go +++ b/test/testenv/deployment.go @@ -431,9 +431,9 @@ func (d *Deployment) DeployClusterMasterWithSmartStoreIndexes(ctx context.Contex } // DeployIndexerCluster deploys the indexer cluster -func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, bus, lms corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { +func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, queue, lms corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { d.testenv.Log.Info("Deploying indexer cluster", "name", name, "CM", clusterManagerRef) 
- indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, bus, lms, serviceAccountName) + indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, queue, lms, serviceAccountName) pdata, _ := json.Marshal(indexer) d.testenv.Log.Info("indexer cluster spec", "cr", string(pdata)) deployed, err := d.deployCR(ctx, name, indexer) @@ -445,10 +445,10 @@ func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseMana } // DeployIngestorCluster deploys the ingestor cluster -func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, bus, lms corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { +func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, queue, lms corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { d.testenv.Log.Info("Deploying ingestor cluster", "name", name) - ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, bus, lms, serviceAccountName) + ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, queue, lms, serviceAccountName) pdata, _ := json.Marshal(ingestor) d.testenv.Log.Info("ingestor cluster spec", "cr", string(pdata)) @@ -460,20 +460,20 @@ func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, cou return deployed.(*enterpriseApi.IngestorCluster), err } -// DeployBus deploys the bus -func (d *Deployment) DeployBus(ctx context.Context, name string, bus enterpriseApi.BusSpec) (*enterpriseApi.Bus, error) { - d.testenv.Log.Info("Deploying bus", "name", name) +// DeployQueue deploys the queue +func (d *Deployment) DeployQueue(ctx context.Context, name string, queue enterpriseApi.QueueSpec) (*enterpriseApi.Queue, error) { + 
d.testenv.Log.Info("Deploying queue", "name", name) - busCfg := newBus(name, d.testenv.namespace, bus) - pdata, _ := json.Marshal(busCfg) + queueCfg := newQueue(name, d.testenv.namespace, queue) + pdata, _ := json.Marshal(queueCfg) - d.testenv.Log.Info("bus spec", "cr", string(pdata)) - deployed, err := d.deployCR(ctx, name, busCfg) + d.testenv.Log.Info("queue spec", "cr", string(pdata)) + deployed, err := d.deployCR(ctx, name, queueCfg) if err != nil { return nil, err } - return deployed.(*enterpriseApi.Bus), err + return deployed.(*enterpriseApi.Queue), err } // DeployLargeMessageStore deploys the large message store @@ -648,13 +648,13 @@ func (d *Deployment) UpdateCR(ctx context.Context, cr client.Object) error { ucr := cr.(*enterpriseApi.IngestorCluster) current.Spec = ucr.Spec cobject = current - case "Bus": - current := &enterpriseApi.Bus{} + case "Queue": + current := &enterpriseApi.Queue{} err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current) if err != nil { return err } - ucr := cr.(*enterpriseApi.Bus) + ucr := cr.(*enterpriseApi.Queue) current.Spec = ucr.Spec cobject = current case "LargeMessageStore": diff --git a/test/testenv/util.go b/test/testenv/util.go index 28bd67a13..f71cc31f3 100644 --- a/test/testenv/util.go +++ b/test/testenv/util.go @@ -359,7 +359,7 @@ func newClusterMasterWithGivenIndexes(name, ns, licenseManagerName, ansibleConfi } // newIndexerCluster creates and initialize the CR for IndexerCluster Kind -func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, bus, lms corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster { +func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, queue, lms corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster { licenseMasterRef, licenseManagerRef := swapLicenseManager(name, licenseManagerName) 
clusterMasterRef, clusterManagerRef := swapClusterManager(name, clusterManagerRef) @@ -396,8 +396,8 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste }, Defaults: ansibleConfig, }, - Replicas: int32(replicas), - BusRef: bus, + Replicas: int32(replicas), + QueueRef: queue, LargeMessageStoreRef: lms, }, } @@ -406,7 +406,7 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste } // newIngestorCluster creates and initialize the CR for IngestorCluster Kind -func newIngestorCluster(name, ns string, replicas int, splunkImage string, bus, lms corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IngestorCluster { +func newIngestorCluster(name, ns string, replicas int, splunkImage string, queue, lms corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IngestorCluster { return &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IngestorCluster", @@ -427,23 +427,23 @@ func newIngestorCluster(name, ns string, replicas int, splunkImage string, bus, }, }, Replicas: int32(replicas), - BusRef: bus, + QueueRef: queue, LargeMessageStoreRef: lms, }, } } -// newBus creates and initializes the CR for Bus Kind -func newBus(name, ns string, bus enterpriseApi.BusSpec) *enterpriseApi.Bus { - return &enterpriseApi.Bus{ +// newQueue creates and initializes the CR for Queue Kind +func newQueue(name, ns string, queue enterpriseApi.QueueSpec) *enterpriseApi.Queue { + return &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ - Kind: "Bus", + Kind: "Queue", }, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: ns, }, - Spec: bus, + Spec: queue, } } From b6f5b0bda26fad02b530955bfb8e3dab40cb9380 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Fri, 19 Dec 2025 09:35:30 +0100 Subject: [PATCH 12/25] CSPL-4358 Rename LargeMessageStore to ObjectStorage --- PROJECT | 2 +- api/v4/indexercluster_types.go | 10 +- api/v4/ingestorcluster_types.go | 8 +- ...messagestore.go => objectstorage_types.go} | 54 
+++--- api/v4/queue_types.go | 10 +- api/v4/zz_generated.deepcopy.go | 170 +++++++++--------- cmd/main.go | 4 +- ...enterprise.splunk.com_indexerclusters.yaml | 113 ++++++------ ...nterprise.splunk.com_ingestorclusters.yaml | 98 +++++----- ...enterprise.splunk.com_objectstorages.yaml} | 22 +-- config/crd/kustomization.yaml | 2 +- ...le.yaml => objectstorage_editor_role.yaml} | 6 +- ...le.yaml => objectstorage_viewer_role.yaml} | 6 +- config/rbac/role.yaml | 6 +- ....yaml => enterprise_v4_objectstorage.yaml} | 4 +- config/samples/kustomization.yaml | 2 +- docs/CustomResources.md | 18 +- docs/IndexIngestionSeparation.md | 72 ++++---- .../enterprise_v4_indexercluster.yaml | 4 +- .../enterprise_v4_ingestorcluster.yaml | 10 +- .../enterprise_v4_largemessagestores.yaml | 28 --- .../enterprise_v4_objectstorages.yaml | 28 +++ helm-chart/splunk-enterprise/values.yaml | 4 +- .../splunk-operator/templates/rbac/role.yaml | 6 +- .../controller/indexercluster_controller.go | 8 +- .../controller/ingestorcluster_controller.go | 10 +- .../ingestorcluster_controller_test.go | 24 +-- ...troller.go => objectstorage_controller.go} | 38 ++-- ...st.go => objectstorage_controller_test.go} | 133 +++++++------- internal/controller/suite_test.go | 2 +- internal/controller/testutils/new.go | 6 +- .../01-assert.yaml | 8 +- .../02-assert.yaml | 2 +- .../splunk_index_ingest_sep.yaml | 12 +- pkg/splunk/enterprise/indexercluster.go | 82 ++++----- pkg/splunk/enterprise/indexercluster_test.go | 54 +++--- pkg/splunk/enterprise/ingestorcluster.go | 63 ++++--- pkg/splunk/enterprise/ingestorcluster_test.go | 64 +++---- ...{largemessagestore.go => objectstorage.go} | 6 +- ...agestore_test.go => objectstorage_test.go} | 20 +-- pkg/splunk/enterprise/types.go | 8 +- pkg/splunk/enterprise/util.go | 14 +- ...dex_and_ingestion_separation_suite_test.go | 2 +- .../index_and_ingestion_separation_test.go | 58 +++--- test/testenv/deployment.go | 30 ++-- test/testenv/util.go | 18 +- 46 files changed, 672 
insertions(+), 677 deletions(-) rename api/v4/{largemessagestore.go => objectstorage_types.go} (66%) rename config/crd/bases/{enterprise.splunk.com_largemessagestores.yaml => enterprise.splunk.com_objectstorages.yaml} (86%) rename config/rbac/{largemessagestore_editor_role.yaml => objectstorage_editor_role.yaml} (87%) rename config/rbac/{largemessagestore_viewer_role.yaml => objectstorage_viewer_role.yaml} (87%) rename config/samples/{enterprise_v4_largemessagestore.yaml => enterprise_v4_objectstorage.yaml} (71%) delete mode 100644 helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml create mode 100644 helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml rename internal/controller/{largemessagestore_controller.go => objectstorage_controller.go} (68%) rename internal/controller/{largemessagestore_controller_test.go => objectstorage_controller_test.go} (51%) rename pkg/splunk/enterprise/{largemessagestore.go => objectstorage.go} (89%) rename pkg/splunk/enterprise/{largemessagestore_test.go => objectstorage_test.go} (82%) diff --git a/PROJECT b/PROJECT index c2f3680d3..e87979069 100644 --- a/PROJECT +++ b/PROJECT @@ -137,7 +137,7 @@ resources: controller: true domain: splunk.com group: enterprise - kind: LargeMessageStore + kind: ObjectStorage path: github.com/splunk/splunk-operator/api/v4 version: v4 version: "3" diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 5e76d3e57..e74f900a7 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -34,7 +34,7 @@ const ( IndexerClusterPausedAnnotation = "indexercluster.enterprise.splunk.com/paused" ) -// +kubebuilder:validation:XValidation:rule="has(self.queueRef) == has(self.largeMessageStoreRef)",message="queueRef and largeMessageStoreRef must both be set or both be empty" +// +kubebuilder:validation:XValidation:rule="has(self.queueRef) == has(self.objectStorageRef)",message="queueRef and objectStorageRef must both be set 
or both be empty" // IndexerClusterSpec defines the desired state of a Splunk Enterprise indexer cluster type IndexerClusterSpec struct { CommonSplunkSpec `json:",inline"` @@ -44,8 +44,8 @@ type IndexerClusterSpec struct { QueueRef corev1.ObjectReference `json:"queueRef"` // +optional - // Large Message Store reference - LargeMessageStoreRef corev1.ObjectReference `json:"largeMessageStoreRef"` + // Object Storage reference + ObjectStorageRef corev1.ObjectReference `json:"objectStorageRef"` // Number of search head pods; a search head cluster will be created if > 1 Replicas int32 `json:"replicas"` @@ -124,8 +124,8 @@ type IndexerClusterStatus struct { // Queue Queue *QueueSpec `json:"queue,omitempty"` - // Large Message Store - LargeMessageStore *LargeMessageStoreSpec `json:"largeMessageStore,omitempty"` + // Object Storage + ObjectStorage *ObjectStorageSpec `json:"objectStorage,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index aa2281864..f2e061284 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -44,8 +44,8 @@ type IngestorClusterSpec struct { QueueRef corev1.ObjectReference `json:"queueRef"` // +kubebuilder:validation:Required - // Large Message Store reference - LargeMessageStoreRef corev1.ObjectReference `json:"largeMessageStoreRef"` + // Object Storage reference + ObjectStorageRef corev1.ObjectReference `json:"objectStorageRef"` } // IngestorClusterStatus defines the observed state of Ingestor Cluster @@ -77,8 +77,8 @@ type IngestorClusterStatus struct { // Queue Queue *QueueSpec `json:"queue,omitempty"` - // Large Message Store - LargeMessageStore *LargeMessageStoreSpec `json:"largeMessageStore,omitempty"` + // Object Storage + ObjectStorage *ObjectStorageSpec `json:"objectStorage,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v4/largemessagestore.go b/api/v4/objectstorage_types.go 
similarity index 66% rename from api/v4/largemessagestore.go rename to api/v4/objectstorage_types.go index 26c986f2d..80fcd45cf 100644 --- a/api/v4/largemessagestore.go +++ b/api/v4/objectstorage_types.go @@ -23,14 +23,14 @@ import ( ) const ( - // LargeMessageStorePausedAnnotation is the annotation that pauses the reconciliation (triggers + // ObjectStoragePausedAnnotation is the annotation that pauses the reconciliation (triggers // an immediate requeue) - LargeMessageStorePausedAnnotation = "largemessagestore.enterprise.splunk.com/paused" + ObjectStoragePausedAnnotation = "objectstorage.enterprise.splunk.com/paused" ) // +kubebuilder:validation:XValidation:rule="self.provider != 's3' || has(self.s3)",message="s3 must be provided when provider is s3" -// LargeMessageStoreSpec defines the desired state of LargeMessageStore -type LargeMessageStoreSpec struct { +// ObjectStorageSpec defines the desired state of ObjectStorage +type ObjectStorageSpec struct { // +kubebuilder:validation:Required // +kubebuilder:validation:Enum=s3 // Provider of queue resources @@ -53,8 +53,8 @@ type S3Spec struct { Path string `json:"path"` } -// LargeMessageStoreStatus defines the observed state of LargeMessageStore. -type LargeMessageStoreStatus struct { +// ObjectStorageStatus defines the observed state of ObjectStorage. 
+type ObjectStorageStatus struct { // Phase of the large message store Phase Phase `json:"phase"` @@ -68,27 +68,27 @@ type LargeMessageStoreStatus struct { // +kubebuilder:object:root=true // +kubebuilder:subresource:status -// LargeMessageStore is the Schema for a Splunk Enterprise large message store +// ObjectStorage is the Schema for a Splunk Enterprise object storage // +k8s:openapi-gen=true // +kubebuilder:subresource:status // +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.replicas,selectorpath=.status.selector -// +kubebuilder:resource:path=largemessagestores,scope=Namespaced,shortName=lms -// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of large message store" -// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of large message store resource" +// +kubebuilder:resource:path=objectstorages,scope=Namespaced,shortName=os +// +kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="Status of object storage" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="Age of object storage resource" // +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.message",description="Auxillary message describing CR status" // +kubebuilder:storageversion -// LargeMessageStore is the Schema for the largemessagestores API -type LargeMessageStore struct { +// ObjectStorage is the Schema for the objectstorages API +type ObjectStorage struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty,omitzero"` - Spec LargeMessageStoreSpec `json:"spec"` - Status LargeMessageStoreStatus `json:"status,omitempty,omitzero"` + Spec ObjectStorageSpec `json:"spec"` + Status ObjectStorageStatus `json:"status,omitempty,omitzero"` } // DeepCopyObject implements runtime.Object -func (in *LargeMessageStore) DeepCopyObject() runtime.Object { 
+func (in *ObjectStorage) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -97,42 +97,42 @@ func (in *LargeMessageStore) DeepCopyObject() runtime.Object { // +kubebuilder:object:root=true -// LargeMessageStoreList contains a list of LargeMessageStore -type LargeMessageStoreList struct { +// ObjectStorageList contains a list of ObjectStorage +type ObjectStorageList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` - Items []LargeMessageStore `json:"items"` + Items []ObjectStorage `json:"items"` } func init() { - SchemeBuilder.Register(&LargeMessageStore{}, &LargeMessageStoreList{}) + SchemeBuilder.Register(&ObjectStorage{}, &ObjectStorageList{}) } // NewEvent creates a new event associated with the object and ready // to be published to Kubernetes API -func (bc *LargeMessageStore) NewEvent(eventType, reason, message string) corev1.Event { +func (os *ObjectStorage) NewEvent(eventType, reason, message string) corev1.Event { t := metav1.Now() return corev1.Event{ ObjectMeta: metav1.ObjectMeta{ GenerateName: reason + "-", - Namespace: bc.ObjectMeta.Namespace, + Namespace: os.ObjectMeta.Namespace, }, InvolvedObject: corev1.ObjectReference{ - Kind: "LargeMessageStore", - Namespace: bc.Namespace, - Name: bc.Name, - UID: bc.UID, + Kind: "ObjectStorage", + Namespace: os.Namespace, + Name: os.Name, + UID: os.UID, APIVersion: GroupVersion.String(), }, Reason: reason, Message: message, Source: corev1.EventSource{ - Component: "splunk-large-message-store-controller", + Component: "splunk-object-storage-controller", }, FirstTimestamp: t, LastTimestamp: t, Count: 1, Type: eventType, - ReportingController: "enterprise.splunk.com/large-message-store-controller", + ReportingController: "enterprise.splunk.com/object-storage-controller", } } diff --git a/api/v4/queue_types.go b/api/v4/queue_types.go index a094b76ce..06703ac95 100644 --- a/api/v4/queue_types.go +++ b/api/v4/queue_types.go @@ -120,18 +120,18 @@ func 
init() { // NewEvent creates a new event associated with the object and ready // to be published to Kubernetes API -func (bc *Queue) NewEvent(eventType, reason, message string) corev1.Event { +func (os *Queue) NewEvent(eventType, reason, message string) corev1.Event { t := metav1.Now() return corev1.Event{ ObjectMeta: metav1.ObjectMeta{ GenerateName: reason + "-", - Namespace: bc.ObjectMeta.Namespace, + Namespace: os.ObjectMeta.Namespace, }, InvolvedObject: corev1.ObjectReference{ Kind: "Queue", - Namespace: bc.Namespace, - Name: bc.Name, - UID: bc.UID, + Namespace: os.Namespace, + Name: os.Name, + UID: os.UID, APIVersion: GroupVersion.String(), }, Reason: reason, diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index 2fb0eebc8..dd9b2f347 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -512,7 +512,7 @@ func (in *IndexerClusterSpec) DeepCopyInto(out *IndexerClusterSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) out.QueueRef = in.QueueRef - out.LargeMessageStoreRef = in.LargeMessageStoreRef + out.ObjectStorageRef = in.ObjectStorageRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterSpec. 
@@ -550,9 +550,9 @@ func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { *out = new(QueueSpec) **out = **in } - if in.LargeMessageStore != nil { - in, out := &in.LargeMessageStore, &out.LargeMessageStore - *out = new(LargeMessageStoreSpec) + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = new(ObjectStorageSpec) **out = **in } } @@ -624,7 +624,7 @@ func (in *IngestorClusterSpec) DeepCopyInto(out *IngestorClusterSpec) { in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) out.QueueRef = in.QueueRef - out.LargeMessageStoreRef = in.LargeMessageStoreRef + out.ObjectStorageRef = in.ObjectStorageRef } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterSpec. @@ -653,9 +653,9 @@ func (in *IngestorClusterStatus) DeepCopyInto(out *IngestorClusterStatus) { *out = new(QueueSpec) **out = **in } - if in.LargeMessageStore != nil { - in, out := &in.LargeMessageStore, &out.LargeMessageStore - *out = new(LargeMessageStoreSpec) + if in.ObjectStorage != nil { + in, out := &in.ObjectStorage, &out.ObjectStorage + *out = new(ObjectStorageSpec) **out = **in } } @@ -671,50 +671,58 @@ func (in *IngestorClusterStatus) DeepCopy() *IngestorClusterStatus { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LargeMessageStore) DeepCopyInto(out *LargeMessageStore) { +func (in *LicenseManager) DeepCopyInto(out *LicenseManager) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LargeMessageStore. 
-func (in *LargeMessageStore) DeepCopy() *LargeMessageStore { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseManager. +func (in *LicenseManager) DeepCopy() *LicenseManager { if in == nil { return nil } - out := new(LargeMessageStore) + out := new(LicenseManager) in.DeepCopyInto(out) return out } +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LicenseManager) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LargeMessageStoreList) DeepCopyInto(out *LargeMessageStoreList) { +func (in *LicenseManagerList) DeepCopyInto(out *LicenseManagerList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]LargeMessageStore, len(*in)) + *out = make([]LicenseManager, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LargeMessageStoreList. -func (in *LargeMessageStoreList) DeepCopy() *LargeMessageStoreList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseManagerList. +func (in *LicenseManagerList) DeepCopy() *LicenseManagerList { if in == nil { return nil } - out := new(LargeMessageStoreList) + out := new(LicenseManagerList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *LargeMessageStoreList) DeepCopyObject() runtime.Object { +func (in *LicenseManagerList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -722,45 +730,40 @@ func (in *LargeMessageStoreList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LargeMessageStoreSpec) DeepCopyInto(out *LargeMessageStoreSpec) { +func (in *LicenseManagerSpec) DeepCopyInto(out *LicenseManagerSpec) { *out = *in - out.S3 = in.S3 + in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) + in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LargeMessageStoreSpec. -func (in *LargeMessageStoreSpec) DeepCopy() *LargeMessageStoreSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseManagerSpec. +func (in *LicenseManagerSpec) DeepCopy() *LicenseManagerSpec { if in == nil { return nil } - out := new(LargeMessageStoreSpec) + out := new(LicenseManagerSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LargeMessageStoreStatus) DeepCopyInto(out *LargeMessageStoreStatus) { +func (in *LicenseManagerStatus) DeepCopyInto(out *LicenseManagerStatus) { *out = *in - if in.ResourceRevMap != nil { - in, out := &in.ResourceRevMap, &out.ResourceRevMap - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } + in.AppContext.DeepCopyInto(&out.AppContext) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LargeMessageStoreStatus. -func (in *LargeMessageStoreStatus) DeepCopy() *LargeMessageStoreStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseManagerStatus. 
+func (in *LicenseManagerStatus) DeepCopy() *LicenseManagerStatus { if in == nil { return nil } - out := new(LargeMessageStoreStatus) + out := new(LicenseManagerStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LicenseManager) DeepCopyInto(out *LicenseManager) { +func (in *MonitoringConsole) DeepCopyInto(out *MonitoringConsole) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) @@ -768,18 +771,18 @@ func (in *LicenseManager) DeepCopyInto(out *LicenseManager) { in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseManager. -func (in *LicenseManager) DeepCopy() *LicenseManager { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConsole. +func (in *MonitoringConsole) DeepCopy() *MonitoringConsole { if in == nil { return nil } - out := new(LicenseManager) + out := new(MonitoringConsole) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LicenseManager) DeepCopyObject() runtime.Object { +func (in *MonitoringConsole) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -787,31 +790,31 @@ func (in *LicenseManager) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *LicenseManagerList) DeepCopyInto(out *LicenseManagerList) { +func (in *MonitoringConsoleList) DeepCopyInto(out *MonitoringConsoleList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]LicenseManager, len(*in)) + *out = make([]MonitoringConsole, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseManagerList. -func (in *LicenseManagerList) DeepCopy() *LicenseManagerList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConsoleList. +func (in *MonitoringConsoleList) DeepCopy() *MonitoringConsoleList { if in == nil { return nil } - out := new(LicenseManagerList) + out := new(MonitoringConsoleList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LicenseManagerList) DeepCopyObject() runtime.Object { +func (in *MonitoringConsoleList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -819,91 +822,91 @@ func (in *LicenseManagerList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LicenseManagerSpec) DeepCopyInto(out *LicenseManagerSpec) { +func (in *MonitoringConsoleSpec) DeepCopyInto(out *MonitoringConsoleSpec) { *out = *in in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseManagerSpec. -func (in *LicenseManagerSpec) DeepCopy() *LicenseManagerSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConsoleSpec. 
+func (in *MonitoringConsoleSpec) DeepCopy() *MonitoringConsoleSpec { if in == nil { return nil } - out := new(LicenseManagerSpec) + out := new(MonitoringConsoleSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LicenseManagerStatus) DeepCopyInto(out *LicenseManagerStatus) { +func (in *MonitoringConsoleStatus) DeepCopyInto(out *MonitoringConsoleStatus) { *out = *in + out.BundlePushTracker = in.BundlePushTracker + if in.ResourceRevMap != nil { + in, out := &in.ResourceRevMap, &out.ResourceRevMap + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } in.AppContext.DeepCopyInto(&out.AppContext) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LicenseManagerStatus. -func (in *LicenseManagerStatus) DeepCopy() *LicenseManagerStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConsoleStatus. +func (in *MonitoringConsoleStatus) DeepCopy() *MonitoringConsoleStatus { if in == nil { return nil } - out := new(LicenseManagerStatus) + out := new(MonitoringConsoleStatus) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MonitoringConsole) DeepCopyInto(out *MonitoringConsole) { +func (in *ObjectStorage) DeepCopyInto(out *ObjectStorage) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) + out.Spec = in.Spec in.Status.DeepCopyInto(&out.Status) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConsole. -func (in *MonitoringConsole) DeepCopy() *MonitoringConsole { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorage. 
+func (in *ObjectStorage) DeepCopy() *ObjectStorage { if in == nil { return nil } - out := new(MonitoringConsole) + out := new(ObjectStorage) in.DeepCopyInto(out) return out } -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MonitoringConsole) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MonitoringConsoleList) DeepCopyInto(out *MonitoringConsoleList) { +func (in *ObjectStorageList) DeepCopyInto(out *ObjectStorageList) { *out = *in out.TypeMeta = in.TypeMeta in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]MonitoringConsole, len(*in)) + *out = make([]ObjectStorage, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } } } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConsoleList. -func (in *MonitoringConsoleList) DeepCopy() *MonitoringConsoleList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageList. +func (in *ObjectStorageList) DeepCopy() *ObjectStorageList { if in == nil { return nil } - out := new(MonitoringConsoleList) + out := new(ObjectStorageList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MonitoringConsoleList) DeepCopyObject() runtime.Object { +func (in *ObjectStorageList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -911,26 +914,24 @@ func (in *MonitoringConsoleList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MonitoringConsoleSpec) DeepCopyInto(out *MonitoringConsoleSpec) { +func (in *ObjectStorageSpec) DeepCopyInto(out *ObjectStorageSpec) { *out = *in - in.CommonSplunkSpec.DeepCopyInto(&out.CommonSplunkSpec) - in.AppFrameworkConfig.DeepCopyInto(&out.AppFrameworkConfig) + out.S3 = in.S3 } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConsoleSpec. -func (in *MonitoringConsoleSpec) DeepCopy() *MonitoringConsoleSpec { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageSpec. +func (in *ObjectStorageSpec) DeepCopy() *ObjectStorageSpec { if in == nil { return nil } - out := new(MonitoringConsoleSpec) + out := new(ObjectStorageSpec) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MonitoringConsoleStatus) DeepCopyInto(out *MonitoringConsoleStatus) { +func (in *ObjectStorageStatus) DeepCopyInto(out *ObjectStorageStatus) { *out = *in - out.BundlePushTracker = in.BundlePushTracker if in.ResourceRevMap != nil { in, out := &in.ResourceRevMap, &out.ResourceRevMap *out = make(map[string]string, len(*in)) @@ -938,15 +939,14 @@ func (in *MonitoringConsoleStatus) DeepCopyInto(out *MonitoringConsoleStatus) { (*out)[key] = val } } - in.AppContext.DeepCopyInto(&out.AppContext) } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MonitoringConsoleStatus. -func (in *MonitoringConsoleStatus) DeepCopy() *MonitoringConsoleStatus { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectStorageStatus. 
+func (in *ObjectStorageStatus) DeepCopy() *ObjectStorageStatus { if in == nil { return nil } - out := new(MonitoringConsoleStatus) + out := new(ObjectStorageStatus) in.DeepCopyInto(out) return out } diff --git a/cmd/main.go b/cmd/main.go index 72a3e38c7..dfb9c87e1 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -237,11 +237,11 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "Queue") os.Exit(1) } - if err := (&controller.LargeMessageStoreReconciler{ + if err := (&controller.ObjectStorageReconciler{ Client: mgr.GetClient(), Scheme: mgr.GetScheme(), }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "LargeMessageStore") + setupLog.Error(err, "unable to create controller", "controller", "ObjectStorage") os.Exit(1) } //+kubebuilder:scaffold:builder diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 90c266230..a9fc2d811 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -5437,49 +5437,6 @@ spec: type: object x-kubernetes-map-type: atomic type: array - largeMessageStoreRef: - description: Large Message Store reference - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). This syntax is chosen only to have some well-defined way of - referencing a part of an object. 
- type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic licenseManagerRef: description: LicenseManagerRef refers to a Splunk Enterprise license manager managed by the operator within Kubernetes @@ -5647,6 +5604,49 @@ spec: type: string type: object x-kubernetes-map-type: atomic + objectStorageRef: + description: Object Storage reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). This syntax is chosen only to have some well-defined way of + referencing a part of an object. 
+ type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic queueRef: description: Queue reference properties: @@ -8329,9 +8329,8 @@ spec: type: array type: object x-kubernetes-validations: - - message: queueRef and largeMessageStoreRef must both be set or both - be empty - rule: has(self.queueRef) == has(self.largeMessageStoreRef) + - message: queueRef and objectStorageRef must both be set or both be empty + rule: has(self.queueRef) == has(self.objectStorageRef) status: description: IndexerClusterStatus defines the observed state of a Splunk Enterprise indexer cluster @@ -8375,8 +8374,17 @@ spec: initialized_flag: description: Indicates if the cluster is initialized. type: boolean - largeMessageStore: - description: Large Message Store + maintenance_mode: + description: Indicates if the cluster is in maintenance mode. 
+ type: boolean + message: + description: Auxillary message describing CR status + type: string + namespace_scoped_secret_resource_version: + description: Indicates resource version of namespace scoped secret + type: string + objectStorage: + description: Object Storage properties: provider: description: Provider of queue resources @@ -8404,15 +8412,6 @@ spec: x-kubernetes-validations: - message: s3 must be provided when provider is s3 rule: self.provider != 's3' || has(self.s3) - maintenance_mode: - description: Indicates if the cluster is in maintenance mode. - type: boolean - message: - description: Auxillary message describing CR status - type: string - namespace_scoped_secret_resource_version: - description: Indicates resource version of namespace scoped secret - type: string peers: description: status of each indexer cluster peer items: diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 37c820c4c..46a142719 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -1413,49 +1413,6 @@ spec: type: object x-kubernetes-map-type: atomic type: array - largeMessageStoreRef: - description: Large Message Store reference - properties: - apiVersion: - description: API version of the referent. - type: string - fieldPath: - description: |- - If referring to a piece of an object instead of an entire object, this string - should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - For example, if the object reference is to a container within a pod, this would take on a value like: - "spec.containers{name}" (where "name" refers to the name of the container that triggered - the event) or if no container name is specified "spec.containers[2]" (container with - index 2 in this pod). 
This syntax is chosen only to have some well-defined way of - referencing a part of an object. - type: string - kind: - description: |- - Kind of the referent. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - type: string - name: - description: |- - Name of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - type: string - namespace: - description: |- - Namespace of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - type: string - resourceVersion: - description: |- - Specific resourceVersion to which this reference is made, if any. - More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency - type: string - uid: - description: |- - UID of the referent. - More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - type: string - type: object - x-kubernetes-map-type: atomic licenseManagerRef: description: LicenseManagerRef refers to a Splunk Enterprise license manager managed by the operator within Kubernetes @@ -1623,6 +1580,49 @@ spec: type: string type: object x-kubernetes-map-type: atomic + objectStorageRef: + description: Object Storage reference + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: |- + If referring to a piece of an object instead of an entire object, this string + should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within a pod, this would take on a value like: + "spec.containers{name}" (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" (container with + index 2 in this pod). 
This syntax is chosen only to have some well-defined way of + referencing a part of an object. + type: string + kind: + description: |- + Kind of the referent. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + name: + description: |- + Name of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + type: string + namespace: + description: |- + Namespace of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + type: string + resourceVersion: + description: |- + Specific resourceVersion to which this reference is made, if any. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + type: string + uid: + description: |- + UID of the referent. + More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + type: string + type: object + x-kubernetes-map-type: atomic queueRef: description: Queue reference properties: @@ -4303,7 +4303,7 @@ spec: type: object type: array required: - - largeMessageStoreRef + - objectStorageRef - queueRef type: object status: @@ -4591,8 +4591,11 @@ spec: description: App Framework version info for future use type: integer type: object - largeMessageStore: - description: Large Message Store + message: + description: Auxillary message describing CR status + type: string + objectStorage: + description: Object Storage properties: provider: description: Provider of queue resources @@ -4620,9 +4623,6 @@ spec: x-kubernetes-validations: - message: s3 must be provided when provider is s3 rule: self.provider != 's3' || has(self.s3) - message: - description: Auxillary message describing CR status - type: string phase: description: Phase of the ingestor pods enum: diff --git a/config/crd/bases/enterprise.splunk.com_largemessagestores.yaml 
b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml similarity index 86% rename from config/crd/bases/enterprise.splunk.com_largemessagestores.yaml rename to config/crd/bases/enterprise.splunk.com_objectstorages.yaml index 562cd773c..1456234c6 100644 --- a/config/crd/bases/enterprise.splunk.com_largemessagestores.yaml +++ b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml @@ -4,24 +4,24 @@ kind: CustomResourceDefinition metadata: annotations: controller-gen.kubebuilder.io/version: v0.16.1 - name: largemessagestores.enterprise.splunk.com + name: objectstorages.enterprise.splunk.com spec: group: enterprise.splunk.com names: - kind: LargeMessageStore - listKind: LargeMessageStoreList - plural: largemessagestores + kind: ObjectStorage + listKind: ObjectStorageList + plural: objectstorages shortNames: - - lms - singular: largemessagestore + - os + singular: objectstorage scope: Namespaced versions: - additionalPrinterColumns: - - description: Status of large message store + - description: Status of object storage jsonPath: .status.phase name: Phase type: string - - description: Age of large message store resource + - description: Age of object storage resource jsonPath: .metadata.creationTimestamp name: Age type: date @@ -32,7 +32,7 @@ spec: name: v4 schema: openAPIV3Schema: - description: LargeMessageStore is the Schema for the largemessagestores API + description: ObjectStorage is the Schema for the objectstorages API properties: apiVersion: description: |- @@ -52,7 +52,7 @@ spec: metadata: type: object spec: - description: LargeMessageStoreSpec defines the desired state of LargeMessageStore + description: ObjectStorageSpec defines the desired state of ObjectStorage properties: provider: description: Provider of queue resources @@ -81,7 +81,7 @@ spec: - message: s3 must be provided when provider is s3 rule: self.provider != 's3' || has(self.s3) status: - description: LargeMessageStoreStatus defines the observed state of LargeMessageStore. 
+ description: ObjectStorageStatus defines the observed state of ObjectStorage. properties: message: description: Auxillary message describing CR status diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index f80dfec5e..0304146cd 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -12,7 +12,7 @@ resources: - bases/enterprise.splunk.com_standalones.yaml - bases/enterprise.splunk.com_ingestorclusters.yaml - bases/enterprise.splunk.com_queues.yaml -- bases/enterprise.splunk.com_largemessagestores.yaml +- bases/enterprise.splunk.com_objectstorages.yaml #+kubebuilder:scaffold:crdkustomizeresource diff --git a/config/rbac/largemessagestore_editor_role.yaml b/config/rbac/objectstorage_editor_role.yaml similarity index 87% rename from config/rbac/largemessagestore_editor_role.yaml rename to config/rbac/objectstorage_editor_role.yaml index 614d09ad2..70323227f 100644 --- a/config/rbac/largemessagestore_editor_role.yaml +++ b/config/rbac/objectstorage_editor_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: largemessagestore-editor-role + name: objectstorage-editor-role rules: - apiGroups: - enterprise.splunk.com resources: - - largemessagestores + - objectstorages verbs: - create - delete @@ -25,6 +25,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - largemessagestores/status + - objectstorages/status verbs: - get diff --git a/config/rbac/largemessagestore_viewer_role.yaml b/config/rbac/objectstorage_viewer_role.yaml similarity index 87% rename from config/rbac/largemessagestore_viewer_role.yaml rename to config/rbac/objectstorage_viewer_role.yaml index 36cfde351..9764699bc 100644 --- a/config/rbac/largemessagestore_viewer_role.yaml +++ b/config/rbac/objectstorage_viewer_role.yaml @@ -8,12 +8,12 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - name: largemessagestore-viewer-role + name: objectstorage-viewer-role rules: - 
apiGroups: - enterprise.splunk.com resources: - - largemessagestores + - objectstorages verbs: - get - list @@ -21,6 +21,6 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - largemessagestores/status + - objectstorages/status verbs: - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 295e080c6..973105d16 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -51,10 +51,10 @@ rules: - clustermasters - indexerclusters - ingestorclusters - - largemessagestores - licensemanagers - licensemasters - monitoringconsoles + - objectstorages - queues - searchheadclusters - standalones @@ -73,10 +73,10 @@ rules: - clustermasters/finalizers - indexerclusters/finalizers - ingestorclusters/finalizers - - largemessagestores/finalizers - licensemanagers/finalizers - licensemasters/finalizers - monitoringconsoles/finalizers + - objectstorages/finalizers - queues/finalizers - searchheadclusters/finalizers - standalones/finalizers @@ -89,10 +89,10 @@ rules: - clustermasters/status - indexerclusters/status - ingestorclusters/status - - largemessagestores/status - licensemanagers/status - licensemasters/status - monitoringconsoles/status + - objectstorages/status - queues/status - searchheadclusters/status - standalones/status diff --git a/config/samples/enterprise_v4_largemessagestore.yaml b/config/samples/enterprise_v4_objectstorage.yaml similarity index 71% rename from config/samples/enterprise_v4_largemessagestore.yaml rename to config/samples/enterprise_v4_objectstorage.yaml index 508ba0b77..b693a14e0 100644 --- a/config/samples/enterprise_v4_largemessagestore.yaml +++ b/config/samples/enterprise_v4_objectstorage.yaml @@ -1,7 +1,7 @@ apiVersion: enterprise.splunk.com/v4 -kind: LargeMessageStore +kind: ObjectStorage metadata: - name: largemessagestore-sample + name: objectstorage-sample finalizers: - "enterprise.splunk.com/delete-pvc" spec: {} diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 
4de2ec89d..34c05ab05 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -15,5 +15,5 @@ resources: - enterprise_v4_licensemanager.yaml - enterprise_v4_ingestorcluster.yaml - enterprise_v4_queue.yaml -- enterprise_v4_largemessagestore.yaml +- enterprise_v4_objectstorage.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/docs/CustomResources.md b/docs/CustomResources.md index f69a8fa50..157a9b123 100644 --- a/docs/CustomResources.md +++ b/docs/CustomResources.md @@ -22,7 +22,7 @@ you can use to manage Splunk Enterprise deployments in your Kubernetes cluster. - [ClusterManager Resource Spec Parameters](#clustermanager-resource-spec-parameters) - [IndexerCluster Resource Spec Parameters](#indexercluster-resource-spec-parameters) - [IngestorCluster Resource Spec Parameters](#ingestorcluster-resource-spec-parameters) - - [LargeMessageStore Resource Spec Parameters](#largemessagestore-resource-spec-parameters) + - [ObjectStorage Resource Spec Parameters](#objectstorage-resource-spec-parameters) - [MonitoringConsole Resource Spec Parameters](#monitoringconsole-resource-spec-parameters) - [Examples of Guaranteed and Burstable QoS](#examples-of-guaranteed-and-burstable-qos) - [A Guaranteed QoS Class example:](#a-guaranteed-qos-class-example) @@ -377,10 +377,10 @@ spec: replicas: 3 queueRef: name: queue - largeMessageStoreRef: - name: lms + objectStorageRef: + name: os ``` -Note: `queueRef` and `largeMessageStoreRef` are required fields in case of IngestorCluster resource since they will be used to connect the IngestorCluster to Queue and LargeMessageStore resources. +Note: `queueRef` and `objectStorageRef` are required fields in case of IngestorCluster resource since they will be used to connect the IngestorCluster to Queue and ObjectStorage resources. 
In addition to [Common Spec Parameters for All Resources](#common-spec-parameters-for-all-resources) and [Common Spec Parameters for All Splunk Enterprise Resources](#common-spec-parameters-for-all-splunk-enterprise-resources), @@ -390,13 +390,13 @@ the `IngestorCluster` resource provides the following `Spec` configuration param | ---------- | ------- | ----------------------------------------------------- | | replicas | integer | The number of ingestor peers (minimum of 3 which is the default) | -## LargeMessageStore Resource Spec Parameters +## ObjectStorage Resource Spec Parameters ```yaml apiVersion: enterprise.splunk.com/v4 -kind: LargeMessageStore +kind: ObjectStorage metadata: - name: lms + name: os spec: provider: s3 s3: @@ -404,7 +404,7 @@ spec: endpoint: https://s3.us-west-2.amazonaws.com ``` -LargeMessageStore inputs can be found in the table below. As of now, only S3 provider of large message store is supported. +ObjectStorage inputs can be found in the table below. As of now, only S3 provider of large message store is supported. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | @@ -536,7 +536,7 @@ The Splunk Operator controller reconciles every Splunk Enterprise CR. 
However, t | clustermanager.enterprise.splunk.com | "clustermanager.enterprise.splunk.com/paused" | | indexercluster.enterprise.splunk.com | "indexercluster.enterprise.splunk.com/paused" | | ingestorcluster.enterprise.splunk.com | "ingestorcluster.enterprise.splunk.com/paused" | -| largemessagestore.enterprise.splunk.com | "largemessagestore.enterprise.splunk.com/paused" | +| objectstorage.enterprise.splunk.com | "objectstorage.enterprise.splunk.com/paused" | | licensemaster.enterprise.splunk.com | "licensemaster.enterprise.splunk.com/paused" | | monitoringconsole.enterprise.splunk.com | "monitoringconsole.enterprise.splunk.com/paused" | | searchheadcluster.enterprise.splunk.com | "searchheadcluster.enterprise.splunk.com/paused" | diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index 257e37400..bd5d97579 100644 --- a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -55,13 +55,13 @@ spec: dlq: sqs-dlq-test ``` -# LargeMessageStore +# ObjectStorage -LargeMessageStore is introduced to store large message (messages that exceed the size of messages that can be stored in SQS) store information to be shared among IngestorCluster and IndexerCluster. +ObjectStorage is introduced to store large message (messages that exceed the size of messages that can be stored in SQS) store information to be shared among IngestorCluster and IndexerCluster. ## Spec -LargeMessageStore inputs can be found in the table below. As of now, only S3 provider of large message store is supported. +ObjectStorage inputs can be found in the table below. As of now, only S3 provider of large message store is supported. 
| Key | Type | Description | @@ -80,9 +80,9 @@ Change of any of the large message queue inputs triggers the restart of Splunk s ## Example ``` apiVersion: enterprise.splunk.com/v4 -kind: LargeMessageStore +kind: ObjectStorage metadata: - name: lms + name: os spec: provider: s3 s3: @@ -102,11 +102,11 @@ In addition to common spec inputs, the IngestorCluster resource provides the fol | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | | queueRef | corev1.ObjectReference | Message queue reference | -| largeMessageStoreRef | corev1.ObjectReference | Large message store reference | +| objectStorageRef | corev1.ObjectReference | Object storage reference | ## Example -The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and LargeMessageStore references allow the user to specify queue and bucket settings for the ingestion process. +The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the ingestion process. In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue.
The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. @@ -123,8 +123,8 @@ spec: image: splunk/splunk:${SPLUNK_IMAGE_VERSION} queueRef: name: queue - largeMessageStoreRef: - name: lms + objectStorageRef: + name: os ``` # IndexerCluster @@ -139,11 +139,11 @@ In addition to common spec inputs, the IndexerCluster resource provides the foll | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | | queueRef | corev1.ObjectReference | Message queue reference | -| largeMessageStoreRef | corev1.ObjectReference | Large message store reference | +| objectStorageRef | corev1.ObjectReference | Object storage reference | ## Example -The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and LargeMessageStore references allow the user to specify queue and bucket settings for the indexing process. +The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the indexing process. In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue.
The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. @@ -172,8 +172,8 @@ spec: image: splunk/splunk:${SPLUNK_IMAGE_VERSION} queueRef: name: queue - largeMessageStoreRef: - name: lms + objectStorageRef: + name: os ``` # Common Spec @@ -182,11 +182,11 @@ Common spec values for all SOK Custom Resources can be found in [CustomResources # Helm Charts -Queue, LargeMessageStore and IngestorCluster have been added to the splunk/splunk-enterprise Helm chart. IndexerCluster has also been enhanced to support new inputs. +Queue, ObjectStorage and IngestorCluster have been added to the splunk/splunk-enterprise Helm chart. IndexerCluster has also been enhanced to support new inputs. ## Example -Below examples describe how to define values for Queue, LargeMessageStoe, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. +Below examples describe how to define values for Queue, ObjectStorage, IngestorCluster and IndexerCluster similarly to the above yaml files specifications. ``` queue: @@ -201,9 +201,9 @@ queue: ``` ``` -largeMessageStore: +objectStorage: enabled: true - name: lms + name: os provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com @@ -218,8 +218,8 @@ ingestorCluster: serviceAccount: ingestor-sa queueRef: name: queue - largeMessageStoreRef: - name: lms + objectStorageRef: + name: os ``` ``` @@ -238,8 +238,8 @@ indexerCluster: name: cm queueRef: name: queue - largeMessageStoreRef: - name: lms + objectStorageRef: + name: os ``` # Service Account @@ -599,14 +599,14 @@ Status: Events: ``` -4. Install LargeMessageStore resource. +4. Install ObjectStorage resource. 
``` -$ cat lms.yaml +$ cat os.yaml apiVersion: enterprise.splunk.com/v4 -kind: LargeMessageStore +kind: ObjectStorage metadata: - name: lms + name: os finalizers: - enterprise.splunk.com/delete-pvc spec: @@ -617,23 +617,23 @@ spec: ``` ``` -$ kubectl apply -f lms.yaml +$ kubectl apply -f os.yaml ``` ``` -$ kubectl get lms +$ kubectl get os NAME PHASE AGE MESSAGE -lms Ready 20s +os Ready 20s ``` ``` -kubectl describe lms -Name: lms +kubectl describe os +Name: os Namespace: default Labels: Annotations: API Version: enterprise.splunk.com/v4 -Kind: LargeMessageStore +Kind: ObjectStorage Metadata: Creation Timestamp: 2025-10-27T10:25:53Z Finalizers: @@ -669,8 +669,8 @@ spec: image: splunk/splunk:${SPLUNK_IMAGE_VERSION} queueRef: name: queue - largeMessageStoreRef: - name: lms + objectStorageRef: + name: os ``` ``` @@ -704,7 +704,7 @@ Spec: Namespace: default Image: splunk/splunk:${SPLUNK_IMAGE_VERSION} Large Message Store Ref: - Name: lms + Name: os Namespace: default Replicas: 3 Service Account: ingestor-sa @@ -813,8 +813,8 @@ spec: serviceAccount: ingestor-sa queueRef: name: queue - largeMessageStoreRef: - name: lms + objectStorageRef: + name: os ``` ``` diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml index 536be0cd2..833f162aa 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml @@ -169,8 +169,8 @@ items: {{- if .namespace }} namespace: {{ .namespace }} {{- end }} - {{- with $.Values.indexerCluster.largeMessageStoreRef }} - largeMessageStoreRef: + {{- with $.Values.indexerCluster.objectStorageRef }} + objectStorageRef: name: {{ .name }} {{- if .namespace }} namespace: {{ .namespace }} diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml 
index b9ec62107..e5ab1258c 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_ingestorcluster.yaml @@ -102,11 +102,11 @@ spec: namespace: {{ $.Values.ingestorCluster.queueRef.namespace }} {{- end }} {{- end }} - {{- with $.Values.ingestorCluster.largeMessageStoreRef }} - largeMessageStoreRef: - name: {{ $.Values.ingestorCluster.largeMessageStoreRef.name }} - {{- if $.Values.ingestorCluster.largeMessageStoreRef.namespace }} - namespace: {{ $.Values.ingestorCluster.largeMessageStoreRef.namespace }} + {{- with $.Values.ingestorCluster.objectStorageRef }} + objectStorageRef: + name: {{ $.Values.ingestorCluster.objectStorageRef.name }} + {{- if $.Values.ingestorCluster.objectStorageRef.namespace }} + namespace: {{ $.Values.ingestorCluster.objectStorageRef.namespace }} {{- end }} {{- end }} {{- with .Values.ingestorCluster.extraEnv }} diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml deleted file mode 100644 index 77ef09e69..000000000 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_largemessagestores.yaml +++ /dev/null @@ -1,28 +0,0 @@ -{{- if .Values.largemessagestore }} -{{- if .Values.largemessagestore.enabled }} -apiVersion: enterprise.splunk.com/v4 -kind: LargeMessageStore -metadata: - name: {{ .Values.largemessagestore.name }} - namespace: {{ default .Release.Namespace .Values.largemessagestore.namespaceOverride }} - {{- with .Values.largemessagestore.additionalLabels }} - labels: -{{ toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.largemessagestore.additionalAnnotations }} - annotations: -{{ toYaml . 
| nindent 4 }}
-  {{- end }}
-spec:
-  provider: {{ .Values.largemessagestore.provider | quote }}
-  {{- with .Values.largemessagestore.s3 }}
-  s3:
-    {{- if .endpoint }}
-    endpoint: {{ .endpoint | quote }}
-    {{- end }}
-    {{- if .path }}
-    path: {{ .path | quote }}
-    {{- end }}
-  {{- end }}
-{{- end }}
-{{- end }}
\ No newline at end of file
diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml
new file mode 100644
index 000000000..7cd5bdca0
--- /dev/null
+++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml
@@ -0,0 +1,28 @@
+{{- if .Values.objectStorage }}
+{{- if .Values.objectStorage.enabled }}
+apiVersion: enterprise.splunk.com/v4
+kind: ObjectStorage
+metadata:
+  name: {{ .Values.objectStorage.name }}
+  namespace: {{ default .Release.Namespace .Values.objectStorage.namespaceOverride }}
+  {{- with .Values.objectStorage.additionalLabels }}
+  labels:
+{{ toYaml . | nindent 4 }}
+  {{- end }}
+  {{- with .Values.objectStorage.additionalAnnotations }}
+  annotations:
+{{ toYaml . 
| nindent 4 }} + {{- end }} +spec: + provider: {{ .Values.objectStorage.provider | quote }} + {{- with .Values.objectStorage.s3 }} + s3: + {{- if .endpoint }} + endpoint: {{ .endpoint | quote }} + {{- end }} + {{- if .path }} + path: {{ .path | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-enterprise/values.yaml b/helm-chart/splunk-enterprise/values.yaml index ea4921b52..6643728fa 100644 --- a/helm-chart/splunk-enterprise/values.yaml +++ b/helm-chart/splunk-enterprise/values.yaml @@ -352,7 +352,7 @@ indexerCluster: queueRef: {} - largeMessageStoreRef: {} + objectStorageRef: {} searchHeadCluster: @@ -903,4 +903,4 @@ ingestorCluster: queueRef: {} - largeMessageStoreRef: {} \ No newline at end of file + objectStorageRef: {} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/role.yaml b/helm-chart/splunk-operator/templates/rbac/role.yaml index 26824528f..77be54727 100644 --- a/helm-chart/splunk-operator/templates/rbac/role.yaml +++ b/helm-chart/splunk-operator/templates/rbac/role.yaml @@ -277,7 +277,7 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - largemessagestores + - objectstorages verbs: - create - delete @@ -289,13 +289,13 @@ rules: - apiGroups: - enterprise.splunk.com resources: - - largemessagestores/finalizers + - objectstorages/finalizers verbs: - update - apiGroups: - enterprise.splunk.com resources: - - largemessagestores/status + - objectstorages/status verbs: - get - patch diff --git a/internal/controller/indexercluster_controller.go b/internal/controller/indexercluster_controller.go index 2ed4d775e..7efb6e1b8 100644 --- a/internal/controller/indexercluster_controller.go +++ b/internal/controller/indexercluster_controller.go @@ -200,9 +200,9 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { return reqs }), ). 
- Watches(&enterpriseApi.LargeMessageStore{}, + Watches(&enterpriseApi.ObjectStorage{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { - lms, ok := obj.(*enterpriseApi.LargeMessageStore) + os, ok := obj.(*enterpriseApi.ObjectStorage) if !ok { return nil } @@ -212,11 +212,11 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { } var reqs []reconcile.Request for _, ic := range list.Items { - ns := ic.Spec.LargeMessageStoreRef.Namespace + ns := ic.Spec.ObjectStorageRef.Namespace if ns == "" { ns = ic.Namespace } - if ic.Spec.LargeMessageStoreRef.Name == lms.Name && ns == lms.Namespace { + if ic.Spec.ObjectStorageRef.Name == os.Name && ns == os.Namespace { reqs = append(reqs, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: ic.Name, diff --git a/internal/controller/ingestorcluster_controller.go b/internal/controller/ingestorcluster_controller.go index a46a1dcff..0d8117bd2 100644 --- a/internal/controller/ingestorcluster_controller.go +++ b/internal/controller/ingestorcluster_controller.go @@ -169,23 +169,23 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { return reqs }), ). 
- Watches(&enterpriseApi.LargeMessageStore{}, + Watches(&enterpriseApi.ObjectStorage{}, handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { - lms, ok := obj.(*enterpriseApi.LargeMessageStore) + os, ok := obj.(*enterpriseApi.ObjectStorage) if !ok { return nil } - var list enterpriseApi.IndexerClusterList + var list enterpriseApi.IngestorClusterList if err := r.Client.List(ctx, &list); err != nil { return nil } var reqs []reconcile.Request for _, ic := range list.Items { - ns := ic.Spec.LargeMessageStoreRef.Namespace + ns := ic.Spec.ObjectStorageRef.Namespace if ns == "" { ns = ic.Namespace } - if ic.Spec.LargeMessageStoreRef.Name == lms.Name && ns == lms.Namespace { + if ic.Spec.ObjectStorageRef.Name == os.Name && ns == os.Namespace { reqs = append(reqs, reconcile.Request{ NamespacedName: types.NamespacedName{ Name: ic.Name, diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go index 4d140e1d6..d035d1037 100644 --- a/internal/controller/ingestorcluster_controller_test.go +++ b/internal/controller/ingestorcluster_controller_test.go @@ -86,12 +86,12 @@ var _ = Describe("IngestorCluster Controller", func() { }, }, } - lms := &enterpriseApi.LargeMessageStore{ + os := &enterpriseApi.ObjectStorage{ ObjectMeta: metav1.ObjectMeta{ - Name: "lms", + Name: "os", Namespace: nsSpecs.Name, }, - Spec: enterpriseApi.LargeMessageStoreSpec{ + Spec: enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", @@ -99,7 +99,7 @@ var _ = Describe("IngestorCluster Controller", func() { }, }, } - CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, lms, queue) + CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, os, queue) icSpec, _ := GetIngestorCluster("test", nsSpecs.Name) annotations = map[string]string{} icSpec.Annotations = annotations @@ 
-134,12 +134,12 @@ var _ = Describe("IngestorCluster Controller", func() { }, }, } - lms := &enterpriseApi.LargeMessageStore{ + os := &enterpriseApi.ObjectStorage{ ObjectMeta: metav1.ObjectMeta{ - Name: "lms", + Name: "os", Namespace: nsSpecs.Name, }, - Spec: enterpriseApi.LargeMessageStoreSpec{ + Spec: enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", @@ -147,7 +147,7 @@ var _ = Describe("IngestorCluster Controller", func() { }, }, } - CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, lms, queue) + CreateIngestorCluster("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, os, queue) DeleteIngestorCluster("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) @@ -220,7 +220,7 @@ func GetIngestorCluster(name string, namespace string) (*enterpriseApi.IngestorC return ic, err } -func CreateIngestorCluster(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, lms *enterpriseApi.LargeMessageStore, queue *enterpriseApi.Queue) *enterpriseApi.IngestorCluster { +func CreateIngestorCluster(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, os *enterpriseApi.ObjectStorage, queue *enterpriseApi.Queue) *enterpriseApi.IngestorCluster { By("Expecting IngestorCluster custom resource to be created successfully") key := types.NamespacedName{ @@ -244,9 +244,9 @@ func CreateIngestorCluster(name string, namespace string, annotations map[string Name: queue.Name, Namespace: queue.Namespace, }, - LargeMessageStoreRef: corev1.ObjectReference{ - Name: lms.Name, - Namespace: lms.Namespace, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, + Namespace: os.Namespace, }, }, } diff --git a/internal/controller/largemessagestore_controller.go b/internal/controller/objectstorage_controller.go similarity index 68% rename from 
internal/controller/largemessagestore_controller.go rename to internal/controller/objectstorage_controller.go index 69a4af131..4ae36b1a2 100644 --- a/internal/controller/largemessagestore_controller.go +++ b/internal/controller/objectstorage_controller.go @@ -36,34 +36,34 @@ import ( enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" ) -// LargeMessageStoreReconciler reconciles a LargeMessageStore object -type LargeMessageStoreReconciler struct { +// ObjectStorageReconciler reconciles a ObjectStorage object +type ObjectStorageReconciler struct { client.Client Scheme *runtime.Scheme } -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=largemessagestores,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=largemessagestores/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=largemessagestores/finalizers,verbs=update +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=objectstorages,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=objectstorages/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=objectstorages/finalizers,verbs=update // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. // TODO(user): Modify the Reconcile function to compare the state specified by -// the LargeMessageStore object against the actual cluster state, and then +// the ObjectStorage object against the actual cluster state, and then // perform operations to make the cluster state reflect the state specified by // the user. 
// // For more details, check Reconcile and its Result here: // - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile -func (r *LargeMessageStoreReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "LargeMessageStore")).Inc() - defer recordInstrumentionData(time.Now(), req, "controller", "LargeMessageStore") +func (r *ObjectStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "ObjectStorage")).Inc() + defer recordInstrumentionData(time.Now(), req, "controller", "ObjectStorage") reqLogger := log.FromContext(ctx) - reqLogger = reqLogger.WithValues("largemessagestore", req.NamespacedName) + reqLogger = reqLogger.WithValues("objectstorage", req.NamespacedName) - // Fetch the LargeMessageStore - instance := &enterpriseApi.LargeMessageStore{} + // Fetch the ObjectStorage + instance := &enterpriseApi.ObjectStorage{} err := r.Get(ctx, req.NamespacedName, instance) if err != nil { if k8serrors.IsNotFound(err) { @@ -74,20 +74,20 @@ func (r *LargeMessageStoreReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, nil } // Error reading the object - requeue the request. 
- return ctrl.Result{}, errors.Wrap(err, "could not load largemessagestore data") + return ctrl.Result{}, errors.Wrap(err, "could not load objectstorage data") } // If the reconciliation is paused, requeue annotations := instance.GetAnnotations() if annotations != nil { - if _, ok := annotations[enterpriseApi.LargeMessageStorePausedAnnotation]; ok { + if _, ok := annotations[enterpriseApi.ObjectStoragePausedAnnotation]; ok { return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil } } reqLogger.Info("start", "CR version", instance.GetResourceVersion()) - result, err := ApplyLargeMessageStore(ctx, r.Client, instance) + result, err := ApplyObjectStorage(ctx, r.Client, instance) if result.Requeue && result.RequeueAfter != 0 { reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) } @@ -95,14 +95,14 @@ func (r *LargeMessageStoreReconciler) Reconcile(ctx context.Context, req ctrl.Re return result, err } -var ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { - return enterprise.ApplyLargeMessageStore(ctx, client, instance) +var ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { + return enterprise.ApplyObjectStorage(ctx, client, instance) } // SetupWithManager sets up the controller with the Manager. -func (r *LargeMessageStoreReconciler) SetupWithManager(mgr ctrl.Manager) error { +func (r *ObjectStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). - For(&enterpriseApi.LargeMessageStore{}). + For(&enterpriseApi.ObjectStorage{}). 
WithEventFilter(predicate.Or( common.GenerationChangedPredicate(), common.AnnotationChangedPredicate(), diff --git a/internal/controller/largemessagestore_controller_test.go b/internal/controller/objectstorage_controller_test.go similarity index 51% rename from internal/controller/largemessagestore_controller_test.go rename to internal/controller/objectstorage_controller_test.go index 5d85d4409..6d7dec87a 100644 --- a/internal/controller/largemessagestore_controller_test.go +++ b/internal/controller/objectstorage_controller_test.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -var _ = Describe("LargeMessageStore Controller", func() { +var _ = Describe("ObjectStorage Controller", func() { BeforeEach(func() { time.Sleep(2 * time.Second) }) @@ -43,53 +43,53 @@ var _ = Describe("LargeMessageStore Controller", func() { }) - Context("LargeMessageStore Management", func() { + Context("ObjectStorage Management", func() { - It("Get LargeMessageStore custom resource should fail", func() { - namespace := "ns-splunk-largemessagestore-1" - ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + It("Get ObjectStorage custom resource should fail", func() { + namespace := "ns-splunk-objectstorage-1" + ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - _, err := GetLargeMessageStore("test", nsSpecs.Name) - Expect(err.Error()).Should(Equal("largemessagestores.enterprise.splunk.com \"test\" not found")) + _, err := GetObjectStorage("test", nsSpecs.Name) + Expect(err.Error()).Should(Equal("objectstorages.enterprise.splunk.com \"test\" not found")) Expect(k8sClient.Delete(context.Background(), 
nsSpecs)).Should(Succeed()) }) - It("Create LargeMessageStore custom resource with annotations should pause", func() { - namespace := "ns-splunk-largemessagestore-2" + It("Create ObjectStorage custom resource with annotations should pause", func() { + namespace := "ns-splunk-objectstorage-2" annotations := make(map[string]string) - annotations[enterpriseApi.LargeMessageStorePausedAnnotation] = "" - ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + annotations[enterpriseApi.ObjectStoragePausedAnnotation] = "" + ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - spec := enterpriseApi.LargeMessageStoreSpec{ + spec := enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", Path: "s3://ingestion/smartbus-test", }, } - CreateLargeMessageStore("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) - icSpec, _ := GetLargeMessageStore("test", nsSpecs.Name) + CreateObjectStorage("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + osSpec, _ := GetObjectStorage("test", nsSpecs.Name) annotations = map[string]string{} - icSpec.Annotations = annotations - icSpec.Status.Phase = "Ready" - UpdateLargeMessageStore(icSpec, enterpriseApi.PhaseReady, spec) - DeleteLargeMessageStore("test", nsSpecs.Name) + osSpec.Annotations = annotations + osSpec.Status.Phase = "Ready" + UpdateObjectStorage(osSpec, enterpriseApi.PhaseReady, spec) + DeleteObjectStorage("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) - It("Create LargeMessageStore custom resource should succeeded", func() { - 
namespace := "ns-splunk-largemessagestore-3" - ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + It("Create ObjectStorage custom resource should succeeded", func() { + namespace := "ns-splunk-objectstorage-3" + ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -97,21 +97,21 @@ var _ = Describe("LargeMessageStore Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) annotations := make(map[string]string) - spec := enterpriseApi.LargeMessageStoreSpec{ + spec := enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", Path: "s3://ingestion/smartbus-test", }, } - CreateLargeMessageStore("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) - DeleteLargeMessageStore("test", nsSpecs.Name) + CreateObjectStorage("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) + DeleteObjectStorage("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) It("Cover Unused methods", func() { - namespace := "ns-splunk-largemessagestore-4" - ApplyLargeMessageStore = func(ctx context.Context, client client.Client, instance *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { + namespace := "ns-splunk-objectstorage-4" + ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { return reconcile.Result{}, nil } nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} @@ -121,7 +121,7 @@ var _ = Describe("LargeMessageStore Controller", func() { ctx := context.TODO() builder := fake.NewClientBuilder() c := builder.Build() - 
instance := LargeMessageStoreReconciler{ + instance := ObjectStorageReconciler{ Client: c, Scheme: scheme.Scheme, } @@ -134,32 +134,32 @@ var _ = Describe("LargeMessageStore Controller", func() { _, err := instance.Reconcile(ctx, request) Expect(err).ToNot(HaveOccurred()) - spec := enterpriseApi.LargeMessageStoreSpec{ + spec := enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", Path: "s3://ingestion/smartbus-test", }, } - lmsSpec := testutils.NewLargeMessageStore("test", namespace, spec) - Expect(c.Create(ctx, lmsSpec)).Should(Succeed()) + osSpec := testutils.NewObjectStorage("test", namespace, spec) + Expect(c.Create(ctx, osSpec)).Should(Succeed()) annotations := make(map[string]string) - annotations[enterpriseApi.LargeMessageStorePausedAnnotation] = "" - lmsSpec.Annotations = annotations - Expect(c.Update(ctx, lmsSpec)).Should(Succeed()) + annotations[enterpriseApi.ObjectStoragePausedAnnotation] = "" + osSpec.Annotations = annotations + Expect(c.Update(ctx, osSpec)).Should(Succeed()) _, err = instance.Reconcile(ctx, request) Expect(err).ToNot(HaveOccurred()) annotations = map[string]string{} - lmsSpec.Annotations = annotations - Expect(c.Update(ctx, lmsSpec)).Should(Succeed()) + osSpec.Annotations = annotations + Expect(c.Update(ctx, osSpec)).Should(Succeed()) _, err = instance.Reconcile(ctx, request) Expect(err).ToNot(HaveOccurred()) - lmsSpec.DeletionTimestamp = &metav1.Time{} + osSpec.DeletionTimestamp = &metav1.Time{} _, err = instance.Reconcile(ctx, request) Expect(err).ToNot(HaveOccurred()) }) @@ -167,31 +167,30 @@ var _ = Describe("LargeMessageStore Controller", func() { }) }) -func GetLargeMessageStore(name string, namespace string) (*enterpriseApi.LargeMessageStore, error) { - By("Expecting LargeMessageStore custom resource to be retrieved successfully") +func GetObjectStorage(name string, namespace string) (*enterpriseApi.ObjectStorage, error) { + By("Expecting ObjectStorage custom 
resource to be retrieved successfully") key := types.NamespacedName{ Name: name, Namespace: namespace, } - lms := &enterpriseApi.LargeMessageStore{} + os := &enterpriseApi.ObjectStorage{} - err := k8sClient.Get(context.Background(), key, lms) + err := k8sClient.Get(context.Background(), key, os) if err != nil { return nil, err } - return lms, err + return os, err } -func CreateLargeMessageStore(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.LargeMessageStoreSpec) *enterpriseApi.LargeMessageStore { - By("Expecting LargeMessageStore custom resource to be created successfully") - +func CreateObjectStorage(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage { + By("Expecting ObjectStorage custom resource to be created successfully") key := types.NamespacedName{ Name: name, Namespace: namespace, } - lmsSpec := &enterpriseApi.LargeMessageStore{ + osSpec := &enterpriseApi.ObjectStorage{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -200,64 +199,62 @@ func CreateLargeMessageStore(name string, namespace string, annotations map[stri Spec: spec, } - Expect(k8sClient.Create(context.Background(), lmsSpec)).Should(Succeed()) + Expect(k8sClient.Create(context.Background(), osSpec)).Should(Succeed()) time.Sleep(2 * time.Second) - lms := &enterpriseApi.LargeMessageStore{} + os := &enterpriseApi.ObjectStorage{} Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, lms) + _ = k8sClient.Get(context.Background(), key, os) if status != "" { fmt.Printf("status is set to %v", status) - lms.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), lms)).Should(Succeed()) + os.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), os)).Should(Succeed()) time.Sleep(2 * time.Second) } return true }, timeout, interval).Should(BeTrue()) - return 
lms + return os } -func UpdateLargeMessageStore(instance *enterpriseApi.LargeMessageStore, status enterpriseApi.Phase, spec enterpriseApi.LargeMessageStoreSpec) *enterpriseApi.LargeMessageStore { - By("Expecting LargeMessageStore custom resource to be updated successfully") - +func UpdateObjectStorage(instance *enterpriseApi.ObjectStorage, status enterpriseApi.Phase, spec enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage { + By("Expecting ObjectStorage custom resource to be updated successfully") key := types.NamespacedName{ Name: instance.Name, Namespace: instance.Namespace, } - lmsSpec := testutils.NewLargeMessageStore(instance.Name, instance.Namespace, spec) - lmsSpec.ResourceVersion = instance.ResourceVersion - Expect(k8sClient.Update(context.Background(), lmsSpec)).Should(Succeed()) + osSpec := testutils.NewObjectStorage(instance.Name, instance.Namespace, spec) + osSpec.ResourceVersion = instance.ResourceVersion + Expect(k8sClient.Update(context.Background(), osSpec)).Should(Succeed()) time.Sleep(2 * time.Second) - lms := &enterpriseApi.LargeMessageStore{} + os := &enterpriseApi.ObjectStorage{} Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, lms) + _ = k8sClient.Get(context.Background(), key, os) if status != "" { fmt.Printf("status is set to %v", status) - lms.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), lms)).Should(Succeed()) + os.Status.Phase = status + Expect(k8sClient.Status().Update(context.Background(), os)).Should(Succeed()) time.Sleep(2 * time.Second) } return true }, timeout, interval).Should(BeTrue()) - return lms + return os } -func DeleteLargeMessageStore(name string, namespace string) { - By("Expecting LargeMessageStore custom resource to be deleted successfully") - +func DeleteObjectStorage(name string, namespace string) { + By("Expecting ObjectStorage custom resource to be deleted successfully") key := types.NamespacedName{ Name: name, Namespace: namespace, } 
Eventually(func() error { - lms := &enterpriseApi.LargeMessageStore{} - _ = k8sClient.Get(context.Background(), key, lms) - err := k8sClient.Delete(context.Background(), lms) + os := &enterpriseApi.ObjectStorage{} + _ = k8sClient.Get(context.Background(), key, os) + err := k8sClient.Delete(context.Background(), os) return err }, timeout, interval).Should(Succeed()) } diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index eda9f320d..8454d15b5 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -128,7 +128,7 @@ var _ = BeforeSuite(func(ctx context.Context) { }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&LargeMessageStoreReconciler{ + if err := (&ObjectStorageReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), }).SetupWithManager(k8sManager); err != nil { diff --git a/internal/controller/testutils/new.go b/internal/controller/testutils/new.go index b5b620337..aa47e8092 100644 --- a/internal/controller/testutils/new.go +++ b/internal/controller/testutils/new.go @@ -69,9 +69,9 @@ func NewQueue(name, ns string, spec enterpriseApi.QueueSpec) *enterpriseApi.Queu } } -// NewLargeMessageStore returns new LargeMessageStore instance with its config hash -func NewLargeMessageStore(name, ns string, spec enterpriseApi.LargeMessageStoreSpec) *enterpriseApi.LargeMessageStore { - return &enterpriseApi.LargeMessageStore{ +// NewObjectStorage returns new ObjectStorage instance with its config hash +func NewObjectStorage(name, ns string, spec enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage { + return &enterpriseApi.ObjectStorage{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, Spec: spec, } diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml index 2b0596fdd..41f4ea2aa 100644 --- 
a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml @@ -17,9 +17,9 @@ status: --- # assert for large message store custom resource to be ready apiVersion: enterprise.splunk.com/v4 -kind: LargeMessageStore +kind: ObjectStorage metadata: - name: lms + name: os spec: provider: s3 s3: @@ -72,7 +72,7 @@ status: region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com dlq: sqs-dlq-test - largeMessageStore: + objectStorage: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com @@ -113,7 +113,7 @@ status: region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com dlq: sqs-dlq-test - largeMessageStore: + objectStorage: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index 57e6c4c68..00ff26a56 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -17,7 +17,7 @@ status: region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com dlq: sqs-dlq-test - largeMessageStore: + objectStorage: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml index 1e8af1663..d05cb5bcf 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -15,9 +15,9 @@ queue: endpoint: https://sqs.us-west-2.amazonaws.com dlq: sqs-dlq-test -largeMessageStore: +objectStorage: enabled: true - name: lms + name: os provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com @@ -29,8 +29,8 @@ ingestorCluster: replicaCount: 3 queueRef: name: queue - largeMessageStoreRef: - name: lms + objectStorageRef: + 
name: os clusterManager: enabled: true @@ -45,5 +45,5 @@ indexerCluster: name: cm queueRef: name: queue - largeMessageStoreRef: - name: lms + objectStorageRef: + name: os diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 5e468196c..f6bcd046d 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -269,26 +269,26 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } // Large Message Store - lms := enterpriseApi.LargeMessageStore{} - if cr.Spec.LargeMessageStoreRef.Name != "" { + os := enterpriseApi.ObjectStorage{} + if cr.Spec.ObjectStorageRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.LargeMessageStoreRef.Namespace != "" { - ns = cr.Spec.LargeMessageStoreRef.Namespace + if cr.Spec.ObjectStorageRef.Namespace != "" { + ns = cr.Spec.ObjectStorageRef.Namespace } err = client.Get(context.Background(), types.NamespacedName{ - Name: cr.Spec.LargeMessageStoreRef.Name, + Name: cr.Spec.ObjectStorageRef.Name, Namespace: ns, - }, &lms) + }, &os) if err != nil { return result, err } } // Can not override original large message store spec due to comparison in the later code - lmsCopy := lms - if lmsCopy.Spec.Provider == "s3" { - if lmsCopy.Spec.S3.Endpoint == "" { - lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.Region) + osCopy := os + if osCopy.Spec.Provider == "s3" { + if osCopy.Spec.S3.Endpoint == "" { + osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.Region) } } @@ -297,7 +297,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller if !reflect.DeepEqual(cr.Status.Queue, queue.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullQueueChange(ctx, cr, queueCopy, lmsCopy, client) + err = mgr.handlePullQueueChange(ctx, cr, queueCopy, osCopy, client) if err != 
nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") @@ -592,14 +592,14 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } // Large Message Store - lms := enterpriseApi.LargeMessageStore{} - if cr.Spec.LargeMessageStoreRef.Name != "" { + os := enterpriseApi.ObjectStorage{} + if cr.Spec.ObjectStorageRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.LargeMessageStoreRef.Namespace != "" { - ns = cr.Spec.LargeMessageStoreRef.Namespace + if cr.Spec.ObjectStorageRef.Namespace != "" { + ns = cr.Spec.ObjectStorageRef.Namespace } err = client.Get(context.Background(), types.NamespacedName{ - Name: cr.Spec.LargeMessageStoreRef.Name, + Name: cr.Spec.ObjectStorageRef.Name, Namespace: ns, - }, &queue) + }, &os) if err != nil { @@ -608,10 +608,10 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } // Can not override original queue spec due to comparison in the later code - lmsCopy := lms - if lmsCopy.Spec.Provider == "s3" { - if lmsCopy.Spec.S3.Endpoint == "" { - lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.Region) + osCopy := os + if osCopy.Spec.Provider == "s3" { + if osCopy.Spec.S3.Endpoint == "" { + osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.Region) } } @@ -620,7 +620,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, if !reflect.DeepEqual(cr.Status.Queue, queue.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePullQueueChange(ctx, cr, queueCopy, lmsCopy, client) + err = mgr.handlePullQueueChange(ctx, cr, queueCopy, osCopy, client) if err != nil { eventPublisher.Warning(ctx,
"ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") @@ -1297,7 +1297,7 @@ func getSiteName(ctx context.Context, c splcommon.ControllerClient, cr *enterpri var newSplunkClientForQueuePipeline = splclient.NewSplunkClient // Checks if only PullQueue or Pipeline config changed, and updates the conf file if so -func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue enterpriseApi.Queue, lms enterpriseApi.LargeMessageStore, k8s rclient.Client) error { +func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue enterpriseApi.Queue, os enterpriseApi.ObjectStorage, k8s rclient.Client) error { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("handlePullQueueChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) @@ -1327,7 +1327,7 @@ func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, afterDelete = true } - queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &lms, newCR, afterDelete) + queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCR, afterDelete) for _, pbVal := range queueChangedFieldsOutputs { if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { @@ -1353,22 +1353,22 @@ func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, } // getChangedQueueFieldsForIndexer returns a list of changed queue and pipeline fields for indexer pods -func getChangedQueueFieldsForIndexer(queue *enterpriseApi.Queue, lms 
*enterpriseApi.LargeMessageStore, queueIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields [][]string) { +func getChangedQueueFieldsForIndexer(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool) (queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields [][]string) { // Compare queue fields - oldPB := queueIndexerStatus.Status.Queue - if oldPB == nil { - oldPB = &enterpriseApi.QueueSpec{} + oldQueue := queueIndexerStatus.Status.Queue + if oldQueue == nil { + oldQueue = &enterpriseApi.QueueSpec{} } - newPB := queue.Spec + newQueue := queue.Spec - oldLMS := queueIndexerStatus.Status.LargeMessageStore - if oldLMS == nil { - oldLMS = &enterpriseApi.LargeMessageStoreSpec{} + oldOS := queueIndexerStatus.Status.ObjectStorage + if oldOS == nil { + oldOS = &enterpriseApi.ObjectStorageSpec{} } - newLMS := lms.Spec + newOS := os.Spec // Push all queue fields - queueChangedFieldsInputs, queueChangedFieldsOutputs = pullQueueChanged(oldPB, &newPB, oldLMS, &newLMS, afterDelete) + queueChangedFieldsInputs, queueChangedFieldsOutputs = pullQueueChanged(oldQueue, &newQueue, oldOS, &newOS, afterDelete) // Always set all pipeline fields, not just changed ones pipelineChangedFields = pipelineConfig(true) @@ -1386,14 +1386,14 @@ func imageUpdatedTo9(previousImage string, currentImage string) bool { return strings.HasPrefix(previousVersion, "8") && strings.HasPrefix(currentVersion, "9") } -func pullQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (inputs, outputs [][]string) { +func pullQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldOS, newOS *enterpriseApi.ObjectStorageSpec, afterDelete bool) (inputs, outputs [][]string) { queueProvider := "" if newQueue.Provider == "sqs" { queueProvider = "sqs_smartbus" } - 
lmsProvider := "" - if newLMS.Provider == "s3" { - lmsProvider = "sqs_smartbus" + osProvider := "" + if newOS.Provider == "s3" { + osProvider = "sqs_smartbus" } if oldQueue.Provider != newQueue.Provider || afterDelete { @@ -1405,11 +1405,11 @@ func pullQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldLMS, newLM if oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete { inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint}) } - if oldLMS.S3.Endpoint != newLMS.S3.Endpoint || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", lmsProvider), newLMS.S3.Endpoint}) + if oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), newOS.S3.Endpoint}) } - if oldLMS.S3.Path != newLMS.S3.Path || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", lmsProvider), newLMS.S3.Path}) + if oldOS.S3.Path != newOS.S3.Path || afterDelete { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), newOS.S3.Path}) } if oldQueue.SQS.DLQ != newQueue.SQS.DLQ || afterDelete { inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), newQueue.SQS.DLQ}) diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index 4c166c8e0..c2b3a8063 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -2067,15 +2067,15 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { }, } - lms := enterpriseApi.LargeMessageStore{ + os := enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ - Kind: "LargeMessageStore", + Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "lms", + 
Name: "os", }, - Spec: enterpriseApi.LargeMessageStoreSpec{ + Spec: enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", @@ -2089,20 +2089,20 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { QueueRef: corev1.ObjectReference{ Name: queue.Name, }, - LargeMessageStoreRef: corev1.ObjectReference{ - Name: lms.Name, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, }, }, } - queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &lms, newCR, false) + queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCR, false) assert.Equal(t, 8, len(queueChangedFieldsInputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, @@ -2113,8 +2113,8 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { {"remote_queue.type", provider}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, - 
{fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, @@ -2156,16 +2156,16 @@ func TestHandlePullQueueChange(t *testing.T) { }, } - lms := enterpriseApi.LargeMessageStore{ + os := enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ - Kind: "LargeMessageStore", + Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "lms", + Name: "os", Namespace: "test", }, - Spec: enterpriseApi.LargeMessageStoreSpec{ + Spec: enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", @@ -2186,15 +2186,15 @@ func TestHandlePullQueueChange(t *testing.T) { QueueRef: corev1.ObjectReference{ Name: queue.Name, }, - LargeMessageStoreRef: corev1.ObjectReference{ - Name: lms.Name, - Namespace: lms.Namespace, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, + Namespace: os.Namespace, }, }, Status: enterpriseApi.IndexerClusterStatus{ ReadyReplicas: 3, Queue: &enterpriseApi.QueueSpec{}, - LargeMessageStore: &enterpriseApi.LargeMessageStoreSpec{}, + ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, }, } @@ -2252,7 +2252,7 @@ func TestHandlePullQueueChange(t *testing.T) { c := spltest.NewMockClient() ctx := context.TODO() c.Create(ctx, &queue) - c.Create(ctx, &lms) + c.Create(ctx, &os) c.Create(ctx, newCR) c.Create(ctx, pod0) c.Create(ctx, pod1) @@ -2260,7 +2260,7 @@ func TestHandlePullQueueChange(t *testing.T) { // Negative test case: secret not found mgr := &indexerClusterPodManager{} - err := 
mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) + err := mgr.handlePullQueueChange(ctx, newCR, queue, os, c) assert.NotNil(t, err) // Mock secret @@ -2271,15 +2271,15 @@ func TestHandlePullQueueChange(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) + err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) assert.NotNil(t, err) // outputs.conf propertyKVList := [][]string{ {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, @@ -2295,7 +2295,7 @@ func TestHandlePullQueueChange(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) + err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) assert.NotNil(t, err) // inputs.conf @@ -2305,7 +2305,7 @@ func TestHandlePullQueueChange(t *testing.T) { // Negative test case: failure in updating remote queue stanza mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) + err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) assert.NotNil(t, err) // default-mode.conf @@ -2333,7 
+2333,7 @@ func TestHandlePullQueueChange(t *testing.T) { mgr = newTestPullQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullQueueChange(ctx, newCR, queue, lms, c) + err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) assert.Nil(t, err) } diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 299aa8d0c..17cd14a44 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -235,26 +235,26 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } // Large Message Store - lms := enterpriseApi.LargeMessageStore{} - if cr.Spec.LargeMessageStoreRef.Name != "" { + os := enterpriseApi.ObjectStorage{} + if cr.Spec.ObjectStorageRef.Name != "" { ns := cr.GetNamespace() - if cr.Spec.LargeMessageStoreRef.Namespace != "" { - ns = cr.Spec.LargeMessageStoreRef.Namespace + if cr.Spec.ObjectStorageRef.Namespace != "" { + ns = cr.Spec.ObjectStorageRef.Namespace } err = client.Get(context.Background(), types.NamespacedName{ - Name: cr.Spec.LargeMessageStoreRef.Name, + Name: cr.Spec.ObjectStorageRef.Name, Namespace: ns, - }, &lms) + }, &os) if err != nil { return result, err } } // Can not override original queue spec due to comparison in the later code - lmsCopy := lms - if lmsCopy.Spec.Provider == "s3" { - if lmsCopy.Spec.S3.Endpoint == "" { - lmsCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.Region) + osCopy := os + if osCopy.Spec.Provider == "s3" { + if osCopy.Spec.S3.Endpoint == "" { + osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.Region) } } @@ -262,7 +262,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr if !reflect.DeepEqual(cr.Status.Queue, queue.Spec) { mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) - err = mgr.handlePushQueueChange(ctx, cr, queueCopy, lmsCopy, client) + err = 
mgr.handlePushQueueChange(ctx, cr, queueCopy, osCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") @@ -343,7 +343,7 @@ func getIngestorStatefulSet(ctx context.Context, client splcommon.ControllerClie } // Checks if only Queue or Pipeline config changed, and updates the conf file if so -func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue enterpriseApi.Queue, lms enterpriseApi.LargeMessageStore, k8s client.Client) error { +func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue enterpriseApi.Queue, os enterpriseApi.ObjectStorage, k8s client.Client) error { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("handlePushQueueChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) @@ -370,7 +370,7 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, afterDelete = true } - queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &lms, newCR, afterDelete) + queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCR, afterDelete) for _, pbVal := range queueChangedFields { if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { @@ -390,21 +390,20 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, } // getChangedQueueFieldsForIngestor returns a list of changed queue and pipeline fields for ingestor pods -func getChangedQueueFieldsForIngestor(queue *enterpriseApi.Queue, lms *enterpriseApi.LargeMessageStore, queueIngestorStatus 
*enterpriseApi.IngestorCluster, afterDelete bool) (queueChangedFields, pipelineChangedFields [][]string) { - oldPB := queueIngestorStatus.Status.Queue - if oldPB == nil { - oldPB = &enterpriseApi.QueueSpec{} +func getChangedQueueFieldsForIngestor(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool) (queueChangedFields, pipelineChangedFields [][]string) { + oldQueue := queueIngestorStatus.Status.Queue + if oldQueue == nil { + oldQueue = &enterpriseApi.QueueSpec{} } - newPB := &queue.Spec + newQueue := &queue.Spec - oldLMS := queueIngestorStatus.Status.LargeMessageStore - if oldLMS == nil { - oldLMS = &enterpriseApi.LargeMessageStoreSpec{} + oldOS := queueIngestorStatus.Status.ObjectStorage + if oldOS == nil { + oldOS = &enterpriseApi.ObjectStorageSpec{} } - newLMS := &lms.Spec - + newOS := &os.Spec // Push changed queue fields - queueChangedFields = pushQueueChanged(oldPB, newPB, oldLMS, newLMS, afterDelete) + queueChangedFields = pushQueueChanged(oldQueue, newQueue, oldOS, newOS, afterDelete) // Always changed pipeline fields pipelineChangedFields = pipelineConfig(false) @@ -443,14 +442,14 @@ func pipelineConfig(isIndexer bool) (output [][]string) { return output } -func pushQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldLMS, newLMS *enterpriseApi.LargeMessageStoreSpec, afterDelete bool) (output [][]string) { +func pushQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldOS, newOS *enterpriseApi.ObjectStorageSpec, afterDelete bool) (output [][]string) { queueProvider := "" if newQueue.Provider == "sqs" { queueProvider = "sqs_smartbus" } - lmsProvider := "" - if newLMS.Provider == "s3" { - lmsProvider = "sqs_smartbus" + osProvider := "" + if newOS.Provider == "s3" { + osProvider = "sqs_smartbus" } if oldQueue.Provider != newQueue.Provider || afterDelete { @@ -462,11 +461,11 @@ func pushQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldLMS, newLM if 
oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete { output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint}) } - if oldLMS.S3.Endpoint != newLMS.S3.Endpoint || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", lmsProvider), newLMS.S3.Endpoint}) + if oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), newOS.S3.Endpoint}) } - if oldLMS.S3.Path != newLMS.S3.Path || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", lmsProvider), newLMS.S3.Path}) + if oldOS.S3.Path != newOS.S3.Path || afterDelete { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), newOS.S3.Path}) } if oldQueue.SQS.DLQ != newQueue.SQS.DLQ || afterDelete { output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), newQueue.SQS.DLQ}) diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index 424806846..7bf69ac84 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -86,16 +86,16 @@ func TestApplyIngestorCluster(t *testing.T) { } c.Create(ctx, queue) - lms := enterpriseApi.LargeMessageStore{ + os := enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ - Kind: "LargeMessageStore", + Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "lms", + Name: "os", Namespace: "test", }, - Spec: enterpriseApi.LargeMessageStoreSpec{ + Spec: enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", @@ -103,7 +103,7 @@ func TestApplyIngestorCluster(t *testing.T) { }, }, } - c.Create(ctx, &lms) + 
c.Create(ctx, &os) cr := &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ @@ -123,9 +123,9 @@ func TestApplyIngestorCluster(t *testing.T) { Name: queue.Name, Namespace: queue.Namespace, }, - LargeMessageStoreRef: corev1.ObjectReference{ - Name: lms.Name, - Namespace: lms.Namespace, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, + Namespace: os.Namespace, }, }, } @@ -287,8 +287,8 @@ func TestApplyIngestorCluster(t *testing.T) { {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, @@ -438,15 +438,15 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { }, } - lms := enterpriseApi.LargeMessageStore{ + os := enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ - Kind: "LargeMessageStore", + Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "lms", + Name: "os", }, - Spec: enterpriseApi.LargeMessageStoreSpec{ + Spec: enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", @@ -460,22 +460,22 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { QueueRef: corev1.ObjectReference{ Name: queue.Name, }, - LargeMessageStoreRef: corev1.ObjectReference{ - 
Name: lms.Name, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, }, }, Status: enterpriseApi.IngestorClusterStatus{}, } - queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &lms, newCR, false) + queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCR, false) assert.Equal(t, 10, len(queueChangedFields)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, @@ -517,15 +517,15 @@ func TestHandlePushQueueChange(t *testing.T) { }, } - lms := enterpriseApi.LargeMessageStore{ + os := enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ - Kind: "LargeMessageStore", + Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "lms", + Name: "os", }, - Spec: enterpriseApi.LargeMessageStoreSpec{ + Spec: enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", @@ -546,15 +546,15 @@ func TestHandlePushQueueChange(t *testing.T) { QueueRef: corev1.ObjectReference{ Name: queue.Name, }, - LargeMessageStoreRef: corev1.ObjectReference{ - Name: lms.Name, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, }, }, Status: 
enterpriseApi.IngestorClusterStatus{ Replicas: 3, ReadyReplicas: 3, Queue: &enterpriseApi.QueueSpec{}, - LargeMessageStore: &enterpriseApi.LargeMessageStoreSpec{}, + ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, }, } @@ -618,7 +618,7 @@ func TestHandlePushQueueChange(t *testing.T) { // Negative test case: secret not found mgr := &ingestorClusterPodManager{} - err := mgr.handlePushQueueChange(ctx, newCR, queue, lms, c) + err := mgr.handlePushQueueChange(ctx, newCR, queue, os, c) assert.NotNil(t, err) // Mock secret @@ -629,7 +629,7 @@ func TestHandlePushQueueChange(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestPushQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushQueueChange(ctx, newCR, queue, lms, c) + err = mgr.handlePushQueueChange(ctx, newCR, queue, os, c) assert.NotNil(t, err) // outputs.conf @@ -637,8 +637,8 @@ func TestHandlePushQueueChange(t *testing.T) { {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), lms.Spec.S3.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), lms.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.max_count.%s.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, @@ -651,7 +651,7 @@ func TestHandlePushQueueChange(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestPushQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushQueueChange(ctx, newCR, 
queue, lms, c) + err = mgr.handlePushQueueChange(ctx, newCR, queue, os, c) assert.NotNil(t, err) // default-mode.conf @@ -680,7 +680,7 @@ func TestHandlePushQueueChange(t *testing.T) { mgr = newTestPushQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushQueueChange(ctx, newCR, queue, lms, c) + err = mgr.handlePushQueueChange(ctx, newCR, queue, os, c) assert.Nil(t, err) } diff --git a/pkg/splunk/enterprise/largemessagestore.go b/pkg/splunk/enterprise/objectstorage.go similarity index 89% rename from pkg/splunk/enterprise/largemessagestore.go rename to pkg/splunk/enterprise/objectstorage.go index 8e6ff93f5..4db3dcaee 100644 --- a/pkg/splunk/enterprise/largemessagestore.go +++ b/pkg/splunk/enterprise/objectstorage.go @@ -27,8 +27,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ) -// ApplyLargeMessageStore reconciles the state of an IngestorCluster custom resource -func ApplyLargeMessageStore(ctx context.Context, client client.Client, cr *enterpriseApi.LargeMessageStore) (reconcile.Result, error) { +// ApplyObjectStorage reconciles the state of an ObjectStorage custom resource +func ApplyObjectStorage(ctx context.Context, client client.Client, cr *enterpriseApi.ObjectStorage) (reconcile.Result, error) { var err error // Unless modified, reconcile for this object will be requeued after 5 seconds @@ -44,7 +44,7 @@ func ApplyLargeMessageStore(ctx context.Context, client client.Client, cr *enter eventPublisher, _ := newK8EventPublisher(client, cr) ctx = context.WithValue(ctx, splcommon.EventPublisherKey, eventPublisher) - cr.Kind = "LargeMessageStore" + cr.Kind = "ObjectStorage" // Initialize phase cr.Status.Phase = enterpriseApi.PhaseError diff --git a/pkg/splunk/enterprise/largemessagestore_test.go b/pkg/splunk/enterprise/objectstorage_test.go similarity index 82% rename from pkg/splunk/enterprise/largemessagestore_test.go rename to pkg/splunk/enterprise/objectstorage_test.go index 0f627383c..a3511af69 100644 ---
a/pkg/splunk/enterprise/largemessagestore_test.go +++ b/pkg/splunk/enterprise/objectstorage_test.go @@ -43,7 +43,7 @@ func init() { } } -func TestApplyLargeMessageStore(t *testing.T) { +func TestApplyObjectStorage(t *testing.T) { os.Setenv("SPLUNK_GENERAL_TERMS", "--accept-sgt-current-at-splunk-com") ctx := context.TODO() @@ -55,16 +55,16 @@ func TestApplyLargeMessageStore(t *testing.T) { c := fake.NewClientBuilder().WithScheme(scheme).Build() // Object definitions - lms := &enterpriseApi.LargeMessageStore{ + objStorage := &enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ - Kind: "LargeMessageStore", + Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", }, ObjectMeta: metav1.ObjectMeta{ - Name: "lms", + Name: "os", Namespace: "test", }, - Spec: enterpriseApi.LargeMessageStoreSpec{ + Spec: enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", @@ -72,12 +72,12 @@ func TestApplyLargeMessageStore(t *testing.T) { }, }, } - c.Create(ctx, lms) + c.Create(ctx, objStorage) - // ApplyLargeMessageStore - result, err := ApplyLargeMessageStore(ctx, c, lms) + // ApplyObjectStorage + result, err := ApplyObjectStorage(ctx, c, objStorage) assert.NoError(t, err) assert.True(t, result.Requeue) - assert.NotEqual(t, enterpriseApi.PhaseError, lms.Status.Phase) - assert.Equal(t, enterpriseApi.PhaseReady, lms.Status.Phase) + assert.NotEqual(t, enterpriseApi.PhaseError, objStorage.Status.Phase) + assert.Equal(t, enterpriseApi.PhaseReady, objStorage.Status.Phase) } diff --git a/pkg/splunk/enterprise/types.go b/pkg/splunk/enterprise/types.go index b7b691415..fe96430e4 100644 --- a/pkg/splunk/enterprise/types.go +++ b/pkg/splunk/enterprise/types.go @@ -66,8 +66,8 @@ const ( // SplunkQueue is the queue instance SplunkQueue InstanceType = "queue" - // SplunkLargeMessageStore is the large message store instance - SplunkLargeMessageStore InstanceType = "large-message-store" + // SplunkObjectStorage is the object storage instance +
SplunkObjectStorage InstanceType = "object-storage" // SplunkDeployer is an instance that distributes baseline configurations and apps to search head cluster members SplunkDeployer InstanceType = "deployer" @@ -299,8 +299,8 @@ func KindToInstanceString(kind string) string { return SplunkIngestor.ToString() case "Queue": return SplunkQueue.ToString() - case "LargeMessageStore": - return SplunkLargeMessageStore.ToString() + case "ObjectStorage": + return SplunkObjectStorage.ToString() case "LicenseManager": return SplunkLicenseManager.ToString() case "LicenseMaster": diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index 01b304c12..afafa6ede 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -2305,19 +2305,19 @@ func fetchCurrentCRWithStatusUpdate(ctx context.Context, client splcommon.Contro origCR.(*enterpriseApi.Queue).Status.DeepCopyInto(&latestQueueCR.Status) return latestQueueCR, nil - case "LargeMessageStore": - latestLmsCR := &enterpriseApi.LargeMessageStore{} - err = client.Get(ctx, namespacedName, latestLmsCR) + case "ObjectStorage": + latestOsCR := &enterpriseApi.ObjectStorage{} + err = client.Get(ctx, namespacedName, latestOsCR) if err != nil { return nil, err } - origCR.(*enterpriseApi.LargeMessageStore).Status.Message = "" + origCR.(*enterpriseApi.ObjectStorage).Status.Message = "" if (crError != nil) && ((*crError) != nil) { - origCR.(*enterpriseApi.LargeMessageStore).Status.Message = (*crError).Error() + origCR.(*enterpriseApi.ObjectStorage).Status.Message = (*crError).Error() } - origCR.(*enterpriseApi.LargeMessageStore).Status.DeepCopyInto(&latestLmsCR.Status) - return latestLmsCR, nil + origCR.(*enterpriseApi.ObjectStorage).Status.DeepCopyInto(&latestOsCR.Status) + return latestOsCR, nil case "LicenseMaster": latestLmCR := &enterpriseApiV3.LicenseMaster{} diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go 
b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index 687473bc0..e2e27d268 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -48,7 +48,7 @@ var ( DLQ: "test-dead-letter-queue", }, } - lms = enterpriseApi.LargeMessageStoreSpec{ + objectStorage = enterpriseApi.ObjectStorageSpec{ Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index a27269889..41beae4bc 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -84,14 +84,14 @@ var _ = Describe("indingsep test", func() { q, err := deployment.DeployQueue(ctx, "queue", queue) Expect(err).To(Succeed(), "Unable to deploy Queue") - // Deploy LargeMessageStore - testcaseEnvInst.Log.Info("Deploy LargeMessageStore") - lm, err := deployment.DeployLargeMessageStore(ctx, "lms", lms) - Expect(err).To(Succeed(), "Unable to deploy LargeMessageStore") + // Deploy ObjectStorage + testcaseEnvInst.Log.Info("Deploy ObjectStorage") + objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) + Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // 
Deploy Cluster Manager @@ -101,7 +101,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -137,12 +137,12 @@ var _ = Describe("indingsep test", func() { err = deployment.DeleteCR(ctx, queue) Expect(err).To(Succeed(), "Unable to delete Queue", "Queue Name", queue) - // Delete the LargeMessageStore - lm = &enterpriseApi.LargeMessageStore{} - err = deployment.GetInstance(ctx, "lms", lm) - Expect(err).To(Succeed(), "Unable to get LargeMessageStore instance", "LargeMessageStore Name", lm) - err = deployment.DeleteCR(ctx, lm) - Expect(err).To(Succeed(), "Unable to delete LargeMessageStore", "LargeMessageStore Name", lm) + // Delete the ObjectStorage + objStorage = &enterpriseApi.ObjectStorage{} + err = deployment.GetInstance(ctx, "os", objStorage) + Expect(err).To(Succeed(), "Unable to get ObjectStorage instance", "ObjectStorage Name", objStorage) + err = deployment.DeleteCR(ctx, objStorage) + Expect(err).To(Succeed(), "Unable to delete ObjectStorage", "ObjectStorage Name", objStorage) }) }) @@ -157,10 +157,10 @@ var _ = Describe("indingsep test", func() { q, err := deployment.DeployQueue(ctx, "queue", queue) Expect(err).To(Succeed(), "Unable to deploy Queue") - // Deploy LargeMessageStore - testcaseEnvInst.Log.Info("Deploy LargeMessageStore") - lm, err := deployment.DeployLargeMessageStore(ctx, "lms", lms) - Expect(err).To(Succeed(), "Unable to deploy LargeMessageStore") + // Deploy 
ObjectStorage + testcaseEnvInst.Log.Info("Deploy ObjectStorage") + objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) + Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") // Upload apps to S3 testcaseEnvInst.Log.Info("Upload apps to S3") @@ -206,7 +206,7 @@ var _ = Describe("indingsep test", func() { }, }, QueueRef: v1.ObjectReference{Name: q.Name}, - LargeMessageStoreRef: v1.ObjectReference{Name: lm.Name}, + ObjectStorageRef: v1.ObjectReference{Name: objStorage.Name}, Replicas: 3, AppFrameworkConfig: appFrameworkSpec, }, @@ -261,14 +261,14 @@ var _ = Describe("indingsep test", func() { q, err := deployment.DeployQueue(ctx, "queue", queue) Expect(err).To(Succeed(), "Unable to deploy Queue") - // Deploy LargeMessageStore - testcaseEnvInst.Log.Info("Deploy LargeMessageStore") - lm, err := deployment.DeployLargeMessageStore(ctx, "lms", lms) - Expect(err).To(Succeed(), "Unable to deploy LargeMessageStore") + // Deploy ObjectStorage + testcaseEnvInst.Log.Info("Deploy ObjectStorage") + objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) + Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -278,7 +278,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: 
lm.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -368,14 +368,14 @@ var _ = Describe("indingsep test", func() { q, err := deployment.DeployQueue(ctx, "queue", queue) Expect(err).To(Succeed(), "Unable to deploy Queue") - // Deploy LargeMessageStore - testcaseEnvInst.Log.Info("Deploy LargeMessageStore") - lm, err := deployment.DeployLargeMessageStore(ctx, "lms", lms) - Expect(err).To(Succeed(), "Unable to deploy LargeMessageStore") + // Deploy ObjectStorage + testcaseEnvInst.Log.Info("Deploy ObjectStorage") + objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) + Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -385,7 +385,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: lm.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: 
objStorage.Name}, serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase diff --git a/test/testenv/deployment.go b/test/testenv/deployment.go index 00d8f1e95..781e5b6f0 100644 --- a/test/testenv/deployment.go +++ b/test/testenv/deployment.go @@ -431,9 +431,9 @@ func (d *Deployment) DeployClusterMasterWithSmartStoreIndexes(ctx context.Contex } // DeployIndexerCluster deploys the indexer cluster -func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, queue, lms corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { +func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseManagerName string, count int, clusterManagerRef string, ansibleConfig string, queue, os corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IndexerCluster, error) { d.testenv.Log.Info("Deploying indexer cluster", "name", name, "CM", clusterManagerRef) - indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, queue, lms, serviceAccountName) + indexer := newIndexerCluster(name, d.testenv.namespace, LicenseManagerName, count, clusterManagerRef, ansibleConfig, d.testenv.splunkImage, queue, os, serviceAccountName) pdata, _ := json.Marshal(indexer) d.testenv.Log.Info("indexer cluster spec", "cr", string(pdata)) deployed, err := d.deployCR(ctx, name, indexer) @@ -445,10 +445,10 @@ func (d *Deployment) DeployIndexerCluster(ctx context.Context, name, LicenseMana } // DeployIngestorCluster deploys the ingestor cluster -func (d *Deployment) DeployIngestorCluster(ctx context.Context, name string, count int, queue, lms corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { +func (d *Deployment) DeployIngestorCluster(ctx context.Context, name 
string, count int, queue, os corev1.ObjectReference, serviceAccountName string) (*enterpriseApi.IngestorCluster, error) { d.testenv.Log.Info("Deploying ingestor cluster", "name", name) - ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, queue, lms, serviceAccountName) + ingestor := newIngestorCluster(name, d.testenv.namespace, count, d.testenv.splunkImage, queue, os, serviceAccountName) pdata, _ := json.Marshal(ingestor) d.testenv.Log.Info("ingestor cluster spec", "cr", string(pdata)) @@ -476,20 +476,20 @@ func (d *Deployment) DeployQueue(ctx context.Context, name string, queue enterpr return deployed.(*enterpriseApi.Queue), err } -// DeployLargeMessageStore deploys the large message store -func (d *Deployment) DeployLargeMessageStore(ctx context.Context, name string, lms enterpriseApi.LargeMessageStoreSpec) (*enterpriseApi.LargeMessageStore, error) { - d.testenv.Log.Info("Deploying large message store", "name", name) +// DeployObjectStorage deploys the object storage +func (d *Deployment) DeployObjectStorage(ctx context.Context, name string, objStorage enterpriseApi.ObjectStorageSpec) (*enterpriseApi.ObjectStorage, error) { + d.testenv.Log.Info("Deploying object storage", "name", name) - lmsCfg := newLargeMessageStore(name, d.testenv.namespace, lms) - pdata, _ := json.Marshal(lmsCfg) + objStorageCfg := newObjectStorage(name, d.testenv.namespace, objStorage) + pdata, _ := json.Marshal(objStorageCfg) - d.testenv.Log.Info("large message store spec", "cr", string(pdata)) - deployed, err := d.deployCR(ctx, name, lmsCfg) + d.testenv.Log.Info("object storage spec", "cr", string(pdata)) + deployed, err := d.deployCR(ctx, name, objStorageCfg) if err != nil { return nil, err } - return deployed.(*enterpriseApi.LargeMessageStore), err + return deployed.(*enterpriseApi.ObjectStorage), err } // DeployIngestorClusterWithAdditionalConfiguration deploys the ingestor cluster with additional configuration @@ -657,13 +657,13 @@ func (d 
*Deployment) UpdateCR(ctx context.Context, cr client.Object) error { ucr := cr.(*enterpriseApi.Queue) current.Spec = ucr.Spec cobject = current - case "LargeMessageStore": - current := &enterpriseApi.LargeMessageStore{} + case "ObjectStorage": + current := &enterpriseApi.ObjectStorage{} err = d.testenv.GetKubeClient().Get(ctx, namespacedName, current) if err != nil { return err } - ucr := cr.(*enterpriseApi.LargeMessageStore) + ucr := cr.(*enterpriseApi.ObjectStorage) current.Spec = ucr.Spec cobject = current case "ClusterMaster": diff --git a/test/testenv/util.go b/test/testenv/util.go index f71cc31f3..d9c6d5807 100644 --- a/test/testenv/util.go +++ b/test/testenv/util.go @@ -359,7 +359,7 @@ func newClusterMasterWithGivenIndexes(name, ns, licenseManagerName, ansibleConfi } // newIndexerCluster creates and initialize the CR for IndexerCluster Kind -func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, queue, lms corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster { +func newIndexerCluster(name, ns, licenseManagerName string, replicas int, clusterManagerRef, ansibleConfig, splunkImage string, queue, os corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IndexerCluster { licenseMasterRef, licenseManagerRef := swapLicenseManager(name, licenseManagerName) clusterMasterRef, clusterManagerRef := swapClusterManager(name, clusterManagerRef) @@ -398,7 +398,7 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste }, Replicas: int32(replicas), QueueRef: queue, - LargeMessageStoreRef: lms, + ObjectStorageRef: os, }, } @@ -406,7 +406,7 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste } // newIngestorCluster creates and initialize the CR for IngestorCluster Kind -func newIngestorCluster(name, ns string, replicas int, splunkImage string, queue, lms corev1.ObjectReference, serviceAccountName string) 
*enterpriseApi.IngestorCluster { +func newIngestorCluster(name, ns string, replicas int, splunkImage string, queue, os corev1.ObjectReference, serviceAccountName string) *enterpriseApi.IngestorCluster { return &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IngestorCluster", @@ -428,7 +428,7 @@ func newIngestorCluster(name, ns string, replicas int, splunkImage string, queue }, Replicas: int32(replicas), QueueRef: queue, - LargeMessageStoreRef: lms, + ObjectStorageRef: os, }, } } @@ -447,17 +447,17 @@ func newQueue(name, ns string, queue enterpriseApi.QueueSpec) *enterpriseApi.Que } } -// newLargeMessageStore creates and initializes the CR for LargeMessageStore Kind -func newLargeMessageStore(name, ns string, lms enterpriseApi.LargeMessageStoreSpec) *enterpriseApi.LargeMessageStore { - return &enterpriseApi.LargeMessageStore{ +// newObjectStorage creates and initializes the CR for ObjectStorage Kind +func newObjectStorage(name, ns string, objStorage enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage { + return &enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ - Kind: "LargeMessageStore", + Kind: "ObjectStorage", }, ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: ns, }, - Spec: lms, + Spec: objStorage, } } From 607632f2c62ea57d9f2e682e4a10c06151135a40 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Fri, 19 Dec 2025 10:02:24 +0100 Subject: [PATCH 13/25] CSPL-4358 Making region authRegion and optional, simplifying endpoint --- api/v4/objectstorage_types.go | 2 +- api/v4/queue_types.go | 8 ++-- ...enterprise.splunk.com_indexerclusters.yaml | 13 +++--- ...nterprise.splunk.com_ingestorclusters.yaml | 13 +++--- .../enterprise.splunk.com_objectstorages.yaml | 2 +- .../bases/enterprise.splunk.com_queues.yaml | 11 +++-- .../ingestorcluster_controller_test.go | 16 +++---- internal/controller/queue_controller_test.go | 24 +++++----- pkg/splunk/enterprise/indexercluster.go | 26 +++++------ pkg/splunk/enterprise/indexercluster_test.go | 
42 +++++++++--------- pkg/splunk/enterprise/ingestorcluster.go | 16 +++---- pkg/splunk/enterprise/ingestorcluster_test.go | 44 +++++++++---------- pkg/splunk/enterprise/queue_test.go | 8 ++-- ...dex_and_ingestion_separation_suite_test.go | 16 +++---- 14 files changed, 119 insertions(+), 122 deletions(-) diff --git a/api/v4/objectstorage_types.go b/api/v4/objectstorage_types.go index 80fcd45cf..9e95392ce 100644 --- a/api/v4/objectstorage_types.go +++ b/api/v4/objectstorage_types.go @@ -43,7 +43,7 @@ type ObjectStorageSpec struct { type S3Spec struct { // +optional - // +kubebuilder:validation:Pattern=`^https://s3(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$` + // +kubebuilder:validation:Pattern=`^https?://[^\s/$.?#].[^\s]*$` // S3-compatible Service endpoint Endpoint string `json:"endpoint"` diff --git a/api/v4/queue_types.go b/api/v4/queue_types.go index 06703ac95..9828f7301 100644 --- a/api/v4/queue_types.go +++ b/api/v4/queue_types.go @@ -47,10 +47,10 @@ type SQSSpec struct { // Name of the queue Name string `json:"name"` - // +kubebuilder:validation:Required + // +optional // +kubebuilder:validation:Pattern=`^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$` - // Region of the resources - Region string `json:"region"` + // Auth Region of the resources + AuthRegion string `json:"authRegion"` // +kubebuilder:validation:Required // +kubebuilder:validation:MinLength=1 @@ -58,7 +58,7 @@ type SQSSpec struct { DLQ string `json:"dlq"` // +optional - // +kubebuilder:validation:Pattern=`^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$` + // +kubebuilder:validation:Pattern=`^https?://[^\s/$.?#].[^\s]*$` // Amazon SQS Service endpoint Endpoint string `json:"endpoint"` } diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index a9fc2d811..59faab055 100644 
--- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -8396,7 +8396,7 @@ spec: properties: endpoint: description: S3-compatible Service endpoint - pattern: ^https://s3(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + pattern: ^https?://[^\s/$.?#].[^\s]*$ type: string path: description: S3 bucket path @@ -8464,26 +8464,25 @@ spec: sqs: description: sqs specific inputs properties: + authRegion: + description: Auth Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string dlq: description: Name of the dead letter queue resource minLength: 1 type: string endpoint: description: Amazon SQS Service endpoint - pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + pattern: ^https?://[^\s/$.?#].[^\s]*$ type: string name: description: Name of the queue minLength: 1 type: string - region: - description: Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string required: - dlq - name - - region type: object required: - provider diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 46a142719..7432e96b4 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -4607,7 +4607,7 @@ spec: properties: endpoint: description: S3-compatible Service endpoint - pattern: ^https://s3(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + pattern: ^https?://[^\s/$.?#].[^\s]*$ type: string path: description: S3 bucket path @@ -4645,26 +4645,25 @@ spec: sqs: description: sqs specific inputs properties: + authRegion: + description: Auth Region of the resources + 
pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string dlq: description: Name of the dead letter queue resource minLength: 1 type: string endpoint: description: Amazon SQS Service endpoint - pattern: ^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + pattern: ^https?://[^\s/$.?#].[^\s]*$ type: string name: description: Name of the queue minLength: 1 type: string - region: - description: Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string required: - dlq - name - - region type: object required: - provider diff --git a/config/crd/bases/enterprise.splunk.com_objectstorages.yaml b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml index 1456234c6..2fac45707 100644 --- a/config/crd/bases/enterprise.splunk.com_objectstorages.yaml +++ b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml @@ -64,7 +64,7 @@ spec: properties: endpoint: description: S3-compatible Service endpoint - pattern: ^https://s3(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + pattern: ^https?://[^\s/$.?#].[^\s]*$ type: string path: description: S3 bucket path diff --git a/config/crd/bases/enterprise.splunk.com_queues.yaml b/config/crd/bases/enterprise.splunk.com_queues.yaml index 928cd34ce..2ba8d03f5 100644 --- a/config/crd/bases/enterprise.splunk.com_queues.yaml +++ b/config/crd/bases/enterprise.splunk.com_queues.yaml @@ -62,26 +62,25 @@ spec: sqs: description: sqs specific inputs properties: + authRegion: + description: Auth Region of the resources + pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ + type: string dlq: description: Name of the dead letter queue resource minLength: 1 type: string endpoint: description: Amazon SQS Service endpoint - pattern: 
^https://sqs(?:-fips)?\.[a-z]+-[a-z]+(?:-[a-z]+)?-\d+\.amazonaws\.com(?:\.cn)?(?:/[A-Za-z0-9._-]+(?:/[A-Za-z0-9._-]+)*)?$ + pattern: ^https?://[^\s/$.?#].[^\s]*$ type: string name: description: Name of the queue minLength: 1 type: string - region: - description: Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string required: - dlq - name - - region type: object required: - provider diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go index d035d1037..38e7cbb4e 100644 --- a/internal/controller/ingestorcluster_controller_test.go +++ b/internal/controller/ingestorcluster_controller_test.go @@ -79,10 +79,10 @@ var _ = Describe("IngestorCluster Controller", func() { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "smartbus-queue", - Region: "us-west-2", - DLQ: "smartbus-dlq", - Endpoint: "https://sqs.us-west-2.amazonaws.com", + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", }, }, } @@ -127,10 +127,10 @@ var _ = Describe("IngestorCluster Controller", func() { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "smartbus-queue", - Region: "us-west-2", - DLQ: "smartbus-dlq", - Endpoint: "https://sqs.us-west-2.amazonaws.com", + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", }, }, } diff --git a/internal/controller/queue_controller_test.go b/internal/controller/queue_controller_test.go index 23d40ae4c..b04a5d4b3 100644 --- a/internal/controller/queue_controller_test.go +++ b/internal/controller/queue_controller_test.go @@ -73,10 +73,10 @@ var _ = Describe("Queue Controller", func() { spec := enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "smartbus-queue", - Region: "us-west-2", - DLQ: "smartbus-dlq", - 
Endpoint: "https://sqs.us-west-2.amazonaws.com", + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", }, } CreateQueue("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) @@ -102,10 +102,10 @@ var _ = Describe("Queue Controller", func() { spec := enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "smartbus-queue", - Region: "us-west-2", - DLQ: "smartbus-dlq", - Endpoint: "https://sqs.us-west-2.amazonaws.com", + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", }, } CreateQueue("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) @@ -141,10 +141,10 @@ var _ = Describe("Queue Controller", func() { spec := enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "smartbus-queue", - Region: "us-west-2", - DLQ: "smartbus-dlq", - Endpoint: "https://sqs.us-west-2.amazonaws.com", + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", }, } bcSpec := testutils.NewQueue("test", namespace, spec) diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index f6bcd046d..60b4d5a9a 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -263,8 +263,8 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // Can not override original queue spec due to comparison in the later code queueCopy := queue if queueCopy.Spec.Provider == "sqs" { - if queueCopy.Spec.SQS.Endpoint == "" { - queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.Region) + if queueCopy.Spec.SQS.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { + queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) } } @@ -287,8 
+287,8 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // Can not override original large message store spec due to comparison in the later code osCopy := os if osCopy.Spec.Provider == "s3" { - if osCopy.Spec.S3.Endpoint == "" { - osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.Region) + if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { + osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) } } @@ -586,8 +586,8 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // Can not override original queue spec due to comparison in the later code queueCopy := queue if queueCopy.Spec.Provider == "sqs" { - if queueCopy.Spec.SQS.Endpoint == "" { - queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.Region) + if queueCopy.Spec.SQS.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { + queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) } } @@ -610,8 +610,8 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // Can not override original queue spec due to comparison in the later code osCopy := os if osCopy.Spec.Provider == "s3" { - if osCopy.Spec.S3.Endpoint == "" { - osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.Region) + if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { + osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) } } @@ -1391,7 +1391,7 @@ func pullQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldOS, newOS if newQueue.Provider == "sqs" { queueProvider = "sqs_smartbus" } - osProvider := "" + osProvider := "" if newOS.Provider == "s3" { osProvider = "sqs_smartbus" } @@ -1399,13 +1399,13 @@ func pullQueueChanged(oldQueue, newQueue 
*enterpriseApi.QueueSpec, oldOS, newOS if oldQueue.Provider != newQueue.Provider || afterDelete { inputs = append(inputs, []string{"remote_queue.type", queueProvider}) } - if oldQueue.SQS.Region != newQueue.SQS.Region || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.Region}) + if newQueue.SQS.AuthRegion != "" &&(oldQueue.SQS.AuthRegion != newQueue.SQS.AuthRegion || afterDelete) { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.AuthRegion}) } - if oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete { + if newQueue.SQS.Endpoint != "" && (oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete) { inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint}) } - if oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete { + if newOS.S3.Endpoint != "" && (oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete) { inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), newOS.S3.Endpoint}) } if oldOS.S3.Path != newOS.S3.Path || afterDelete { diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index c2b3a8063..a74ab4acd 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -1355,10 +1355,10 @@ func TestGetIndexerStatefulSet(t *testing.T) { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -2059,10 +2059,10 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: 
"test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -2099,7 +2099,7 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { assert.Equal(t, 8, len(queueChangedFieldsInputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, @@ -2111,7 +2111,7 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { assert.Equal(t, 10, len(queueChangedFieldsOutputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, @@ -2148,10 +2148,10 @@ func TestHandlePullQueueChange(t *testing.T) { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -2192,8 +2192,8 @@ func TestHandlePullQueueChange(t *testing.T) { }, }, Status: enterpriseApi.IndexerClusterStatus{ - 
ReadyReplicas: 3, - Queue: &enterpriseApi.QueueSpec{}, + ReadyReplicas: 3, + Queue: &enterpriseApi.QueueSpec{}, ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, }, } @@ -2276,7 +2276,7 @@ func TestHandlePullQueueChange(t *testing.T) { // outputs.conf propertyKVList := [][]string{ - {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, @@ -2407,10 +2407,10 @@ func TestApplyIndexerClusterManager_Queue_Success(t *testing.T) { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 17cd14a44..0fc94487b 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -229,8 +229,8 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // Can not override original queue spec due to comparison in the later code queueCopy := queue if queueCopy.Spec.Provider == "sqs" { - if queueCopy.Spec.SQS.Endpoint == "" { - queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.Region) + if queueCopy.Spec.SQS.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { + queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) } } @@ -253,8 +253,8 @@ func ApplyIngestorCluster(ctx context.Context, client 
client.Client, cr *enterpr // Can not override original queue spec due to comparison in the later code osCopy := os if osCopy.Spec.Provider == "s3" { - if osCopy.Spec.S3.Endpoint == "" { - osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.Region) + if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { + osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) } } @@ -455,13 +455,13 @@ func pushQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldOS, newOS if oldQueue.Provider != newQueue.Provider || afterDelete { output = append(output, []string{"remote_queue.type", queueProvider}) } - if oldQueue.SQS.Region != newQueue.SQS.Region || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.Region}) + if newQueue.SQS.AuthRegion != "" && (oldQueue.SQS.AuthRegion != newQueue.SQS.AuthRegion || afterDelete) { + output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.AuthRegion}) } - if oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete { + if newQueue.SQS.Endpoint != "" && (oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete) { output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint}) } - if oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete { + if newOS.S3.Endpoint != "" && (oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete) { output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), newOS.S3.Endpoint}) } if oldOS.S3.Path != newOS.S3.Path || afterDelete { diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index 7bf69ac84..fac91bbbe 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -77,10 +77,10 @@ func 
TestApplyIngestorCluster(t *testing.T) { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -285,7 +285,7 @@ func TestApplyIngestorCluster(t *testing.T) { propertyKVList := [][]string{ {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, @@ -344,10 +344,10 @@ func TestGetIngestorStatefulSet(t *testing.T) { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -430,10 +430,10 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -472,7 +472,7 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { assert.Equal(t, 10, len(queueChangedFields)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), 
queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, @@ -509,10 +509,10 @@ func TestHandlePushQueueChange(t *testing.T) { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } @@ -551,9 +551,9 @@ func TestHandlePushQueueChange(t *testing.T) { }, }, Status: enterpriseApi.IngestorClusterStatus{ - Replicas: 3, - ReadyReplicas: 3, - Queue: &enterpriseApi.QueueSpec{}, + Replicas: 3, + ReadyReplicas: 3, + Queue: &enterpriseApi.QueueSpec{}, ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, }, } @@ -635,7 +635,7 @@ func TestHandlePushQueueChange(t *testing.T) { // outputs.conf propertyKVList := [][]string{ {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, - {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.Region}, + {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, diff --git a/pkg/splunk/enterprise/queue_test.go b/pkg/splunk/enterprise/queue_test.go index 45a813282..767d33e83 100644 --- a/pkg/splunk/enterprise/queue_test.go +++ b/pkg/splunk/enterprise/queue_test.go @@ -51,10 +51,10 @@ func TestApplyQueue(t *testing.T) { Spec: enterpriseApi.QueueSpec{ Provider: "sqs", SQS: 
enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "sqs-dlq-test", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "sqs-dlq-test", }, }, } diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index e2e27d268..86231df14 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -42,10 +42,10 @@ var ( queue = enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "test-dead-letter-queue", + Name: "test-queue", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "test-dead-letter-queue", }, } objectStorage = enterpriseApi.ObjectStorageSpec{ @@ -88,10 +88,10 @@ var ( updateQueue = enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue-updated", - Region: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "test-dead-letter-queue-updated", + Name: "test-queue-updated", + AuthRegion: "us-west-2", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + DLQ: "test-dead-letter-queue-updated", }, } From fafed270b1c068601a18f4bfeb4c073e625b2fa9 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Fri, 19 Dec 2025 11:03:36 +0100 Subject: [PATCH 14/25] CSPL-4360 Fixing tests after merge --- pkg/splunk/enterprise/indexercluster_test.go | 2 ++ pkg/splunk/enterprise/util.go | 4 ++-- pkg/splunk/enterprise/util_test.go | 8 ++++---- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index 503f8beab..4f788d31a 
100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -2111,6 +2111,8 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, + {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, + {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, }, queueChangedFieldsInputs) assert.Equal(t, 12, len(queueChangedFieldsOutputs)) diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index bdc5d16ab..882a96ff3 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -417,8 +417,8 @@ func GetSmartstoreRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi. return accessKey, secretKey, namespaceScopedSecret.ResourceVersion, nil } -// GetBusRemoteVolumeSecrets is used to retrieve access key and secrete key for Index & Ingestion separation -func GetBusRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.VolumeSpec, client splcommon.ControllerClient, cr splcommon.MetaObject) (string, string, error) { +// GetQueueRemoteVolumeSecrets is used to retrieve access key and secrete key for Index & Ingestion separation +func GetQueueRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.VolumeSpec, client splcommon.ControllerClient, cr splcommon.MetaObject) (string, string, error) { namespaceScopedSecret, err := splutil.GetSecretByName(ctx, client, cr.GetNamespace(), cr.GetName(), volume.SecretRef) if err != nil { return "", "", err diff --git a/pkg/splunk/enterprise/util_test.go b/pkg/splunk/enterprise/util_test.go index 6ea7b021e..35523a028 100644 --- a/pkg/splunk/enterprise/util_test.go +++ b/pkg/splunk/enterprise/util_test.go @@ -2624,8 +2624,8 @@ func 
TestUpdateCRStatus(t *testing.T) { WithStatusSubresource(&enterpriseApi.Standalone{}). WithStatusSubresource(&enterpriseApi.MonitoringConsole{}). WithStatusSubresource(&enterpriseApi.IndexerCluster{}). - WithStatusSubresource(&enterpriseApi.Bus{}). - WithStatusSubresource(&enterpriseApi.LargeMessageStore{}). + WithStatusSubresource(&enterpriseApi.Queue{}). + WithStatusSubresource(&enterpriseApi.ObjectStorage{}). WithStatusSubresource(&enterpriseApi.IngestorCluster{}). WithStatusSubresource(&enterpriseApi.SearchHeadCluster{}) c := builder.Build() @@ -3307,8 +3307,8 @@ func TestGetCurrentImage(t *testing.T) { WithStatusSubresource(&enterpriseApi.MonitoringConsole{}). WithStatusSubresource(&enterpriseApi.IndexerCluster{}). WithStatusSubresource(&enterpriseApi.SearchHeadCluster{}). - WithStatusSubresource(&enterpriseApi.Bus{}). - WithStatusSubresource(&enterpriseApi.LargeMessageStore{}). + WithStatusSubresource(&enterpriseApi.Queue{}). + WithStatusSubresource(&enterpriseApi.ObjectStorage{}). 
WithStatusSubresource(&enterpriseApi.IngestorCluster{}) client := builder.Build() client.Create(ctx, ¤t) From e0a10ba9fc5b8993f55d5dae4e1e1f189f76c47f Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Fri, 19 Dec 2025 13:20:33 +0100 Subject: [PATCH 15/25] CSPL-4360 Fix validation that fails for status --- pkg/splunk/enterprise/indexercluster.go | 38 +++++++------------ pkg/splunk/enterprise/indexercluster_test.go | 6 ++- pkg/splunk/enterprise/ingestorcluster.go | 28 ++++++-------- pkg/splunk/enterprise/ingestorcluster_test.go | 2 +- 4 files changed, 32 insertions(+), 42 deletions(-) diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 88b6a31d0..37e81afd4 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -1327,20 +1327,22 @@ func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, } splunkClient := newSplunkClientForQueuePipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) - if newCR.Status.Queue == nil { - newCR.Status.Queue = &enterpriseApi.QueueSpec{} + newCrStatusQueue := newCR.Status.Queue + if newCrStatusQueue == nil { + newCrStatusQueue = &enterpriseApi.QueueSpec{} } - if newCR.Status.ObjectStorage == nil { - newCR.Status.ObjectStorage = &enterpriseApi.ObjectStorageSpec{} + newCrStatusObjectStorage := newCR.Status.ObjectStorage + if newCrStatusObjectStorage == nil { + newCrStatusObjectStorage = &enterpriseApi.ObjectStorageSpec{} } afterDelete := false - if (queue.Spec.SQS.Name != "" && newCR.Status.Queue.SQS.Name != "" && queue.Spec.SQS.Name != newCR.Status.Queue.SQS.Name) || - (queue.Spec.Provider != "" && newCR.Status.Queue.Provider != "" && queue.Spec.Provider != newCR.Status.Queue.Provider) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Queue.SQS.Name)); err != nil { + if (queue.Spec.SQS.Name != "" && newCrStatusQueue.SQS.Name != "" && 
queue.Spec.SQS.Name != newCrStatusQueue.SQS.Name) || + (queue.Spec.Provider != "" && newCrStatusQueue.Provider != "" && queue.Spec.Provider != newCrStatusQueue.Provider) { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCrStatusQueue.SQS.Name)); err != nil { updateErr = err } - if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Queue.SQS.Name)); err != nil { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCrStatusQueue.SQS.Name)); err != nil { updateErr = err } afterDelete = true @@ -1360,7 +1362,7 @@ func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, } } - queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCR, afterDelete, s3AccessKey, s3SecretKey) + queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCrStatusQueue, newCrStatusObjectStorage, afterDelete, s3AccessKey, s3SecretKey) for _, pbVal := range queueChangedFieldsOutputs { if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { @@ -1386,22 +1388,10 @@ func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, } // getChangedQueueFieldsForIndexer returns a list of changed queue and pipeline fields for indexer pods -func getChangedQueueFieldsForIndexer(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueIndexerStatus *enterpriseApi.IndexerCluster, afterDelete bool, s3AccessKey, s3SecretKey string) (queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields [][]string) { - // Compare queue fields - oldQueue := queueIndexerStatus.Status.Queue - if oldQueue == nil { - oldQueue = &enterpriseApi.QueueSpec{} - } - newQueue := 
queue.Spec - - oldOS := queueIndexerStatus.Status.ObjectStorage - if oldOS == nil { - oldOS = &enterpriseApi.ObjectStorageSpec{} - } - newOS := os.Spec - +func getChangedQueueFieldsForIndexer(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueStatus *enterpriseApi.QueueSpec, osStatus *enterpriseApi.ObjectStorageSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields [][]string) { // Push all queue fields - queueChangedFieldsInputs, queueChangedFieldsOutputs = pullQueueChanged(oldQueue, &newQueue, oldOS, &newOS, afterDelete, s3AccessKey, s3SecretKey) + queueChangedFieldsInputs, queueChangedFieldsOutputs = pullQueueChanged(queueStatus, &queue.Spec, osStatus, &os.Spec, afterDelete, s3AccessKey, s3SecretKey) + // Always set all pipeline fields, not just changed ones pipelineChangedFields = pipelineConfig(true) diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index 4f788d31a..c891f1dd4 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -2096,11 +2096,15 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { Name: os.Name, }, }, + Status: enterpriseApi.IndexerClusterStatus{ + Queue: &enterpriseApi.QueueSpec{}, + ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, + }, } key := "key" secret := "secret" - queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCR, false, key, secret) + queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCR.Status.Queue, newCR.Status.ObjectStorage, false, key, secret) assert.Equal(t, 10, len(queueChangedFieldsInputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 
f3db2a1fa..5aa41dd45 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -388,17 +388,19 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, } splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) - if newCR.Status.Queue == nil { - newCR.Status.Queue = &enterpriseApi.QueueSpec{} + newCrStatusQueue := newCR.Status.Queue + if newCrStatusQueue == nil { + newCrStatusQueue = &enterpriseApi.QueueSpec{} } - if newCR.Status.ObjectStorage == nil { - newCR.Status.ObjectStorage = &enterpriseApi.ObjectStorageSpec{} + newCrStatusObjectStorage := newCR.Status.ObjectStorage + if newCrStatusObjectStorage == nil { + newCrStatusObjectStorage = &enterpriseApi.ObjectStorageSpec{} } afterDelete := false - if (queue.Spec.SQS.Name != "" && newCR.Status.Queue.SQS.Name != "" && queue.Spec.SQS.Name != newCR.Status.Queue.SQS.Name) || - (queue.Spec.Provider != "" && newCR.Status.Queue.Provider != "" && queue.Spec.Provider != newCR.Status.Queue.Provider) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCR.Status.Queue.SQS.Name)); err != nil { + if (queue.Spec.SQS.Name != "" && newCrStatusQueue.SQS.Name != "" && queue.Spec.SQS.Name != newCrStatusQueue.SQS.Name) || + (queue.Spec.Provider != "" && newCrStatusQueue.Provider != "" && queue.Spec.Provider != newCrStatusQueue.Provider) { + if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCrStatusQueue.SQS.Name)); err != nil { updateErr = err } afterDelete = true @@ -418,7 +420,7 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, } } - queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCR, afterDelete, s3AccessKey, s3SecretKey) + queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCrStatusQueue, 
newCrStatusObjectStorage,afterDelete, s3AccessKey, s3SecretKey) for _, pbVal := range queueChangedFields { if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { @@ -438,15 +440,9 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, } // getChangedBusFieldsForIngestor returns a list of changed bus and pipeline fields for ingestor pods -func getChangedQueueFieldsForIngestor(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueIngestorStatus *enterpriseApi.IngestorCluster, afterDelete bool, s3AccessKey, s3SecretKey string) (queueChangedFields, pipelineChangedFields [][]string) { - oldQueue := queueIngestorStatus.Status.Queue - newQueue := &queue.Spec - - oldOS := queueIngestorStatus.Status.ObjectStorage - newOS := &os.Spec - +func getChangedQueueFieldsForIngestor(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueStatus *enterpriseApi.QueueSpec, osStatus *enterpriseApi.ObjectStorageSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (queueChangedFields, pipelineChangedFields [][]string) { // Push changed bus fields - queueChangedFields = pushQueueChanged(oldQueue, newQueue, oldOS, newOS, afterDelete, s3AccessKey, s3SecretKey) + queueChangedFields = pushQueueChanged(queueStatus, &queue.Spec, osStatus, &os.Spec, afterDelete, s3AccessKey, s3SecretKey) // Always changed pipeline fields pipelineChangedFields = pipelineConfig(false) diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index 448929572..995e52ff8 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -462,7 +462,7 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { key := "key" secret := "secret" - queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCR, false, key, secret) + queueChangedFields, 
pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCR.Status.Queue, newCR.Status.ObjectStorage, false, key, secret) assert.Equal(t, 12, len(queueChangedFields)) assert.Equal(t, [][]string{ From 155b21a49fda387472a95a93391c27865d16cf1b Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Fri, 19 Dec 2025 14:58:10 +0100 Subject: [PATCH 16/25] CSPL-4360 Fix failing to get k8s secret --- pkg/splunk/enterprise/indexercluster.go | 17 +++++++++-------- pkg/splunk/enterprise/indexercluster_test.go | 3 ++- pkg/splunk/enterprise/ingestorcluster.go | 4 ++-- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 37e81afd4..558f862b1 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -115,7 +115,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError } - mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) // Check if we have configured enough number(<= RF) of replicas if mgr.cr.Status.ClusterManagerPhase == enterpriseApi.PhaseReady { err = VerifyRFPeers(ctx, mgr, client) @@ -248,7 +248,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller if cr.Spec.QueueRef.Namespace != "" { ns = cr.Spec.QueueRef.Namespace } - err = client.Get(context.Background(), types.NamespacedName{ + err = client.Get(ctx, types.NamespacedName{ Name: cr.Spec.QueueRef.Name, Namespace: ns, }, &queue) @@ -272,7 +272,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller if cr.Spec.ObjectStorageRef.Namespace != "" { ns = cr.Spec.ObjectStorageRef.Namespace } - err = client.Get(context.Background(), types.NamespacedName{ + err = client.Get(ctx, 
types.NamespacedName{ Name: cr.Spec.ObjectStorageRef.Name, Namespace: ns, }, &os) @@ -292,7 +292,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // If bus is updated if cr.Spec.QueueRef.Name != "" { if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil || !reflect.DeepEqual(*cr.Status.Queue, queue.Spec) || !reflect.DeepEqual(*cr.Status.ObjectStorage, os.Spec) { - mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) err = mgr.handlePullQueueChange(ctx, cr, queueCopy, osCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) @@ -443,7 +443,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError } - mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) // Check if we have configured enough number(<= RF) of replicas if mgr.cr.Status.ClusterMasterPhase == enterpriseApi.PhaseReady { err = VerifyRFPeers(ctx, mgr, client) @@ -621,7 +621,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // If bus is updated if cr.Spec.QueueRef.Name != "" { if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil || !reflect.DeepEqual(*cr.Status.Queue, queue.Spec) || !reflect.DeepEqual(*cr.Status.ObjectStorage, os.Spec) { - mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient) + mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) err = mgr.handlePullQueueChange(ctx, cr, 
queueCopy, osCopy, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) @@ -722,12 +722,13 @@ type indexerClusterPodManager struct { } // newIndexerClusterPodManager function to create pod manager this is added to write unit test case -var newIndexerClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc) indexerClusterPodManager { +var newIndexerClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc, c splcommon.ControllerClient) indexerClusterPodManager { return indexerClusterPodManager{ log: log, cr: cr, secrets: secret, newSplunkClient: newSplunkClient, + c: c, } } @@ -1391,7 +1392,7 @@ func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, func getChangedQueueFieldsForIndexer(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueStatus *enterpriseApi.QueueSpec, osStatus *enterpriseApi.ObjectStorageSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields [][]string) { // Push all queue fields queueChangedFieldsInputs, queueChangedFieldsOutputs = pullQueueChanged(queueStatus, &queue.Spec, osStatus, &os.Spec, afterDelete, s3AccessKey, s3SecretKey) - + // Always set all pipeline fields, not just changed ones pipelineChangedFields = pipelineConfig(true) diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index c891f1dd4..2b4026ac5 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -1569,7 +1569,7 @@ func TestIndexerClusterWithReadyState(t *testing.T) { return nil } - newIndexerClusterPodManager = func(log logr.Logger, cr 
*enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc) indexerClusterPodManager { + newIndexerClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IndexerCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc, c splcommon.ControllerClient) indexerClusterPodManager { return indexerClusterPodManager{ log: log, cr: cr, @@ -1579,6 +1579,7 @@ func TestIndexerClusterWithReadyState(t *testing.T) { c.Client = mclient return c }, + c: c, } } diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 5aa41dd45..62693e1b5 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -238,7 +238,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr if cr.Spec.ObjectStorageRef.Namespace != "" { ns = cr.Spec.ObjectStorageRef.Namespace } - err = client.Get(context.Background(), types.NamespacedName{ + err = client.Get(ctx, types.NamespacedName{ Name: cr.Spec.ObjectStorageRef.Name, Namespace: ns, }, &os) @@ -420,7 +420,7 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, } } - queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCrStatusQueue, newCrStatusObjectStorage,afterDelete, s3AccessKey, s3SecretKey) + queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCrStatusQueue, newCrStatusObjectStorage, afterDelete, s3AccessKey, s3SecretKey) for _, pbVal := range queueChangedFields { if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { From f8afd5a7790c489e2997921ba08060e2dd87c075 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Mon, 22 Dec 2025 13:51:03 +0100 Subject: [PATCH 17/25] CSPL-4360 Fix failing integ and helm tests --- api/v4/objectstorage_types.go | 2 +- 
.../enterprise.splunk.com_objectstorages.yaml | 2 +- docs/CustomResources.md | 10 +-- docs/IndexIngestionSeparation.md | 24 +++--- .../enterprise_v4_indexercluster.yaml | 4 +- .../enterprise_v4_objectstorages.yaml | 2 +- .../templates/enterprise_v4_queues.yaml | 4 +- .../02-assert.yaml | 50 +++++------ .../03-assert.yaml | 20 ++--- .../splunk_index_ingest_sep.yaml | 8 +- pkg/splunk/enterprise/indexercluster.go | 18 ++-- pkg/splunk/enterprise/ingestorcluster.go | 13 +-- pkg/splunk/enterprise/types.go | 2 +- ...dex_and_ingestion_separation_suite_test.go | 28 +++---- .../index_and_ingestion_separation_test.go | 83 ++++++++++--------- test/testenv/remote_index_utils.go | 4 +- test/testenv/util.go | 8 +- 17 files changed, 147 insertions(+), 135 deletions(-) diff --git a/api/v4/objectstorage_types.go b/api/v4/objectstorage_types.go index 9e95392ce..08205743f 100644 --- a/api/v4/objectstorage_types.go +++ b/api/v4/objectstorage_types.go @@ -55,7 +55,7 @@ type S3Spec struct { // ObjectStorageStatus defines the observed state of ObjectStorage. 
type ObjectStorageStatus struct { - // Phase of the large message store + // Phase of the object storage Phase Phase `json:"phase"` // Resource revision tracker diff --git a/config/crd/bases/enterprise.splunk.com_objectstorages.yaml b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml index 2fac45707..c84474921 100644 --- a/config/crd/bases/enterprise.splunk.com_objectstorages.yaml +++ b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml @@ -87,7 +87,7 @@ spec: description: Auxillary message describing CR status type: string phase: - description: Phase of the large message store + description: Phase of the object storage enum: - Pending - Ready diff --git a/docs/CustomResources.md b/docs/CustomResources.md index 157a9b123..bd85c05ca 100644 --- a/docs/CustomResources.md +++ b/docs/CustomResources.md @@ -404,21 +404,21 @@ spec: endpoint: https://s3.us-west-2.amazonaws.com ``` -ObjectStorage inputs can be found in the table below. As of now, only S3 provider of large message store is supported. +ObjectStorage inputs can be found in the table below. As of now, only S3 provider of object storage is supported. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| provider | string | [Required] Provider of large message store (Allowed values: s3) | -| s3 | S3 | [Required if provider=s3] S3 large message store inputs | +| provider | string | [Required] Provider of object storage (Allowed values: s3) | +| s3 | S3 | [Required if provider=s3] S3 object storage inputs | -S3 large message store inputs can be found in the table below. +S3 object storage inputs can be found in the table below. 
| Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | | endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint -Change of any of the large message queue inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. +Change of any of the object storage inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. ## MonitoringConsole Resource Spec Parameters diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index d532e189c..c7b05dcae 100644 --- a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -44,7 +44,7 @@ SQS message queue inputs can be found in the table below. | endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint | dlq | string | [Required] Name of the dead letter queue | -**First provisioning or update of any of the bus inputs requires Ingestor Cluster and Indexer Cluster Splunkd restart, but this restart is implemented automatically and done by SOK.** +**First provisioning or update of any of the queue inputs requires Ingestor Cluster and Indexer Cluster Splunkd restart, but this restart is implemented automatically and done by SOK.** ## Example ``` @@ -67,21 +67,21 @@ ObjectStorage is introduced to store large message (messages that exceed the siz ## Spec -ObjectStorage inputs can be found in the table below. As of now, only S3 provider of large message store is supported. +ObjectStorage inputs can be found in the table below. As of now, only S3 provider of object storage is supported. 
| Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | -| provider | string | [Required] Provider of large message store (Allowed values: s3) | -| s3 | S3 | [Required if provider=s3] S3 large message store inputs | +| provider | string | [Required] Provider of object storage (Allowed values: s3) | +| s3 | S3 | [Required if provider=s3] S3 object storage inputs | -S3 large message store inputs can be found in the table below. +S3 object storage inputs can be found in the table below. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | | endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint -Change of any of the large message queue inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. +Change of any of the object storage inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. ## Example ``` @@ -108,13 +108,13 @@ In addition to common spec inputs, the IngestorCluster resource provides the fol | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | | queueRef | corev1.ObjectReference | Message queue reference | -| objectStorageRef | corev1.ObjectReference | Large message store reference | +| objectStorageRef | corev1.ObjectReference | Object storage reference | ## Example The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. 
This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the ingestion process. -In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. +In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The object storage is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. ``` apiVersion: enterprise.splunk.com/v4 @@ -145,13 +145,13 @@ In addition to common spec inputs, the IndexerCluster resource provides the foll | ---------- | ------- | ------------------------------------------------- | | replicas | integer | The number of replicas (defaults to 3) | | queueRef | corev1.ObjectReference | Message queue reference | -| objectStorageRef | corev1.ObjectReference | Large message store reference | +| objectStorageRef | corev1.ObjectReference | Object storage reference | ## Example The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the indexing process. 
-In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The large message store is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. +In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The object storage is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. ``` apiVersion: enterprise.splunk.com/v4 @@ -717,7 +717,7 @@ Spec: Name: queue Namespace: default Image: splunk/splunk:${SPLUNK_IMAGE_VERSION} - Large Message Store Ref: + Object Storage Ref: Name: os Namespace: default Replicas: 3 @@ -741,7 +741,7 @@ Status: Endpoint: https://sqs.us-west-2.amazonaws.com Name: sqs-test Provider: sqs - Large Message Store: + Object Storage: S3: Endpoint: https://s3.us-west-2.amazonaws.com Path: s3://ingestion/smartbus-test diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml index 235505530..e5541e017 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_indexercluster.yaml @@ -170,8 +170,8 @@ items: namespace: {{ .namespace }} {{- end }} {{- end }} - {{- with $.Values.indexerCluster.objectStoreRef }} - objectStoreRef: + {{- with $.Values.indexerCluster.objectStorageRef }} + objectStorageRef: name: {{ .name }} {{- if .namespace }} namespace: {{ .namespace }} diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml 
b/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml index 7cd5bdca0..033aed904 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_objectstorages.yaml @@ -1,4 +1,4 @@ -{{- if .Values.objectStorage.enabled }} +{{- if .Values.objectStorage }} {{- if .Values.objectStorage.enabled }} apiVersion: enterprise.splunk.com/v4 kind: ObjectStorage diff --git a/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml b/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml index 09cd949dc..06a3c5dbd 100644 --- a/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml +++ b/helm-chart/splunk-enterprise/templates/enterprise_v4_queues.yaml @@ -26,8 +26,8 @@ spec: {{- if .name }} name: {{ .name | quote }} {{- end }} - {{- if .region }} - region: {{ .region | quote }} + {{- if .authRegion }} + authRegion: {{ .authRegion | quote }} {{- end }} {{- if .volumes }} volumes: diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index 547f2a358..ca56ca5ef 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -1,30 +1,30 @@ --- -# assert for bus custom resource to be ready +# assert for queue custom resource to be ready apiVersion: enterprise.splunk.com/v4 -kind: Bus +kind: Queue metadata: - name: bus + name: queue spec: provider: sqs sqs: - name: sqs-test - region: us-west-2 + name: index-ingest-separation-test-q + authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - dlq: sqs-dlq-test + dlq: index-ingest-separation-test-dlq status: phase: Ready --- -# assert for large message store custom resource to be ready +# assert for object storage custom resource to be ready apiVersion: enterprise.splunk.com/v4 -kind: LargeMessageStore +kind: ObjectStorage metadata: - name: 
lms + name: os spec: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com - path: s3://ingestion/smartbus-test + path: s3://index-ingest-separation-test-bucket/smartbus-test status: phase: Ready @@ -61,24 +61,24 @@ metadata: name: indexer spec: replicas: 3 - busRef: - name: bus - largeMessageStoreRef: - name: lms + queueRef: + name: queue + objectStorageRef: + name: os status: phase: Ready - bus: + queue: provider: sqs sqs: - name: sqs-test - region: us-west-2 + name: index-ingest-separation-test-q + authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - dlq: sqs-dlq-test - largeMessageStore: + dlq: index-ingest-separation-test-dlq + objectStorage: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com - path: s3://ingestion/smartbus-test + path: s3://index-ingest-separation-test-bucket/smartbus-test --- # check for stateful set and replicas as configured @@ -103,7 +103,7 @@ kind: IngestorCluster metadata: name: ingestor spec: - replicas: 4 + replicas: 3 queueRef: name: queue objectStorageRef: @@ -113,15 +113,15 @@ status: queue: provider: sqs sqs: - name: sqs-test - region: us-west-2 + name: index-ingest-separation-test-q + authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - dlq: sqs-dlq-test + dlq: index-ingest-separation-test-dlq objectStorage: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com - path: s3://ingestion/smartbus-test + path: s3://index-ingest-separation-test-bucket/smartbus-test --- # check for stateful set and replicas as configured diff --git a/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml index 819620baa..765a22192 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml @@ -6,24 +6,24 @@ metadata: name: ingestor spec: replicas: 4 - busRef: - name: bus - largeMessageStoreRef: - name: lms + queueRef: + name: queue + 
objectStorageRef: + name: os status: phase: Ready - bus: + queue: provider: sqs sqs: - name: sqs-test - region: us-west-2 + name: index-ingest-separation-test-q + authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - dlq: sqs-dlq-test - largeMessageStore: + dlq: index-ingest-separation-test-dlq + objectStorage: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com - path: s3://ingestion/smartbus-test + path: s3://index-ingest-separation-test-bucket/smartbus-test --- # check for stateful sets and replicas updated diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml index 7bec8ee7d..46ef7fce3 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -10,10 +10,10 @@ queue: name: queue provider: sqs sqs: - name: sqs-test - region: us-west-2 + name: index-ingest-separation-test-q + authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com - dlq: sqs-dlq-test + dlq: index-ingest-separation-test-dlq volumes: - name: helm-bus-secret-ref-test secretRef: s3-secret @@ -24,7 +24,7 @@ objectStorage: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com - path: s3://ingestion/smartbus-test + path: s3://index-ingest-separation-test-bucket/smartbus-test ingestorCluster: enabled: true diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 558f862b1..3808539cc 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -76,6 +76,10 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // updates status after function completes cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError + if cr.Status.Replicas < cr.Spec.Replicas { + cr.Status.Queue = nil + cr.Status.ObjectStorage = nil + } cr.Status.Replicas = 
cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) if cr.Status.Peers == nil { @@ -265,7 +269,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } } - // Large Message Store + // Object Storage os := enterpriseApi.ObjectStorage{} if cr.Spec.ObjectStorageRef.Name != "" { ns := cr.GetNamespace() @@ -281,7 +285,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } } - // Can not override original large message store spec due to comparison in the later code + // Can not override original object storage spec due to comparison in the later code osCopy := os if osCopy.Spec.Provider == "s3" { if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { @@ -289,7 +293,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } } - // If bus is updated + // If queue is updated if cr.Spec.QueueRef.Name != "" { if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil || !reflect.DeepEqual(*cr.Status.Queue, queue.Spec) || !reflect.DeepEqual(*cr.Status.ObjectStorage, os.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) @@ -402,6 +406,10 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, // updates status after function completes cr.Status.Phase = enterpriseApi.PhaseError cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError + if cr.Status.Replicas < cr.Spec.Replicas { + cr.Status.Queue = nil + cr.Status.ObjectStorage = nil + } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) if cr.Status.Peers == nil { @@ -594,7 +602,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } } - // Large Message Store + // Object Storage os := enterpriseApi.ObjectStorage{} if cr.Spec.ObjectStorageRef.Name != "" { 
ns := cr.GetNamespace() @@ -618,7 +626,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } } - // If bus is updated + // If queue is updated if cr.Spec.QueueRef.Name != "" { if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil || !reflect.DeepEqual(*cr.Status.Queue, queue.Spec) || !reflect.DeepEqual(*cr.Status.ObjectStorage, os.Spec) { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 62693e1b5..78a51ede2 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -71,7 +71,10 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // Update the CR Status defer updateCRStatus(ctx, client, cr, &err) - + if cr.Status.Replicas < cr.Spec.Replicas { + cr.Status.Queue = nil + cr.Status.ObjectStorage = nil + } cr.Status.Replicas = cr.Spec.Replicas // If needed, migrate the app framework status @@ -231,7 +234,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } } - // Large Message Store + // Object Storage os := enterpriseApi.ObjectStorage{} if cr.Spec.ObjectStorageRef.Name != "" { ns := cr.GetNamespace() @@ -255,7 +258,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } } - // If bus is updated + // If queue is updated if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil || !reflect.DeepEqual(*cr.Status.Queue, queue.Spec) || !reflect.DeepEqual(*cr.Status.ObjectStorage, os.Spec) { mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) err = mgr.handlePushQueueChange(ctx, cr, queueCopy, osCopy, client) @@ -439,9 +442,9 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, return updateErr } -// getChangedBusFieldsForIngestor returns a list of 
changed bus and pipeline fields for ingestor pods +// getChangedQueueFieldsForIngestor returns a list of changed queue and pipeline fields for ingestor pods func getChangedQueueFieldsForIngestor(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueStatus *enterpriseApi.QueueSpec, osStatus *enterpriseApi.ObjectStorageSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (queueChangedFields, pipelineChangedFields [][]string) { - // Push changed bus fields + // Push changed queue fields queueChangedFields = pushQueueChanged(queueStatus, &queue.Spec, osStatus, &os.Spec, afterDelete, s3AccessKey, s3SecretKey) // Always changed pipeline fields diff --git a/pkg/splunk/enterprise/types.go b/pkg/splunk/enterprise/types.go index fe96430e4..4267662d8 100644 --- a/pkg/splunk/enterprise/types.go +++ b/pkg/splunk/enterprise/types.go @@ -66,7 +66,7 @@ const ( // SplunkQueue is the queue instance SplunkQueue InstanceType = "queue" - // SplunkObjectStorage is the large message store instance + // SplunkObjectStorage is the object storage instance SplunkObjectStorage InstanceType = "object-storage" // SplunkDeployer is an instance that distributes baseline configurations and apps to search head cluster members diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index 86231df14..8aac52220 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -42,29 +42,29 @@ var ( queue = enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue", + Name: "index-ingest-separation-test-q", AuthRegion: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "test-dead-letter-queue", + DLQ: "index-ingest-separation-test-dlq", }, } objectStorage = enterpriseApi.ObjectStorageSpec{ Provider: 
"s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: "s3://test-bucket/smartbus-test", + Path: "s3://index-ingest-separation-test-bucket/smartbus-test", }, } serviceAccountName = "index-ingest-sa" inputs = []string{ - "[remote_queue:test-queue]", + "[remote_queue:index-ingest-separation-test-q]", "remote_queue.type = sqs_smartbus", "remote_queue.sqs_smartbus.auth_region = us-west-2", - "remote_queue.sqs_smartbus.dead_letter_queue.name = test-dead-letter-queue", + "remote_queue.sqs_smartbus.dead_letter_queue.name = index-ingest-separation-test-dlq", "remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com", "remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com", - "remote_queue.sqs_smartbus.large_message_store.path = s3://test-bucket/smartbus-test", + "remote_queue.sqs_smartbus.large_message_store.path = s3://index-ingest-separation-test-bucket/smartbus-test", "remote_queue.sqs_smartbus.retry_policy = max_count", "remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4"} outputs = append(inputs, "remote_queue.sqs_smartbus.encoding_format = s2s", "remote_queue.sqs_smartbus.send_interval = 5s") @@ -88,21 +88,21 @@ var ( updateQueue = enterpriseApi.QueueSpec{ Provider: "sqs", SQS: enterpriseApi.SQSSpec{ - Name: "test-queue-updated", + Name: "index-ingest-separation-test-q-updated", AuthRegion: "us-west-2", Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "test-dead-letter-queue-updated", + DLQ: "index-ingest-separation-test-dlq-updated", }, } updatedInputs = []string{ - "[remote_queue:test-queue-updated]", + "[remote_queue:index-ingest-separation-test-q-updated]", "remote_queue.type = sqs_smartbus", "remote_queue.sqs_smartbus.auth_region = us-west-2", - "remote_queue.sqs_smartbus.dead_letter_queue.name = test-dead-letter-queue-updated", + "remote_queue.sqs_smartbus.dead_letter_queue.name = index-ingest-separation-test-dlq-updated", "remote_queue.sqs_smartbus.endpoint 
= https://sqs.us-west-2.amazonaws.com", "remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com", - "remote_queue.sqs_smartbus.large_message_store.path = s3://test-bucket-updated/smartbus-test", + "remote_queue.sqs_smartbus.large_message_store.path = s3://index-ingest-separation-test-bucket/smartbus-test", "remote_queue.sqs_smartbus.retry_policy = max", "remote_queue.max.sqs_smartbus.max_retries_per_part = 5"} updatedOutputs = append(updatedInputs, "remote_queue.sqs_smartbus.encoding_format = s2s", "remote_queue.sqs_smartbus.send_interval = 4s") @@ -116,9 +116,9 @@ var ( updatedDefaultsIngest = append(updatedDefaultsAll, "[pipeline:indexerPipe]\ndisabled = true") inputsShouldNotContain = []string{ - "[remote_queue:test-queue]", - "remote_queue.sqs_smartbus.dead_letter_queue.name = test-dead-letter-queue", - "remote_queue.sqs_smartbus.large_message_store.path = s3://test-bucket/smartbus-test", + "[remote_queue:index-ingest-separation-test-q]", + "remote_queue.sqs_smartbus.dead_letter_queue.name = index-ingest-separation-test-dlq", + "remote_queue.sqs_smartbus.large_message_store.path = s3://index-ingest-separation-test-bucket/smartbus-test", "remote_queue.sqs_smartbus.retry_policy = max_count", "remote_queue.sqs_smartbus.max_count.max_retries_per_part = 4"} outputsShouldNotContain = append(inputs, "remote_queue.sqs_smartbus.send_interval = 5s") diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index b5e0449f8..85069a071 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -75,13 +75,13 @@ var _ = Describe("indingsep test", func() { Context("Ingestor and Indexer deployment", func() { It("indingsep, smoke, indingsep: Splunk Operator can deploy Ingestors and Indexers", func() { + // TODO: Remove secret 
reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+ // Create Service Account - testcaseEnvInst.Log.Info("Create Service Account") - testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // testcaseEnvInst.Log.Info("Create Service Account") + // testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // TODO: Remove secret reference once IRSA fixed for Splunk and EKS 1.34+ // Secret reference - volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} queue.SQS.VolList = volumeSpec updateQueue.SQS.VolList = volumeSpec @@ -97,7 +97,7 @@ var _ = Describe("indingsep test", func() { // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -107,7 +107,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy 
Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -137,11 +137,11 @@ var _ = Describe("indingsep test", func() { Expect(err).To(Succeed(), "Unable to delete Ingestor Cluster instance", "Ingestor Cluster Name", ingest) // Delete the Queue - queue := &enterpriseApi.Queue{} - err = deployment.GetInstance(ctx, "queue", queue) - Expect(err).To(Succeed(), "Unable to get Queue instance", "Queue Name", queue) - err = deployment.DeleteCR(ctx, queue) - Expect(err).To(Succeed(), "Unable to delete Queue", "Queue Name", queue) + q = &enterpriseApi.Queue{} + err = deployment.GetInstance(ctx, "queue", q) + Expect(err).To(Succeed(), "Unable to get Queue instance", "Queue Name", q) + err = deployment.DeleteCR(ctx, q) + Expect(err).To(Succeed(), "Unable to delete Queue", "Queue Name", q) // Delete the ObjectStorage objStorage = &enterpriseApi.ObjectStorage{} @@ -154,13 +154,13 @@ var _ = Describe("indingsep test", func() { Context("Ingestor and Indexer deployment", func() { It("indingsep, smoke, indingsep: Splunk Operator can deploy Ingestors and Indexers with additional configurations", func() { + // TODO: Remove secret reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+ // Create Service Account - testcaseEnvInst.Log.Info("Create Service Account") - testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // testcaseEnvInst.Log.Info("Create Service Account") + // testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // TODO: Remove secret reference once IRSA fixed for Splunk and EKS 1.34+ // Secret reference - volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} queue.SQS.VolList = volumeSpec updateQueue.SQS.VolList = volumeSpec @@ -174,24 +174,19 @@ var _ = Describe("indingsep test", func() { 
objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") - // Upload apps to S3 - testcaseEnvInst.Log.Info("Upload apps to S3") - appFileList := testenv.GetAppFileList(appListV1) - _, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDir, appFileList, downloadDirV1) - Expect(err).To(Succeed(), "Unable to upload V1 apps to S3 test directory for IngestorCluster") - // Deploy Ingestor Cluster with additional configurations (similar to standalone app framework test) appSourceName := "appframework-" + enterpriseApi.ScopeLocal + testenv.RandomDNSName(3) appFrameworkSpec := testenv.GenerateAppFrameworkSpec(ctx, testcaseEnvInst, appSourceVolumeName, enterpriseApi.ScopeLocal, appSourceName, s3TestDir, 60) appFrameworkSpec.MaxConcurrentAppDownloads = uint64(5) ic := &enterpriseApi.IngestorCluster{ ObjectMeta: metav1.ObjectMeta{ - Name: deployment.GetName() + "-ingest", - Namespace: testcaseEnvInst.GetName(), + Name: deployment.GetName() + "-ingest", + Namespace: testcaseEnvInst.GetName(), + Finalizers: []string{"enterprise.splunk.com/delete-pvc"}, }, Spec: enterpriseApi.IngestorClusterSpec{ CommonSplunkSpec: enterpriseApi.CommonSplunkSpec{ - ServiceAccount: serviceAccountName, + // ServiceAccount: serviceAccountName, LivenessInitialDelaySeconds: 600, ReadinessInitialDelaySeconds: 50, StartupProbe: &enterpriseApi.Probe{ @@ -217,10 +212,10 @@ var _ = Describe("indingsep test", func() { Image: testcaseEnvInst.GetSplunkImage(), }, }, - QueueRef: v1.ObjectReference{Name: q.Name}, - ObjectStorageRef: v1.ObjectReference{Name: objStorage.Name}, - Replicas: 3, - AppFrameworkConfig: appFrameworkSpec, + QueueRef: v1.ObjectReference{Name: q.Name}, + ObjectStorageRef: v1.ObjectReference{Name: objStorage.Name}, + Replicas: 3, + AppFrameworkConfig: appFrameworkSpec, }, } @@ -232,6 +227,12 @@ var _ = Describe("indingsep test", func() { testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") 
testenv.IngestorReady(ctx, deployment, testcaseEnvInst) + // Upload apps to S3 + testcaseEnvInst.Log.Info("Upload apps to S3") + appFileList := testenv.GetAppFileList(appListV1) + _, err = testenv.UploadFilesToS3(testS3Bucket, s3TestDir, appFileList, downloadDirV1) + Expect(err).To(Succeed(), "Unable to upload V1 apps to S3 test directory for IngestorCluster") + // Verify Ingestor Cluster Pods have apps installed testcaseEnvInst.Log.Info("Verify Ingestor Cluster Pods have apps installed") ingestorPod := []string{fmt.Sprintf(testenv.IngestorPod, deployment.GetName()+"-ingest", 0)} @@ -264,15 +265,15 @@ var _ = Describe("indingsep test", func() { Context("Ingestor and Indexer deployment", func() { It("indingsep, integration, indingsep: Splunk Operator can deploy Ingestors and Indexers with correct setup", func() { + // TODO: Remove secret reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+ // Create Service Account - testcaseEnvInst.Log.Info("Create Service Account") - testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // testcaseEnvInst.Log.Info("Create Service Account") + // testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // TODO: Remove secret reference once IRSA fixed for Splunk and EKS 1.34+ // Secret reference - volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} queue.SQS.VolList = volumeSpec - + // Deploy Queue testcaseEnvInst.Log.Info("Deploy Queue") q, err := deployment.DeployQueue(ctx, "queue", queue) @@ -285,7 +286,7 @@ var _ = Describe("indingsep test", func() { // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: 
objStorage.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -295,7 +296,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase @@ -376,13 +377,13 @@ var _ = Describe("indingsep test", func() { Context("Ingestor and Indexer deployment", func() { It("indingsep, integration, indingsep: Splunk Operator can update Ingestors and Indexers with correct setup", func() { + // TODO: Remove secret reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+ // Create Service Account - testcaseEnvInst.Log.Info("Create Service Account") - testcaseEnvInst.CreateServiceAccount(serviceAccountName) + // testcaseEnvInst.Log.Info("Create Service Account") + // testcaseEnvInst.CreateServiceAccount(serviceAccountName) - // TODO: Remove secret reference once IRSA fixed for Splunk and EKS 1.34+ // Secret reference - volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateBusVolumeSpec("bus-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} queue.SQS.VolList = 
volumeSpec updateQueue.SQS.VolList = volumeSpec @@ -398,7 +399,7 @@ var _ = Describe("indingsep test", func() { // Deploy Ingestor Cluster testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) + _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") // Deploy Cluster Manager @@ -408,7 +409,7 @@ var _ = Describe("indingsep test", func() { // Deploy Indexer Cluster testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, serviceAccountName) + _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") // Ensure that Ingestor Cluster is in Ready phase diff --git a/test/testenv/remote_index_utils.go b/test/testenv/remote_index_utils.go index 84e5c0709..f696a4a17 100644 --- a/test/testenv/remote_index_utils.go +++ b/test/testenv/remote_index_utils.go @@ -86,8 +86,8 @@ func RollHotToWarm(ctx context.Context, deployment *Deployment, podName string, return true } -// GeneratBusVolumeSpec return VolumeSpec struct with given values -func GenerateBusVolumeSpec(name, secretRef string) enterpriseApi.VolumeSpec { +// GenerateQueueVolumeSpec return VolumeSpec struct with given values +func GenerateQueueVolumeSpec(name, secretRef string) enterpriseApi.VolumeSpec { return enterpriseApi.VolumeSpec{ Name: name, SecretRef: 
secretRef, diff --git a/test/testenv/util.go b/test/testenv/util.go index d9c6d5807..366ea3668 100644 --- a/test/testenv/util.go +++ b/test/testenv/util.go @@ -396,8 +396,8 @@ func newIndexerCluster(name, ns, licenseManagerName string, replicas int, cluste }, Defaults: ansibleConfig, }, - Replicas: int32(replicas), - QueueRef: queue, + Replicas: int32(replicas), + QueueRef: queue, ObjectStorageRef: os, }, } @@ -426,8 +426,8 @@ func newIngestorCluster(name, ns string, replicas int, splunkImage string, queue Image: splunkImage, }, }, - Replicas: int32(replicas), - QueueRef: queue, + Replicas: int32(replicas), + QueueRef: queue, ObjectStorageRef: os, }, } From 47d1a354b4025f47cbaea5a4fce44bf77a368157 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Thu, 8 Jan 2026 17:32:52 +0100 Subject: [PATCH 18/25] CSPL-4360 Fixing failing tests due to incorrect secret ref --- ...AL2023-build-test-push-workflow-AL2023.yml | 2 + .../arm-AL2023-int-test-workflow.yml | 2 + .../arm-RHEL-build-test-push-workflow.yml | 2 + .../workflows/arm-RHEL-int-test-workflow.yml | 2 + .../arm-Ubuntu-build-test-push-workflow.yml | 2 + .../arm-Ubuntu-int-test-workflow.yml | 2 + .../workflows/build-test-push-workflow.yml | 2 + .../distroless-build-test-push-workflow.yml | 2 + .../distroless-int-test-workflow.yml | 2 + .github/workflows/helm-test-workflow.yml | 2 + .github/workflows/int-test-workflow.yml | 2 + .../workflows/manual-int-test-workflow.yml | 2 + .../namespace-scope-int-workflow.yml | 2 + .../workflows/nightly-int-test-workflow.yml | 2 + .../01-assert.yaml | 2 +- .../01-create-s3-secret.yaml | 2 +- .../splunk_index_ingest_sep.yaml | 2 +- .../index_and_ingestion_separation_test.go | 8 +- test/testenv/testcaseenv.go | 99 ++++++++++++------- test/testenv/testenv.go | 64 ++++++------ 20 files changed, 134 insertions(+), 71 deletions(-) diff --git a/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml b/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml index 
8ccaf2e65..f3a9e38f5 100644 --- a/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml +++ b/.github/workflows/arm-AL2023-build-test-push-workflow-AL2023.yml @@ -146,6 +146,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Chekcout code uses: actions/checkout@v2 diff --git a/.github/workflows/arm-AL2023-int-test-workflow.yml b/.github/workflows/arm-AL2023-int-test-workflow.yml index bdd7fe563..9003cb439 100644 --- a/.github/workflows/arm-AL2023-int-test-workflow.yml +++ b/.github/workflows/arm-AL2023-int-test-workflow.yml @@ -94,6 +94,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/arm-RHEL-build-test-push-workflow.yml b/.github/workflows/arm-RHEL-build-test-push-workflow.yml index d108005e7..0f473836e 100644 --- a/.github/workflows/arm-RHEL-build-test-push-workflow.yml +++ b/.github/workflows/arm-RHEL-build-test-push-workflow.yml @@ -94,6 +94,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/arm-RHEL-int-test-workflow.yml b/.github/workflows/arm-RHEL-int-test-workflow.yml index 681491b61..1718b316b 100644 --- a/.github/workflows/arm-RHEL-int-test-workflow.yml +++ b/.github/workflows/arm-RHEL-int-test-workflow.yml @@ -94,6 +94,8 @@ jobs: 
DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml b/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml index 356812323..8e0d6aa3d 100644 --- a/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml +++ b/.github/workflows/arm-Ubuntu-build-test-push-workflow.yml @@ -146,6 +146,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Chekcout code uses: actions/checkout@v2 diff --git a/.github/workflows/arm-Ubuntu-int-test-workflow.yml b/.github/workflows/arm-Ubuntu-int-test-workflow.yml index ebbea6176..3ddeaa82d 100644 --- a/.github/workflows/arm-Ubuntu-int-test-workflow.yml +++ b/.github/workflows/arm-Ubuntu-int-test-workflow.yml @@ -94,6 +94,8 @@ jobs: DEPLOYMENT_TYPE: "" ARM64: "true" GRAVITON_TESTING: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/build-test-push-workflow.yml b/.github/workflows/build-test-push-workflow.yml index 6c79f58a9..7e8af7d45 100644 --- a/.github/workflows/build-test-push-workflow.yml +++ b/.github/workflows/build-test-push-workflow.yml @@ -190,6 +190,8 @@ jobs: EKS_SSH_PUBLIC_KEY: ${{ secrets.EKS_SSH_PUBLIC_KEY }} CLUSTER_WIDE: "true" DEPLOYMENT_TYPE: "" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + 
AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Chekcout code uses: actions/checkout@v2 diff --git a/.github/workflows/distroless-build-test-push-workflow.yml b/.github/workflows/distroless-build-test-push-workflow.yml index c47d72ab7..bb99d1742 100644 --- a/.github/workflows/distroless-build-test-push-workflow.yml +++ b/.github/workflows/distroless-build-test-push-workflow.yml @@ -191,6 +191,8 @@ jobs: EKS_SSH_PUBLIC_KEY: ${{ secrets.EKS_SSH_PUBLIC_KEY }} CLUSTER_WIDE: "true" DEPLOYMENT_TYPE: "" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Chekcout code uses: actions/checkout@v2 diff --git a/.github/workflows/distroless-int-test-workflow.yml b/.github/workflows/distroless-int-test-workflow.yml index da4719183..a73d194c5 100644 --- a/.github/workflows/distroless-int-test-workflow.yml +++ b/.github/workflows/distroless-int-test-workflow.yml @@ -88,6 +88,8 @@ jobs: S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: "true" DEPLOYMENT_TYPE: "" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/helm-test-workflow.yml b/.github/workflows/helm-test-workflow.yml index 6e83bcc63..d5e58c914 100644 --- a/.github/workflows/helm-test-workflow.yml +++ b/.github/workflows/helm-test-workflow.yml @@ -65,6 +65,8 @@ jobs: HELM_REPO_PATH: "../../../../helm-chart" INSTALL_OPERATOR: "true" TEST_VPC_ENDPOINT_URL: ${{ secrets.TEST_VPC_ENDPOINT_URL }} + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - uses: 
chrisdickinson/setup-yq@3d931309f27270ebbafd53f2daee773a82ea1822 - name: Checking YQ installation diff --git a/.github/workflows/int-test-workflow.yml b/.github/workflows/int-test-workflow.yml index e5b12b5dc..c09b6c305 100644 --- a/.github/workflows/int-test-workflow.yml +++ b/.github/workflows/int-test-workflow.yml @@ -84,6 +84,8 @@ jobs: S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: "true" DEPLOYMENT_TYPE: "" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/manual-int-test-workflow.yml b/.github/workflows/manual-int-test-workflow.yml index b76b3d515..c042347aa 100644 --- a/.github/workflows/manual-int-test-workflow.yml +++ b/.github/workflows/manual-int-test-workflow.yml @@ -45,6 +45,8 @@ jobs: PRIVATE_REGISTRY: ${{ secrets.ECR_REPOSITORY }} S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: ${{ github.event.inputs.CLUSTER_WIDE }} + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/.github/workflows/namespace-scope-int-workflow.yml b/.github/workflows/namespace-scope-int-workflow.yml index b32dcee92..9153bd950 100644 --- a/.github/workflows/namespace-scope-int-workflow.yml +++ b/.github/workflows/namespace-scope-int-workflow.yml @@ -40,6 +40,8 @@ jobs: PRIVATE_REGISTRY: ${{ secrets.ECR_REPOSITORY }} S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: "false" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- 
diff --git a/.github/workflows/nightly-int-test-workflow.yml b/.github/workflows/nightly-int-test-workflow.yml index 4bc4c199c..41fbf3d74 100644 --- a/.github/workflows/nightly-int-test-workflow.yml +++ b/.github/workflows/nightly-int-test-workflow.yml @@ -81,6 +81,8 @@ jobs: PRIVATE_REGISTRY: ${{ secrets.ECR_REPOSITORY }} S3_REGION: ${{ secrets.AWS_DEFAULT_REGION }} CLUSTER_WIDE: "true" + AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID: ${{ secrets.AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID }} + AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY: ${{ secrets.AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY }} steps: - name: Set Test Cluster Nodes and Parallel Runs run: >- diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml index e3dd6765c..a4aaa0824 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/01-assert.yaml @@ -2,4 +2,4 @@ apiVersion: v1 kind: Secret metadata: - name: s3-secret + name: index-ing-sep-secret diff --git a/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml b/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml index 8f1b1b95f..591aa8fd5 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/01-create-s3-secret.yaml @@ -2,6 +2,6 @@ apiVersion: kuttl.dev/v1beta1 kind: TestStep commands: - - script: kubectl create secret generic s3-secret --from-literal=s3_access_key=$AWS_ACCESS_KEY_ID --from-literal=s3_secret_key=$AWS_SECRET_ACCESS_KEY --namespace $NAMESPACE + - script: kubectl create secret generic index-ing-sep-secret --from-literal=s3_access_key=$AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID --from-literal=s3_secret_key=$AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY --namespace $NAMESPACE background: false skipLogOutput: true \ No newline at end of file diff --git 
a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml index 46ef7fce3..1cdbc33b8 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -16,7 +16,7 @@ queue: dlq: index-ingest-separation-test-dlq volumes: - name: helm-bus-secret-ref-test - secretRef: s3-secret + secretRef: index-ing-sep-secret objectStorage: enabled: true diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index 85069a071..6fe07597a 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -81,7 +81,7 @@ var _ = Describe("indingsep test", func() { // testcaseEnvInst.CreateServiceAccount(serviceAccountName) // Secret reference - volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} queue.SQS.VolList = volumeSpec updateQueue.SQS.VolList = volumeSpec @@ -160,7 +160,7 @@ var _ = Describe("indingsep test", func() { // testcaseEnvInst.CreateServiceAccount(serviceAccountName) // Secret reference - volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} queue.SQS.VolList = volumeSpec updateQueue.SQS.VolList = volumeSpec @@ -271,7 +271,7 @@ var _ = Describe("indingsep test", func() { // 
testcaseEnvInst.CreateServiceAccount(serviceAccountName) // Secret reference - volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} queue.SQS.VolList = volumeSpec // Deploy Queue @@ -383,7 +383,7 @@ var _ = Describe("indingsep test", func() { // testcaseEnvInst.CreateServiceAccount(serviceAccountName) // Secret reference - volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexSecretName())} + volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} queue.SQS.VolList = volumeSpec updateQueue.SQS.VolList = volumeSpec diff --git a/test/testenv/testcaseenv.go b/test/testenv/testcaseenv.go index a1081e0a0..737aaa9a6 100644 --- a/test/testenv/testcaseenv.go +++ b/test/testenv/testcaseenv.go @@ -35,24 +35,25 @@ import ( // TestCaseEnv represents a namespaced-isolated k8s cluster environment (aka virtual k8s cluster) to run test cases against type TestCaseEnv struct { - kubeClient client.Client - name string - namespace string - serviceAccountName string - roleName string - roleBindingName string - operatorName string - operatorImage string - splunkImage string - initialized bool - SkipTeardown bool - licenseFilePath string - licenseCMName string - s3IndexSecret string - Log logr.Logger - cleanupFuncs []cleanupFunc - debug string - clusterWideOperator string + kubeClient client.Client + name string + namespace string + serviceAccountName string + roleName string + roleBindingName string + operatorName string + operatorImage string + splunkImage string + initialized bool + SkipTeardown bool + licenseFilePath string + licenseCMName string + s3IndexSecret string + indexIngestSepSecret 
string + Log logr.Logger + cleanupFuncs []cleanupFunc + debug string + clusterWideOperator string } // GetKubeClient returns the kube client to talk to kube-apiserver @@ -79,21 +80,22 @@ func NewTestCaseEnv(kubeClient client.Client, name string, operatorImage string, } testenv := &TestCaseEnv{ - kubeClient: kubeClient, - name: name, - namespace: name, - serviceAccountName: name, - roleName: name, - roleBindingName: name, - operatorName: "splunk-op-" + name, - operatorImage: operatorImage, - splunkImage: splunkImage, - SkipTeardown: specifiedSkipTeardown, - licenseCMName: name, - licenseFilePath: licenseFilePath, - s3IndexSecret: "splunk-s3-index-" + name, - debug: os.Getenv("DEBUG"), - clusterWideOperator: installOperatorClusterWide, + kubeClient: kubeClient, + name: name, + namespace: name, + serviceAccountName: name, + roleName: name, + roleBindingName: name, + operatorName: "splunk-op-" + name, + operatorImage: operatorImage, + splunkImage: splunkImage, + SkipTeardown: specifiedSkipTeardown, + licenseCMName: name, + licenseFilePath: licenseFilePath, + s3IndexSecret: "splunk-s3-index-" + name, + indexIngestSepSecret: "splunk--index-ingest-sep-" + name, + debug: os.Getenv("DEBUG"), + clusterWideOperator: installOperatorClusterWide, } testenv.Log = logf.Log.WithValues("testcaseenv", testenv.name) @@ -156,6 +158,7 @@ func (testenv *TestCaseEnv) setup() error { switch ClusterProvider { case "eks": testenv.createIndexSecret() + testenv.createIndexIngestSepSecret() case "azure": testenv.createIndexSecretAzure() case "gcp": @@ -588,11 +591,41 @@ func (testenv *TestCaseEnv) createIndexSecretAzure() error { return nil } +// CreateIndexIngestSepSecret creates secret object +func (testenv *TestCaseEnv) createIndexIngestSepSecret() error { + secretName := testenv.indexIngestSepSecret + ns := testenv.namespace + + data := map[string][]byte{"s3_access_key": []byte(os.Getenv("AWS_INDEX_INGEST_SEP_ACCESS_KEY_ID")), + "s3_secret_key": 
[]byte(os.Getenv("AWS_INDEX_INGEST_SEP_SECRET_ACCESS_KEY"))} + secret := newSecretSpec(ns, secretName, data) + + if err := testenv.GetKubeClient().Create(context.TODO(), secret); err != nil { + testenv.Log.Error(err, "Unable to create index and ingestion sep secret object") + return err + } + + testenv.pushCleanupFunc(func() error { + err := testenv.GetKubeClient().Delete(context.TODO(), secret) + if err != nil { + testenv.Log.Error(err, "Unable to delete index and ingestion sep secret object") + return err + } + return nil + }) + return nil +} + // GetIndexSecretName return index secret object name func (testenv *TestCaseEnv) GetIndexSecretName() string { return testenv.s3IndexSecret } +// GetIndexSecretName return index and ingestion separation secret object name +func (testenv *TestCaseEnv) GetIndexIngestSepSecretName() string { + return testenv.indexIngestSepSecret +} + // GetLMConfigMap Return name of license config map func (testenv *TestCaseEnv) GetLMConfigMap() string { return testenv.licenseCMName diff --git a/test/testenv/testenv.go b/test/testenv/testenv.go index f82310015..06fe304d4 100644 --- a/test/testenv/testenv.go +++ b/test/testenv/testenv.go @@ -160,24 +160,25 @@ type cleanupFunc func() error // TestEnv represents a namespaced-isolated k8s cluster environment (aka virtual k8s cluster) to run tests against type TestEnv struct { - kubeAPIServer string - name string - namespace string - serviceAccountName string - roleName string - roleBindingName string - operatorName string - operatorImage string - splunkImage string - initialized bool - SkipTeardown bool - licenseFilePath string - licenseCMName string - s3IndexSecret string - kubeClient client.Client - Log logr.Logger - cleanupFuncs []cleanupFunc - debug string + kubeAPIServer string + name string + namespace string + serviceAccountName string + roleName string + roleBindingName string + operatorName string + operatorImage string + splunkImage string + initialized bool + SkipTeardown bool + 
licenseFilePath string + licenseCMName string + s3IndexSecret string + indexIngestSepSecret string + kubeClient client.Client + Log logr.Logger + cleanupFuncs []cleanupFunc + debug string } func init() { @@ -231,19 +232,20 @@ func NewTestEnv(name, commitHash, operatorImage, splunkImage, licenseFilePath st } testenv := &TestEnv{ - name: envName, - namespace: envName, - serviceAccountName: envName, - roleName: envName, - roleBindingName: envName, - operatorName: "splunk-op-" + envName, - operatorImage: operatorImage, - splunkImage: splunkImage, - SkipTeardown: specifiedSkipTeardown, - licenseCMName: envName, - licenseFilePath: licenseFilePath, - s3IndexSecret: "splunk-s3-index-" + envName, - debug: os.Getenv("DEBUG"), + name: envName, + namespace: envName, + serviceAccountName: envName, + roleName: envName, + roleBindingName: envName, + operatorName: "splunk-op-" + envName, + operatorImage: operatorImage, + splunkImage: splunkImage, + SkipTeardown: specifiedSkipTeardown, + licenseCMName: envName, + licenseFilePath: licenseFilePath, + s3IndexSecret: "splunk-s3-index-" + envName, + indexIngestSepSecret: "splunk--index-ingest-sep-" + envName, + debug: os.Getenv("DEBUG"), } testenv.Log = logf.Log.WithValues("testenv", testenv.name) From 532ca28f6a955a74d62360f8d196bf132eadca43 Mon Sep 17 00:00:00 2001 From: Kasia Koziol <kkoziol@splunk.com> Date: Tue, 13 Jan 2026 11:36:36 +0100 Subject: [PATCH 19/25] CSPL-4360 Addressing comments --- api/v4/indexercluster_types.go | 2 + api/v4/ingestorcluster_types.go | 2 + api/v4/objectstorage_types.go | 2 + api/v4/queue_types.go | 2 + ...enterprise.splunk.com_indexerclusters.yaml | 8 + ...nterprise.splunk.com_ingestorclusters.yaml | 8 + .../enterprise.splunk.com_objectstorages.yaml | 4 + .../bases/enterprise.splunk.com_queues.yaml | 4 + pkg/splunk/client/enterprise.go | 19 -- pkg/splunk/client/enterprise_test.go | 32 ---- pkg/splunk/enterprise/indexercluster.go | 147 +++++--------- pkg/splunk/enterprise/indexercluster_test.go | 92 ++++-----
pkg/splunk/enterprise/ingestorcluster.go | 139 +++++--------- pkg/splunk/enterprise/ingestorcluster_test.go | 94 ++++----- ...dex_and_ingestion_separation_suite_test.go | 30 --- .../index_and_ingestion_separation_test.go | 181 ------------------ 16 files changed, 213 insertions(+), 553 deletions(-) diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index e74f900a7..34eb0ba3e 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -40,10 +40,12 @@ type IndexerClusterSpec struct { CommonSplunkSpec `json:",inline"` // +optional + // +kubebuilder:validation:Immutable // Queue reference QueueRef corev1.ObjectReference `json:"queueRef"` // +optional + // +kubebuilder:validation:Immutable // Object Storage reference ObjectStorageRef corev1.ObjectReference `json:"objectStorageRef"` diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index f2e061284..15dc47640 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -40,10 +40,12 @@ type IngestorClusterSpec struct { AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` // +kubebuilder:validation:Required + // +kubebuilder:validation:Immutable // Queue reference QueueRef corev1.ObjectReference `json:"queueRef"` // +kubebuilder:validation:Required + // +kubebuilder:validation:Immutable // Object Storage reference ObjectStorageRef corev1.ObjectReference `json:"objectStorageRef"` } diff --git a/api/v4/objectstorage_types.go b/api/v4/objectstorage_types.go index 08205743f..587738d20 100644 --- a/api/v4/objectstorage_types.go +++ b/api/v4/objectstorage_types.go @@ -28,6 +28,8 @@ const ( ObjectStoragePausedAnnotation = "objectstorage.enterprise.splunk.com/paused" ) +// +kubebuilder:validation:XValidation:rule="self.provider == oldSelf.provider",message="provider is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.s3 == oldSelf.s3",message="s3 is immutable once created" // 
+kubebuilder:validation:XValidation:rule="self.provider != 's3' || has(self.s3)",message="s3 must be provided when provider is s3" // ObjectStorageSpec defines the desired state of ObjectStorage type ObjectStorageSpec struct { diff --git a/api/v4/queue_types.go b/api/v4/queue_types.go index 4c3ff9861..d689a4acd 100644 --- a/api/v4/queue_types.go +++ b/api/v4/queue_types.go @@ -28,6 +28,8 @@ const ( QueuePausedAnnotation = "queue.enterprise.splunk.com/paused" ) +// +kubebuilder:validation:XValidation:rule="self.provider == oldSelf.provider",message="provider is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.sqs == oldSelf.sqs",message="sqs is immutable once created" // +kubebuilder:validation:XValidation:rule="self.provider != 'sqs' || has(self.sqs)",message="sqs must be provided when provider is sqs" // QueueSpec defines the desired state of Queue type QueueSpec struct { diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index af672ce67..2d01798e3 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -8410,6 +8410,10 @@ spec: - s3 type: object x-kubernetes-validations: + - message: provider is immutable once created + rule: self.provider == oldSelf.provider + - message: s3 is immutable once created + rule: self.s3 == oldSelf.s3 - message: s3 must be provided when provider is s3 rule: self.provider != 's3' || has(self.s3) peers: @@ -8523,6 +8527,10 @@ spec: - sqs type: object x-kubernetes-validations: + - message: provider is immutable once created + rule: self.provider == oldSelf.provider + - message: sqs is immutable once created + rule: self.sqs == oldSelf.sqs - message: sqs must be provided when provider is sqs rule: self.provider != 'sqs' || has(self.sqs) readyReplicas: diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml 
b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 6ce4c8488..194fdac86 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -4621,6 +4621,10 @@ spec: - s3 type: object x-kubernetes-validations: + - message: provider is immutable once created + rule: self.provider == oldSelf.provider + - message: s3 is immutable once created + rule: self.s3 == oldSelf.s3 - message: s3 must be provided when provider is s3 rule: self.provider != 's3' || has(self.s3) phase: @@ -4704,6 +4708,10 @@ spec: - sqs type: object x-kubernetes-validations: + - message: provider is immutable once created + rule: self.provider == oldSelf.provider + - message: sqs is immutable once created + rule: self.sqs == oldSelf.sqs - message: sqs must be provided when provider is sqs rule: self.provider != 'sqs' || has(self.sqs) readyReplicas: diff --git a/config/crd/bases/enterprise.splunk.com_objectstorages.yaml b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml index c84474921..23d5b437b 100644 --- a/config/crd/bases/enterprise.splunk.com_objectstorages.yaml +++ b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml @@ -78,6 +78,10 @@ spec: - s3 type: object x-kubernetes-validations: + - message: provider is immutable once created + rule: self.provider == oldSelf.provider + - message: s3 is immutable once created + rule: self.s3 == oldSelf.s3 - message: s3 must be provided when provider is s3 rule: self.provider != 's3' || has(self.s3) status: diff --git a/config/crd/bases/enterprise.splunk.com_queues.yaml b/config/crd/bases/enterprise.splunk.com_queues.yaml index f4ed36a45..454d1700b 100644 --- a/config/crd/bases/enterprise.splunk.com_queues.yaml +++ b/config/crd/bases/enterprise.splunk.com_queues.yaml @@ -120,6 +120,10 @@ spec: - sqs type: object x-kubernetes-validations: + - message: provider is immutable once created + rule: self.provider == oldSelf.provider + - message: sqs is 
immutable once created + rule: self.sqs == oldSelf.sqs - message: sqs must be provided when provider is sqs rule: self.provider != 'sqs' || has(self.sqs) status: diff --git a/pkg/splunk/client/enterprise.go b/pkg/splunk/client/enterprise.go index 6eb4d2f87..e51688661 100644 --- a/pkg/splunk/client/enterprise.go +++ b/pkg/splunk/client/enterprise.go @@ -1015,22 +1015,3 @@ func (c *SplunkClient) UpdateConfFile(scopedLog logr.Logger, fileName, property } return err } - -// Deletes conf files properties -func (c *SplunkClient) DeleteConfFileProperty(scopedLog logr.Logger, fileName, property string) error { - endpoint := fmt.Sprintf("%s/servicesNS/nobody/system/configs/conf-%s/%s", c.ManagementURI, fileName, property) - - scopedLog.Info("Deleting conf file object", "fileName", fileName, "property", property) - request, err := http.NewRequest("DELETE", endpoint, nil) - if err != nil { - scopedLog.Error(err, "Failed to delete conf file object", "fileName", fileName, "property", property) - return err - } - - expectedStatus := []int{200, 201, 404} - err = c.Do(request, expectedStatus, nil) - if err != nil { - scopedLog.Error(err, fmt.Sprintf("Status not in %v for conf file object deletion", expectedStatus), "fileName", fileName, "property", property) - } - return err -} diff --git a/pkg/splunk/client/enterprise_test.go b/pkg/splunk/client/enterprise_test.go index 6b97c24d7..4934eedfc 100644 --- a/pkg/splunk/client/enterprise_test.go +++ b/pkg/splunk/client/enterprise_test.go @@ -705,35 +705,3 @@ func TestUpdateConfFile(t *testing.T) { t.Errorf("UpdateConfFile expected error on update, got nil") } } - -func TestDeleteConfFileProperty(t *testing.T) { - // Test successful deletion of conf property - property := "myproperty" - fileName := "outputs" - - reqLogger := log.FromContext(context.TODO()) - scopedLog := reqLogger.WithName("TestDeleteConfFileProperty") - - wantDeleteRequest, _ := http.NewRequest("DELETE", 
fmt.Sprintf("https://localhost:8089/servicesNS/nobody/system/configs/conf-outputs/%s", property), nil) - - mockSplunkClient := &spltest.MockHTTPClient{} - mockSplunkClient.AddHandler(wantDeleteRequest, 200, "", nil) - - c := NewSplunkClient("https://localhost:8089", "admin", "p@ssw0rd") - c.Client = mockSplunkClient - - err := c.DeleteConfFileProperty(scopedLog, fileName, property) - if err != nil { - t.Errorf("DeleteConfFileProperty err = %v", err) - } - mockSplunkClient.CheckRequests(t, "TestDeleteConfFileProperty") - - // Negative test: error on delete - mockSplunkClient = &spltest.MockHTTPClient{} - mockSplunkClient.AddHandler(wantDeleteRequest, 500, "", nil) - c.Client = mockSplunkClient - err = c.DeleteConfFileProperty(scopedLog, fileName, property) - if err == nil { - t.Errorf("DeleteConfFileProperty expected error on delete, got nil") - } -} diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 3808539cc..af981be2c 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -19,7 +19,6 @@ import ( "context" "errors" "fmt" - "reflect" "regexp" "sort" "strconv" @@ -260,12 +259,9 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller return result, err } } - - // Can not override original queue spec due to comparison in the later code - queueCopy := queue - if queueCopy.Spec.Provider == "sqs" { - if queueCopy.Spec.SQS.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { - queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) + if queue.Spec.Provider == "sqs" { + if queue.Spec.SQS.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" { + queue.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) } } @@ -284,20 +280,17 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller return result, err } } - - // Can not override original 
object storage spec due to comparison in the later code - osCopy := os - if osCopy.Spec.Provider == "s3" { - if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { - osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) + if os.Spec.Provider == "s3" { + if os.Spec.S3.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" { + os.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) } } // If queue is updated if cr.Spec.QueueRef.Name != "" { - if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil || !reflect.DeepEqual(*cr.Status.Queue, queue.Spec) || !reflect.DeepEqual(*cr.Status.ObjectStorage, os.Spec) { + if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) - err = mgr.handlePullQueueChange(ctx, cr, queueCopy, osCopy, client) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") @@ -593,12 +586,9 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, return result, err } } - - // Can not override original queue spec due to comparison in the later code - queueCopy := queue - if queueCopy.Spec.Provider == "sqs" { - if queueCopy.Spec.SQS.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { - queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) + if queue.Spec.Provider == "sqs" { + if queue.Spec.SQS.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" { + queue.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) } } @@ -612,25 
+602,21 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, err = client.Get(context.Background(), types.NamespacedName{ Name: cr.Spec.ObjectStorageRef.Name, Namespace: ns, - }, &queue) + }, &os) if err != nil { return result, err } } - - // Can not override original queue spec due to comparison in the later code - osCopy := os - if osCopy.Spec.Provider == "s3" { - if osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { - osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) + if os.Spec.Provider == "s3" { + if os.Spec.S3.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" { + os.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) } } - // If queue is updated if cr.Spec.QueueRef.Name != "" { - if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil || !reflect.DeepEqual(*cr.Status.Queue, queue.Spec) || !reflect.DeepEqual(*cr.Status.ObjectStorage, os.Spec) { + if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) - err = mgr.handlePullQueueChange(ctx, cr, queueCopy, osCopy, client) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") @@ -1317,10 +1303,10 @@ func getSiteName(ctx context.Context, c splcommon.ControllerClient, cr *enterpri var newSplunkClientForQueuePipeline = splclient.NewSplunkClient -// Checks if only PullQueue or Pipeline config changed, and updates the conf file if so -func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue 
enterpriseApi.Queue, os enterpriseApi.ObjectStorage, k8s rclient.Client) error { +// updateIndexerConfFiles checks if Queue or Pipeline inputs are created for the first time and updates the conf file if so +func (mgr *indexerClusterPodManager) updateIndexerConfFiles(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, k8s rclient.Client) error { reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("handlePullQueueChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) + scopedLog := reqLogger.WithName("updateIndexerConfFiles").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) // Only update config for pods that exist readyReplicas := newCR.Status.ReadyReplicas @@ -1336,31 +1322,10 @@ func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, } splunkClient := newSplunkClientForQueuePipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) - newCrStatusQueue := newCR.Status.Queue - if newCrStatusQueue == nil { - newCrStatusQueue = &enterpriseApi.QueueSpec{} - } - newCrStatusObjectStorage := newCR.Status.ObjectStorage - if newCrStatusObjectStorage == nil { - newCrStatusObjectStorage = &enterpriseApi.ObjectStorageSpec{} - } - - afterDelete := false - if (queue.Spec.SQS.Name != "" && newCrStatusQueue.SQS.Name != "" && queue.Spec.SQS.Name != newCrStatusQueue.SQS.Name) || - (queue.Spec.Provider != "" && newCrStatusQueue.Provider != "" && queue.Spec.Provider != newCrStatusQueue.Provider) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCrStatusQueue.SQS.Name)); err != nil { - updateErr = err - } - if err := splunkClient.DeleteConfFileProperty(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", newCrStatusQueue.SQS.Name)); err != nil { - updateErr = err - } - afterDelete = true - } - // Secret reference s3AccessKey, s3SecretKey := "", "" - if 
queue.Spec.Provider == "sqs" && newCR.Spec.ServiceAccount == "" { - for _, vol := range queue.Spec.SQS.VolList { + if queue.Provider == "sqs" && newCR.Spec.ServiceAccount == "" { + for _, vol := range queue.SQS.VolList { if vol.SecretRef != "" { s3AccessKey, s3SecretKey, err = GetQueueRemoteVolumeSecrets(ctx, vol, k8s, newCR) if err != nil { @@ -1371,38 +1336,37 @@ func (mgr *indexerClusterPodManager) handlePullQueueChange(ctx context.Context, } } - queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCrStatusQueue, newCrStatusObjectStorage, afterDelete, s3AccessKey, s3SecretKey) + queueInputs, queueOutputs, pipelineInputs := getQueueAndPipelineInputsForIndexerConfFiles(queue, os, s3AccessKey, s3SecretKey) - for _, pbVal := range queueChangedFieldsOutputs { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { + for _, pbVal := range queueOutputs { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{pbVal}); err != nil { updateErr = err } } - for _, pbVal := range queueChangedFieldsInputs { - if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { + for _, pbVal := range queueInputs { + if err := splunkClient.UpdateConfFile(scopedLog, "inputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{pbVal}); err != nil { updateErr = err } } - for _, field := range pipelineChangedFields { + for _, field := range pipelineInputs { if err := splunkClient.UpdateConfFile(scopedLog, "default-mode", field[0], [][]string{{field[1], field[2]}}); err != nil { updateErr = err } } } - // Do NOT restart Splunk return updateErr } -// getChangedQueueFieldsForIndexer returns a list of changed queue and pipeline fields for indexer pods -func 
getChangedQueueFieldsForIndexer(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueStatus *enterpriseApi.QueueSpec, osStatus *enterpriseApi.ObjectStorageSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields [][]string) { - // Push all queue fields - queueChangedFieldsInputs, queueChangedFieldsOutputs = pullQueueChanged(queueStatus, &queue.Spec, osStatus, &os.Spec, afterDelete, s3AccessKey, s3SecretKey) +// getQueueAndPipelineInputsForIndexerConfFiles returns a list of queue and pipeline inputs for indexer pods conf files +func getQueueAndPipelineInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, s3AccessKey, s3SecretKey string) (queueInputs, queueOutputs, pipelineInputs [][]string) { + // Queue Inputs + queueInputs, queueOutputs = getQueueAndObjectStorageInputsForIndexerConfFiles(queue, os, s3AccessKey, s3SecretKey) - // Always set all pipeline fields, not just changed ones - pipelineChangedFields = pipelineConfig(true) + // Pipeline inputs + pipelineInputs = getPipelineInputsForConfFile(true) return } @@ -1418,45 +1382,34 @@ func imageUpdatedTo9(previousImage string, currentImage string) bool { return strings.HasPrefix(previousVersion, "8") && strings.HasPrefix(currentVersion, "9") } -func pullQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldOS, newOS *enterpriseApi.ObjectStorageSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (inputs, outputs [][]string) { +// getQueueAndObjectStorageInputsForIndexerConfFiles returns a list of queue and object storage inputs for conf files +func getQueueAndObjectStorageInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, s3AccessKey, s3SecretKey string) (inputs, outputs [][]string) { queueProvider := "" - if newQueue.Provider == "sqs" { + if queue.Provider == "sqs" { queueProvider = "sqs_smartbus" } osProvider := "" - if 
newOS.Provider == "s3" { + if os.Provider == "s3" { osProvider = "sqs_smartbus" } - if oldQueue.Provider != newQueue.Provider || afterDelete { - inputs = append(inputs, []string{"remote_queue.type", queueProvider}) - } - if !reflect.DeepEqual(oldQueue.SQS.VolList, newQueue.SQS.VolList) || afterDelete { - if s3AccessKey != "" && s3SecretKey != "" { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), s3AccessKey}) - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), s3SecretKey}) - } - } - if oldQueue.SQS.AuthRegion != newQueue.SQS.AuthRegion || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.AuthRegion}) - } - if newQueue.SQS.Endpoint != "" && (oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete) { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint}) - } - if newOS.S3.Endpoint != "" && (oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete) { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), newOS.S3.Endpoint}) - } - if oldOS.S3.Path != newOS.S3.Path || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), newOS.S3.Path}) - } - if oldQueue.SQS.DLQ != newQueue.SQS.DLQ || afterDelete { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), newQueue.SQS.DLQ}) - } inputs = append(inputs, + []string{"remote_queue.type", queueProvider}, + []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), queue.SQS.AuthRegion}, + []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), queue.SQS.Endpoint}, + []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), os.S3.Endpoint}, + []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", 
osProvider), os.S3.Path}, + []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), queue.SQS.DLQ}, []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", queueProvider), "4"}, []string{fmt.Sprintf("remote_queue.%s.retry_policy", queueProvider), "max_count"}, ) + // TODO: Handle credentials change + if s3AccessKey != "" && s3SecretKey != "" { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), s3AccessKey}) + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), s3SecretKey}) + } + outputs = inputs outputs = append(outputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"}, diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index 2b4026ac5..9d1bf0118 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -2046,10 +2046,10 @@ func TestImageUpdatedTo9(t *testing.T) { } } -func TestGetChangedQueueFieldsForIndexer(t *testing.T) { +func TestGetQueueAndPipelineInputsForIndexerConfFiles(t *testing.T) { provider := "sqs_smartbus" - queue := enterpriseApi.Queue{ + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", @@ -2071,7 +2071,7 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { }, } - os := enterpriseApi.ObjectStorage{ + os := &enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", @@ -2088,29 +2088,13 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { }, } - newCR := &enterpriseApi.IndexerCluster{ - Spec: enterpriseApi.IndexerClusterSpec{ - QueueRef: corev1.ObjectReference{ - Name: queue.Name, - }, - ObjectStorageRef: corev1.ObjectReference{ - Name: os.Name, - }, - }, - Status: enterpriseApi.IndexerClusterStatus{ - Queue: &enterpriseApi.QueueSpec{}, - ObjectStorage: 
&enterpriseApi.ObjectStorageSpec{}, - }, - } - key := "key" secret := "secret" - queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getChangedQueueFieldsForIndexer(&queue, &os, newCR.Status.Queue, newCR.Status.ObjectStorage, false, key, secret) + + queueChangedFieldsInputs, queueChangedFieldsOutputs, pipelineChangedFields := getQueueAndPipelineInputsForIndexerConfFiles(&queue.Spec, &os.Spec, key, secret) assert.Equal(t, 10, len(queueChangedFieldsInputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, - {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, @@ -2118,13 +2102,13 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, + {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, }, queueChangedFieldsInputs) assert.Equal(t, 12, len(queueChangedFieldsOutputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, - {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, @@ -2132,6 +2116,8 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { 
{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, + {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, + {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, }, queueChangedFieldsOutputs) @@ -2146,11 +2132,14 @@ func TestGetChangedQueueFieldsForIndexer(t *testing.T) { }, pipelineChangedFields) } -func TestHandlePullQueueChange(t *testing.T) { +func TestUpdateIndexerConfFiles(t *testing.T) { + c := spltest.NewMockClient() + ctx := context.TODO() + // Object definitions provider := "sqs_smartbus" - queue := enterpriseApi.Queue{ + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", @@ -2169,6 +2158,7 @@ func TestHandlePullQueueChange(t *testing.T) { }, }, } + c.Create(ctx, queue) os := enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ @@ -2187,8 +2177,9 @@ func TestHandlePullQueueChange(t *testing.T) { }, }, } + c.Create(ctx, &os) - newCR := &enterpriseApi.IndexerCluster{ + cr := &enterpriseApi.IndexerCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IndexerCluster", }, @@ -2211,6 +2202,7 @@ func TestHandlePullQueueChange(t *testing.T) { ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, }, } + c.Create(ctx, cr) pod0 := &corev1.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -2252,6 +2244,10 @@ func TestHandlePullQueueChange(t *testing.T) { pod2 := pod0.DeepCopy() pod2.ObjectMeta.Name = "splunk-test-indexer-2" + c.Create(ctx, pod0) + c.Create(ctx, pod1) + c.Create(ctx, pod2) + secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "test-secrets", @@ -2262,19 +2258,9 @@ func TestHandlePullQueueChange(t *testing.T) { }, } - // Mock pods - c := spltest.NewMockClient() - ctx := 
context.TODO() - c.Create(ctx, &queue) - c.Create(ctx, &os) - c.Create(ctx, newCR) - c.Create(ctx, pod0) - c.Create(ctx, pod1) - c.Create(ctx, pod2) - // Negative test case: secret not found mgr := &indexerClusterPodManager{} - err := mgr.handlePullQueueChange(ctx, newCR, queue, os, c) + err := mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) assert.NotNil(t, err) // Mock secret @@ -2283,9 +2269,9 @@ func TestHandlePullQueueChange(t *testing.T) { mockHTTPClient := &spltest.MockHTTPClient{} // Negative test case: failure in creating remote queue stanza - mgr = newTestPullQueuePipelineManager(mockHTTPClient) + mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) assert.NotNil(t, err) // outputs.conf @@ -2304,22 +2290,22 @@ func TestHandlePullQueueChange(t *testing.T) { propertyKVListOutputs = append(propertyKVListOutputs, []string{fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}) body := buildFormBody(propertyKVListOutputs) - addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, queue, newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIndexer(mockHTTPClient, cr, &queue.Spec, "conf-outputs", body) // Negative test case: failure in creating remote queue stanza - mgr = newTestPullQueuePipelineManager(mockHTTPClient) + mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) assert.NotNil(t, err) // inputs.conf body = buildFormBody(propertyKVList) - addRemoteQueueHandlersForIndexer(mockHTTPClient, newCR, queue, newCR.Status.ReadyReplicas, "conf-inputs", body) + addRemoteQueueHandlersForIndexer(mockHTTPClient, cr, &queue.Spec, "conf-inputs", body) // Negative test case: failure in updating remote queue stanza - mgr = 
newTestPullQueuePipelineManager(mockHTTPClient) + mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) assert.NotNil(t, err) // default-mode.conf @@ -2331,7 +2317,7 @@ func TestHandlePullQueueChange(t *testing.T) { {"pipeline:typing", "disabled", "true"}, } - for i := 0; i < int(newCR.Status.ReadyReplicas); i++ { + for i := 0; i < int(cr.Status.ReadyReplicas); i++ { podName := fmt.Sprintf("splunk-test-indexer-%d", i) baseURL := fmt.Sprintf("https://%s.splunk-test-indexer-headless.test.svc.cluster.local:8089/servicesNS/nobody/system/configs/conf-default-mode", podName) @@ -2345,9 +2331,9 @@ func TestHandlePullQueueChange(t *testing.T) { } } - mgr = newTestPullQueuePipelineManager(mockHTTPClient) + mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.handlePullQueueChange(ctx, newCR, queue, os, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) assert.Nil(t, err) } @@ -2365,25 +2351,25 @@ func buildFormBody(pairs [][]string) string { return b.String() } -func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, queue enterpriseApi.Queue, replicas int32, confName, body string) { - for i := 0; i < int(replicas); i++ { +func addRemoteQueueHandlersForIndexer(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IndexerCluster, queue *enterpriseApi.QueueSpec, confName, body string) { + for i := 0; i < int(cr.Status.ReadyReplicas); i++ { podName := fmt.Sprintf("splunk-%s-indexer-%d", cr.GetName(), i) baseURL := fmt.Sprintf( "https://%s.splunk-%s-indexer-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/%s", podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", 
queue.SQS.Name)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.SQS.Name)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } } -func newTestPullQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *indexerClusterPodManager { +func newTestIndexerQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *indexerClusterPodManager { newSplunkClientForQueuePipeline = func(uri, user, pass string) *splclient.SplunkClient { return &splclient.SplunkClient{ ManagementURI: uri, diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 78a51ede2..55f0e7d35 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -225,12 +225,9 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr return result, err } } - - // Can not override original queue spec due to comparison in the later code - queueCopy := queue - if queueCopy.Spec.Provider == "sqs" { - if queueCopy.Spec.SQS.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { - queueCopy.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queueCopy.Spec.SQS.AuthRegion) + if queue.Spec.Provider == "sqs" { + if queue.Spec.SQS.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" { + queue.Spec.SQS.Endpoint = fmt.Sprintf("https://sqs.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) } } @@ -249,19 +246,16 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr return result, err } } - - // Can not override original queue spec due to comparison in the later code - osCopy := os - if osCopy.Spec.Provider == "s3" { - if 
osCopy.Spec.S3.Endpoint == "" && queueCopy.Spec.SQS.AuthRegion != "" { - osCopy.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) + if os.Spec.Provider == "s3" { + if os.Spec.S3.Endpoint == "" && queue.Spec.SQS.AuthRegion != "" { + os.Spec.S3.Endpoint = fmt.Sprintf("https://s3.%s.amazonaws.com", queue.Spec.SQS.AuthRegion) } } // If queue is updated - if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil || !reflect.DeepEqual(*cr.Status.Queue, queue.Spec) || !reflect.DeepEqual(*cr.Status.ObjectStorage, os.Spec) { + if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil { mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) - err = mgr.handlePushQueueChange(ctx, cr, queueCopy, osCopy, client) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") @@ -344,7 +338,7 @@ func (mgr *ingestorClusterPodManager) getClient(ctx context.Context, n int32) *s // validateIngestorClusterSpec checks validity and makes default updates to a IngestorClusterSpec and returns error if something is wrong func validateIngestorClusterSpec(ctx context.Context, c splcommon.ControllerClient, cr *enterpriseApi.IngestorCluster) error { - // We cannot have 0 replicas in IngestorCluster spec since this refers to number of ingestion pods in an ingestor cluster + // We cannot have 0 replicas in IngestorCluster spec since this refers to number of ingestion pods in the ingestor cluster if cr.Spec.Replicas < 3 { cr.Spec.Replicas = 3 } @@ -372,10 +366,10 @@ func getIngestorStatefulSet(ctx context.Context, client splcommon.ControllerClie return ss, nil } -// Checks if only Queue or Pipeline config 
changed, and updates the conf file if so -func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue enterpriseApi.Queue, os enterpriseApi.ObjectStorage, k8s client.Client) error { +// updateIngestorConfFiles checks if Queue or Pipeline inputs are created for the first time and updates the conf file if so +func (mgr *ingestorClusterPodManager) updateIngestorConfFiles(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, k8s client.Client) error { reqLogger := log.FromContext(ctx) - scopedLog := reqLogger.WithName("handlePushQueueChange").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) + scopedLog := reqLogger.WithName("updateIngestorConfFiles").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) // Only update config for pods that exist readyReplicas := newCR.Status.Replicas @@ -391,28 +385,10 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, } splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) - newCrStatusQueue := newCR.Status.Queue - if newCrStatusQueue == nil { - newCrStatusQueue = &enterpriseApi.QueueSpec{} - } - newCrStatusObjectStorage := newCR.Status.ObjectStorage - if newCrStatusObjectStorage == nil { - newCrStatusObjectStorage = &enterpriseApi.ObjectStorageSpec{} - } - - afterDelete := false - if (queue.Spec.SQS.Name != "" && newCrStatusQueue.SQS.Name != "" && queue.Spec.SQS.Name != newCrStatusQueue.SQS.Name) || - (queue.Spec.Provider != "" && newCrStatusQueue.Provider != "" && queue.Spec.Provider != newCrStatusQueue.Provider) { - if err := splunkClient.DeleteConfFileProperty(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", newCrStatusQueue.SQS.Name)); err != nil { - updateErr = err - } - afterDelete = true - } - // Secret reference s3AccessKey, s3SecretKey := "", "" - if 
queue.Spec.Provider == "sqs" && newCR.Spec.ServiceAccount == "" { - for _, vol := range queue.Spec.SQS.VolList { + if queue.Provider == "sqs" && newCR.Spec.ServiceAccount == "" { + for _, vol := range queue.SQS.VolList { if vol.SecretRef != "" { s3AccessKey, s3SecretKey, err = GetQueueRemoteVolumeSecrets(ctx, vol, k8s, newCR) if err != nil { @@ -423,32 +399,31 @@ func (mgr *ingestorClusterPodManager) handlePushQueueChange(ctx context.Context, } } - queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCrStatusQueue, newCrStatusObjectStorage, afterDelete, s3AccessKey, s3SecretKey) + queueInputs, pipelineInputs := getQueueAndPipelineInputsForIngestorConfFiles(queue, os, s3AccessKey, s3SecretKey) - for _, pbVal := range queueChangedFields { - if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name), [][]string{pbVal}); err != nil { + for _, input := range queueInputs { + if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{input}); err != nil { updateErr = err } } - for _, field := range pipelineChangedFields { - if err := splunkClient.UpdateConfFile(scopedLog, "default-mode", field[0], [][]string{{field[1], field[2]}}); err != nil { + for _, input := range pipelineInputs { + if err := splunkClient.UpdateConfFile(scopedLog, "default-mode", input[0], [][]string{{input[1], input[2]}}); err != nil { updateErr = err } } } - // Do NOT restart Splunk return updateErr } -// getChangedQueueFieldsForIngestor returns a list of changed queue and pipeline fields for ingestor pods -func getChangedQueueFieldsForIngestor(queue *enterpriseApi.Queue, os *enterpriseApi.ObjectStorage, queueStatus *enterpriseApi.QueueSpec, osStatus *enterpriseApi.ObjectStorageSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (queueChangedFields, pipelineChangedFields [][]string) { - // Push changed queue fields - queueChangedFields = 
pushQueueChanged(queueStatus, &queue.Spec, osStatus, &os.Spec, afterDelete, s3AccessKey, s3SecretKey) +// getQueueAndPipelineInputsForIngestorConfFiles returns a list of queue and pipeline inputs for ingestor pods conf files +func getQueueAndPipelineInputsForIngestorConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, s3AccessKey, s3SecretKey string) (queueInputs, pipelineInputs [][]string) { + // Queue Inputs + queueInputs = getQueueAndObjectStorageInputsForIngestorConfFiles(queue, os, s3AccessKey, s3SecretKey) - // Always changed pipeline fields - pipelineChangedFields = pipelineConfig(false) + // Pipeline inputs + pipelineInputs = getPipelineInputsForConfFile(false) return } @@ -461,7 +436,7 @@ type ingestorClusterPodManager struct { newSplunkClient func(managementURI, username, password string) *splclient.SplunkClient } -// newIngestorClusterPodManager function to create pod manager this is added to write unit test case +// newIngestorClusterPodManager creates pod manager to handle unit test cases var newIngestorClusterPodManager = func(log logr.Logger, cr *enterpriseApi.IngestorCluster, secret *corev1.Secret, newSplunkClient NewSplunkClientFunc, c splcommon.ControllerClient) ingestorClusterPodManager { return ingestorClusterPodManager{ log: log, @@ -472,8 +447,9 @@ var newIngestorClusterPodManager = func(log logr.Logger, cr *enterpriseApi.Inges } } -func pipelineConfig(isIndexer bool) (output [][]string) { - output = append(output, +// getPipelineInputsForConfFile returns a list of pipeline inputs for conf file +func getPipelineInputsForConfFile(isIndexer bool) (config [][]string) { + config = append(config, []string{"pipeline:remotequeueruleset", "disabled", "false"}, []string{"pipeline:ruleset", "disabled", "true"}, []string{"pipeline:remotequeuetyping", "disabled", "false"}, @@ -481,51 +457,40 @@ func pipelineConfig(isIndexer bool) (output [][]string) { []string{"pipeline:typing", "disabled", "true"}, ) if !isIndexer { - output = 
append(output, []string{"pipeline:indexerPipe", "disabled", "true"}) + config = append(config, []string{"pipeline:indexerPipe", "disabled", "true"}) } - return output + + return } -func pushQueueChanged(oldQueue, newQueue *enterpriseApi.QueueSpec, oldOS, newOS *enterpriseApi.ObjectStorageSpec, afterDelete bool, s3AccessKey, s3SecretKey string) (output [][]string) { +// getQueueAndObjectStorageInputsForConfFiles returns a list of queue and object storage inputs for conf files +func getQueueAndObjectStorageInputsForIngestorConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, s3AccessKey, s3SecretKey string) (config [][]string) { queueProvider := "" - if newQueue.Provider == "sqs" { + if queue.Provider == "sqs" { queueProvider = "sqs_smartbus" } osProvider := "" - if newOS.Provider == "s3" { + if os.Provider == "s3" { osProvider = "sqs_smartbus" } - - if oldQueue.Provider != newQueue.Provider || afterDelete { - output = append(output, []string{"remote_queue.type", queueProvider}) - } - if !reflect.DeepEqual(oldQueue.SQS.VolList, newQueue.SQS.VolList) || afterDelete { - if s3AccessKey != "" && s3SecretKey != "" { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), s3AccessKey}) - output = append(output, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), s3SecretKey}) - } - } - if oldQueue.SQS.AuthRegion != newQueue.SQS.AuthRegion || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), newQueue.SQS.AuthRegion}) - } - if newQueue.SQS.Endpoint != "" && (oldQueue.SQS.Endpoint != newQueue.SQS.Endpoint || afterDelete) { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), newQueue.SQS.Endpoint}) - } - if newOS.S3.Endpoint != "" && (oldOS.S3.Endpoint != newOS.S3.Endpoint || afterDelete) { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), 
newOS.S3.Endpoint}) - } - if oldOS.S3.Path != newOS.S3.Path || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), newOS.S3.Path}) - } - if oldQueue.SQS.DLQ != newQueue.SQS.DLQ || afterDelete { - output = append(output, []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), newQueue.SQS.DLQ}) - } - - output = append(output, + config = append(config, + []string{"remote_queue.type", queueProvider}, + []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), queue.SQS.AuthRegion}, + []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), queue.SQS.Endpoint}, + []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), os.S3.Endpoint}, + []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), os.S3.Path}, + []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), queue.SQS.DLQ}, []string{fmt.Sprintf("remote_queue.%s.encoding_format", queueProvider), "s2s"}, []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", queueProvider), "4"}, []string{fmt.Sprintf("remote_queue.%s.retry_policy", queueProvider), "max_count"}, - []string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"}) + []string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"}, + ) - return output + // TODO: Handle credentials change + if s3AccessKey != "" && s3SecretKey != "" { + config = append(config, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), s3AccessKey}) + config = append(config, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), s3SecretKey}) + } + + return } diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index 995e52ff8..e79bbaa94 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -86,7 +86,7 @@ func 
TestApplyIngestorCluster(t *testing.T) { } c.Create(ctx, queue) - os := enterpriseApi.ObjectStorage{ + os := &enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", @@ -103,7 +103,7 @@ func TestApplyIngestorCluster(t *testing.T) { }, }, } - c.Create(ctx, &os) + c.Create(ctx, os) cr := &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ @@ -276,7 +276,7 @@ func TestApplyIngestorCluster(t *testing.T) { } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, queue, cr.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, &queue.Spec, "conf-outputs", body) // default-mode.conf propertyKVList = [][]string{ @@ -403,7 +403,7 @@ func TestGetIngestorStatefulSet(t *testing.T) { test(`{"kind":"StatefulSet","apiVersion":"apps/v1","metadata":{"name":"splunk-test-ingestor","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"},"ownerReferences":[{"apiVersion":"","kind":"IngestorCluster","name":"test","uid":"","controller":true}]},"spec":{"replicas":3,"selector":{"matchLabels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-
value"},"annotations":{"traffic.sidecar.istio.io/excludeOutboundPorts":"8089,8191,9997","traffic.sidecar.istio.io/includeInboundPorts":"8000,8088"}},"spec":{"volumes":[{"name":"splunk-test-probe-configmap","configMap":{"name":"splunk-test-probe-configmap","defaultMode":365}},{"name":"mnt-splunk-secrets","secret":{"secretName":"splunk-test-ingestor-secret-v1","defaultMode":420}}],"containers":[{"name":"splunk","image":"splunk/splunk","ports":[{"name":"http-splunkweb","containerPort":8000,"protocol":"TCP"},{"name":"http-hec","containerPort":8088,"protocol":"TCP"},{"name":"https-splunkd","containerPort":8089,"protocol":"TCP"},{"name":"tcp-s2s","containerPort":9997,"protocol":"TCP"},{"name":"user-defined","containerPort":32000,"protocol":"UDP"}],"env":[{"name":"TEST_ENV_VAR","value":"test_value"},{"name":"SPLUNK_HOME","value":"/opt/splunk"},{"name":"SPLUNK_START_ARGS","value":"--accept-license"},{"name":"SPLUNK_DEFAULTS_URL","value":"/mnt/splunk-secrets/default.yml"},{"name":"SPLUNK_HOME_OWNERSHIP_ENFORCEMENT","value":"false"},{"name":"SPLUNK_ROLE","value":"splunk_standalone"},{"name":"SPLUNK_DECLARATIVE_ADMIN_PASSWORD","value":"true"},{"name":"SPLUNK_OPERATOR_K8_LIVENESS_DRIVER_FILE_PATH","value":"/tmp/splunk_operator_k8s/probes/k8_liveness_driver.sh"},{"name":"SPLUNK_GENERAL_TERMS","value":"--accept-sgt-current-at-splunk-com"},{"name":"SPLUNK_SKIP_CLUSTER_BUNDLE_PUSH","value":"true"}],"resources":{"limits":{"cpu":"4","memory":"8Gi"},"requests":{"cpu":"100m","memory":"512Mi"}},"volumeMounts":[{"name":"pvc-etc","mountPath":"/opt/splunk/etc"},{"name":"pvc-var","mountPath":"/opt/splunk/var"},{"name":"splunk-test-probe-configmap","mountPath":"/mnt/probes"},{"name":"mnt-splunk-secrets","mountPath":"/mnt/splunk-secrets"}],"livenessProbe":{"exec":{"command":["/mnt/probes/livenessProbe.sh"]},"initialDelaySeconds":30,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":3},"readinessProbe":{"exec":{"command":["/mnt/probes/readinessProbe.sh"]},"initialDelaySeconds":10,"timeo
utSeconds":5,"periodSeconds":5,"failureThreshold":3},"startupProbe":{"exec":{"command":["/mnt/probes/startupProbe.sh"]},"initialDelaySeconds":40,"timeoutSeconds":30,"periodSeconds":30,"failureThreshold":12},"imagePullPolicy":"IfNotPresent","securityContext":{"capabilities":{"add":["NET_BIND_SERVICE"],"drop":["ALL"]},"privileged":false,"runAsUser":41812,"runAsNonRoot":true,"allowPrivilegeEscalation":false,"seccompProfile":{"type":"RuntimeDefault"}}}],"serviceAccountName":"defaults","securityContext":{"runAsUser":41812,"runAsNonRoot":true,"fsGroup":41812,"fsGroupChangePolicy":"OnRootMismatch"},"affinity":{"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"weight":100,"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app.kubernetes.io/instance","operator":"In","values":["splunk-test-ingestor"]}]},"topologyKey":"kubernetes.io/hostname"}}]}},"schedulerName":"default-scheduler"}},"volumeClaimTemplates":[{"metadata":{"name":"pvc-etc","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"10Gi"}}},"status":{}},{"metadata":{"name":"pvc-var","namespace":"test","creationTimestamp":null,"labels":{"app.kubernetes.io/component":"ingestor","app.kubernetes.io/instance":"splunk-test-ingestor","app.kubernetes.io/managed-by":"splunk-operator","app.kubernetes.io/name":"ingestor","app.kubernetes.io/part-of":"splunk-test-ingestor","app.kubernetes.io/test-extra-label":"test-extra-label-value"}},"spec":{"accessModes":["ReadWriteOnce"],"resources":{"requests":{"storage":"100Gi"}}},"status":{}}],"serviceName":"splunk-test-ingestor-headless","podManagementPolicy":"Parallel","updateStrategy":{"type
":"OnDelete"}},"status":{"replicas":0,"availableReplicas":0}}`) } -func TestGetChangedQueueFieldsForIngestor(t *testing.T) { +func TestGetQueueAndPipelineInputsForIngestorConfFiles(t *testing.T) { provider := "sqs_smartbus" queue := enterpriseApi.Queue{ @@ -445,30 +445,14 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { }, } - newCR := &enterpriseApi.IngestorCluster{ - Spec: enterpriseApi.IngestorClusterSpec{ - QueueRef: corev1.ObjectReference{ - Name: queue.Name, - }, - ObjectStorageRef: corev1.ObjectReference{ - Name: os.Name, - }, - }, - Status: enterpriseApi.IngestorClusterStatus{ - Queue: &enterpriseApi.QueueSpec{}, - ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, - }, - } - key := "key" secret := "secret" - queueChangedFields, pipelineChangedFields := getChangedQueueFieldsForIngestor(&queue, &os, newCR.Status.Queue, newCR.Status.ObjectStorage, false, key, secret) - assert.Equal(t, 12, len(queueChangedFields)) + queueInputs, pipelineInputs := getQueueAndPipelineInputsForIngestorConfFiles(&queue.Spec, &os.Spec, key, secret) + + assert.Equal(t, 12, len(queueInputs)) assert.Equal(t, [][]string{ {"remote_queue.type", provider}, - {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, - {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, @@ -478,9 +462,11 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, {fmt.Sprintf("remote_queue.%s.send_interval", provider), "5s"}, - }, queueChangedFields) + {fmt.Sprintf("remote_queue.%s.access_key", provider), key}, + {fmt.Sprintf("remote_queue.%s.secret_key", provider), secret}, + }, 
queueInputs) - assert.Equal(t, 6, len(pipelineChangedFields)) + assert.Equal(t, 6, len(pipelineInputs)) assert.Equal(t, [][]string{ {"pipeline:remotequeueruleset", "disabled", "false"}, {"pipeline:ruleset", "disabled", "true"}, @@ -488,14 +474,17 @@ func TestGetChangedQueueFieldsForIngestor(t *testing.T) { {"pipeline:remotequeueoutput", "disabled", "false"}, {"pipeline:typing", "disabled", "true"}, {"pipeline:indexerPipe", "disabled", "true"}, - }, pipelineChangedFields) + }, pipelineInputs) } -func TestHandlePushQueueChange(t *testing.T) { +func TestUpdateIngestorConfFiles(t *testing.T) { + c := spltest.NewMockClient() + ctx := context.TODO() + // Object definitions provider := "sqs_smartbus" - queue := enterpriseApi.Queue{ + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ Kind: "Queue", APIVersion: "enterprise.splunk.com/v4", @@ -514,7 +503,7 @@ func TestHandlePushQueueChange(t *testing.T) { }, } - os := enterpriseApi.ObjectStorage{ + os := &enterpriseApi.ObjectStorage{ TypeMeta: metav1.TypeMeta{ Kind: "ObjectStorage", APIVersion: "enterprise.splunk.com/v4", @@ -531,7 +520,7 @@ func TestHandlePushQueueChange(t *testing.T) { }, } - newCR := &enterpriseApi.IngestorCluster{ + cr := &enterpriseApi.IngestorCluster{ TypeMeta: metav1.TypeMeta{ Kind: "IngestorCluster", }, @@ -595,6 +584,10 @@ func TestHandlePushQueueChange(t *testing.T) { pod2 := pod0.DeepCopy() pod2.ObjectMeta.Name = "splunk-test-ingestor-2" + c.Create(ctx, pod0) + c.Create(ctx, pod1) + c.Create(ctx, pod2) + secret := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "test-secrets", @@ -605,17 +598,10 @@ func TestHandlePushQueueChange(t *testing.T) { }, } - // Mock pods - c := spltest.NewMockClient() - ctx := context.TODO() - c.Create(ctx, pod0) - c.Create(ctx, pod1) - c.Create(ctx, pod2) - // Negative test case: secret not found mgr := &ingestorClusterPodManager{} - err := mgr.handlePushQueueChange(ctx, newCR, queue, os, c) + err := mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, 
&os.Spec, c) assert.NotNil(t, err) // Mock secret @@ -624,9 +610,9 @@ func TestHandlePushQueueChange(t *testing.T) { mockHTTPClient := &spltest.MockHTTPClient{} // Negative test case: failure in creating remote queue stanza - mgr = newTestPushQueuePipelineManager(mockHTTPClient) + mgr = newTestIngestorQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushQueueChange(ctx, newCR, queue, os, c) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) assert.NotNil(t, err) // outputs.conf @@ -643,12 +629,12 @@ func TestHandlePushQueueChange(t *testing.T) { } body := buildFormBody(propertyKVList) - addRemoteQueueHandlersForIngestor(mockHTTPClient, newCR, &queue, newCR.Status.ReadyReplicas, "conf-outputs", body) + addRemoteQueueHandlersForIngestor(mockHTTPClient, cr, &queue.Spec, "conf-outputs", body) // Negative test case: failure in creating remote queue stanza - mgr = newTestPushQueuePipelineManager(mockHTTPClient) + mgr = newTestIngestorQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushQueueChange(ctx, newCR, queue, os, c) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) assert.NotNil(t, err) // default-mode.conf @@ -661,9 +647,9 @@ func TestHandlePushQueueChange(t *testing.T) { {"pipeline:indexerPipe", "disabled", "true"}, } - for i := 0; i < int(newCR.Status.ReadyReplicas); i++ { + for i := 0; i < int(cr.Status.ReadyReplicas); i++ { podName := fmt.Sprintf("splunk-test-ingestor-%d", i) - baseURL := fmt.Sprintf("https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/conf-default-mode", podName, newCR.GetName(), newCR.GetNamespace()) + baseURL := fmt.Sprintf("https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/conf-default-mode", podName, cr.GetName(), cr.GetNamespace()) for _, field := range propertyKVList { req, _ := http.NewRequest("POST", baseURL, strings.NewReader(fmt.Sprintf("name=%s", field[0]))) @@ -675,32 +661,32 @@ func 
TestHandlePushQueueChange(t *testing.T) { } } - mgr = newTestPushQueuePipelineManager(mockHTTPClient) + mgr = newTestIngestorQueuePipelineManager(mockHTTPClient) - err = mgr.handlePushQueueChange(ctx, newCR, queue, os, c) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) assert.Nil(t, err) } -func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, queue *enterpriseApi.Queue, replicas int32, confName, body string) { - for i := 0; i < int(replicas); i++ { +func addRemoteQueueHandlersForIngestor(mockHTTPClient *spltest.MockHTTPClient, cr *enterpriseApi.IngestorCluster, queue *enterpriseApi.QueueSpec, confName, body string) { + for i := 0; i < int(cr.Status.ReadyReplicas); i++ { podName := fmt.Sprintf("splunk-%s-ingestor-%d", cr.GetName(), i) baseURL := fmt.Sprintf( "https://%s.splunk-%s-ingestor-headless.%s.svc.cluster.local:8089/servicesNS/nobody/system/configs/%s", podName, cr.GetName(), cr.GetNamespace(), confName, ) - createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) + createReqBody := fmt.Sprintf("name=%s", fmt.Sprintf("remote_queue:%s", queue.SQS.Name)) reqCreate, _ := http.NewRequest("POST", baseURL, strings.NewReader(createReqBody)) mockHTTPClient.AddHandler(reqCreate, 200, "", nil) - updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.Spec.SQS.Name)) + updateURL := fmt.Sprintf("%s/%s", baseURL, fmt.Sprintf("remote_queue:%s", queue.SQS.Name)) reqUpdate, _ := http.NewRequest("POST", updateURL, strings.NewReader(body)) mockHTTPClient.AddHandler(reqUpdate, 200, "", nil) } } -func newTestPushQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *ingestorClusterPodManager { - newSplunkClientForPushQueuePipeline := func(uri, user, pass string) *splclient.SplunkClient { +func newTestIngestorQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *ingestorClusterPodManager { + newSplunkClientForQueuePipeline 
:= func(uri, user, pass string) *splclient.SplunkClient { return &splclient.SplunkClient{ ManagementURI: uri, Username: user, @@ -709,6 +695,6 @@ func newTestPushQueuePipelineManager(mockHTTPClient *spltest.MockHTTPClient) *in } } return &ingestorClusterPodManager{ - newSplunkClient: newSplunkClientForPushQueuePipeline, + newSplunkClient: newSplunkClientForQueuePipeline, } } diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index 8aac52220..3e18b669c 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -85,36 +85,6 @@ var ( "AWS_STS_REGIONAL_ENDPOINTS=regional", } - updateQueue = enterpriseApi.QueueSpec{ - Provider: "sqs", - SQS: enterpriseApi.SQSSpec{ - Name: "index-ingest-separation-test-q-updated", - AuthRegion: "us-west-2", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - DLQ: "index-ingest-separation-test-dlq-updated", - }, - } - - updatedInputs = []string{ - "[remote_queue:index-ingest-separation-test-q-updated]", - "remote_queue.type = sqs_smartbus", - "remote_queue.sqs_smartbus.auth_region = us-west-2", - "remote_queue.sqs_smartbus.dead_letter_queue.name = index-ingest-separation-test-dlq-updated", - "remote_queue.sqs_smartbus.endpoint = https://sqs.us-west-2.amazonaws.com", - "remote_queue.sqs_smartbus.large_message_store.endpoint = https://s3.us-west-2.amazonaws.com", - "remote_queue.sqs_smartbus.large_message_store.path = s3://index-ingest-separation-test-bucket/smartbus-test", - "remote_queue.sqs_smartbus.retry_policy = max", - "remote_queue.max.sqs_smartbus.max_retries_per_part = 5"} - updatedOutputs = append(updatedInputs, "remote_queue.sqs_smartbus.encoding_format = s2s", "remote_queue.sqs_smartbus.send_interval = 4s") - updatedDefaultsAll = []string{ - "[pipeline:remotequeueruleset]\ndisabled 
= false", - "[pipeline:ruleset]\ndisabled = false", - "[pipeline:remotequeuetyping]\ndisabled = false", - "[pipeline:remotequeueoutput]\ndisabled = false", - "[pipeline:typing]\ndisabled = true", - } - updatedDefaultsIngest = append(updatedDefaultsAll, "[pipeline:indexerPipe]\ndisabled = true") - inputsShouldNotContain = []string{ "[remote_queue:index-ingest-separation-test-q]", "remote_queue.sqs_smartbus.dead_letter_queue.name = index-ingest-separation-test-dlq", diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index 6fe07597a..4314124cc 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -83,7 +83,6 @@ var _ = Describe("indingsep test", func() { // Secret reference volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} queue.SQS.VolList = volumeSpec - updateQueue.SQS.VolList = volumeSpec // Deploy Queue testcaseEnvInst.Log.Info("Deploy Queue") @@ -162,7 +161,6 @@ var _ = Describe("indingsep test", func() { // Secret reference volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} queue.SQS.VolList = volumeSpec - updateQueue.SQS.VolList = volumeSpec // Deploy Queue testcaseEnvInst.Log.Info("Deploy Queue") @@ -374,183 +372,4 @@ var _ = Describe("indingsep test", func() { } }) }) - - Context("Ingestor and Indexer deployment", func() { - It("indingsep, integration, indingsep: Splunk Operator can update Ingestors and Indexers with correct setup", func() { - // TODO: Remove secret reference and uncomment serviceAccountName part once IRSA fixed for Splunk and EKS 1.34+ - // Create Service Account - // testcaseEnvInst.Log.Info("Create Service Account") - // 
testcaseEnvInst.CreateServiceAccount(serviceAccountName) - - // Secret reference - volumeSpec := []enterpriseApi.VolumeSpec{testenv.GenerateQueueVolumeSpec("queue-secret-ref-volume", testcaseEnvInst.GetIndexIngestSepSecretName())} - queue.SQS.VolList = volumeSpec - updateQueue.SQS.VolList = volumeSpec - - // Deploy Queue - testcaseEnvInst.Log.Info("Deploy Queue") - q, err := deployment.DeployQueue(ctx, "queue", queue) - Expect(err).To(Succeed(), "Unable to deploy Queue") - - // Deploy ObjectStorage - testcaseEnvInst.Log.Info("Deploy ObjectStorage") - objStorage, err := deployment.DeployObjectStorage(ctx, "os", objectStorage) - Expect(err).To(Succeed(), "Unable to deploy ObjectStorage") - - // Deploy Ingestor Cluster - testcaseEnvInst.Log.Info("Deploy Ingestor Cluster") - _, err = deployment.DeployIngestorCluster(ctx, deployment.GetName()+"-ingest", 3, v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) - Expect(err).To(Succeed(), "Unable to deploy Ingestor Cluster") - - // Deploy Cluster Manager - testcaseEnvInst.Log.Info("Deploy Cluster Manager") - _, err = deployment.DeployClusterManagerWithGivenSpec(ctx, deployment.GetName(), cmSpec) - Expect(err).To(Succeed(), "Unable to deploy Cluster Manager") - - // Deploy Indexer Cluster - testcaseEnvInst.Log.Info("Deploy Indexer Cluster") - _, err = deployment.DeployIndexerCluster(ctx, deployment.GetName()+"-idxc", "", 3, deployment.GetName(), "", v1.ObjectReference{Name: q.Name}, v1.ObjectReference{Name: objStorage.Name}, "") // , serviceAccountName) - Expect(err).To(Succeed(), "Unable to deploy Indexer Cluster") - - // Ensure that Ingestor Cluster is in Ready phase - testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") - testenv.IngestorReady(ctx, deployment, testcaseEnvInst) - - // Ensure that Cluster Manager is in Ready phase - testcaseEnvInst.Log.Info("Ensure that Cluster Manager is in Ready phase") - testenv.ClusterManagerReady(ctx, 
deployment, testcaseEnvInst) - - // Ensure that Indexer Cluster is in Ready phase - testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") - testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) - - // Get instance of current Queue CR with latest config - testcaseEnvInst.Log.Info("Get instance of current Queue CR with latest config") - queue := &enterpriseApi.Queue{} - err = deployment.GetInstance(ctx, q.Name, queue) - Expect(err).To(Succeed(), "Failed to get instance of Queue") - - // Update instance of Queue CR with new queue - testcaseEnvInst.Log.Info("Update instance of Queue CR with new queue") - queue.Spec = updateQueue - err = deployment.UpdateCR(ctx, queue) - Expect(err).To(Succeed(), "Unable to deploy Queue with updated CR") - - // Ensure that Ingestor Cluster is in Ready phase - testcaseEnvInst.Log.Info("Ensure that Ingestor Cluster is in Ready phase") - testenv.IngestorReady(ctx, deployment, testcaseEnvInst) - - // Get instance of current Ingestor Cluster CR with latest config - testcaseEnvInst.Log.Info("Get instance of current Ingestor Cluster CR with latest config") - ingest := &enterpriseApi.IngestorCluster{} - err = deployment.GetInstance(ctx, deployment.GetName()+"-ingest", ingest) - Expect(err).To(Succeed(), "Failed to get instance of Ingestor Cluster") - - // Verify Ingestor Cluster Status - testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(*ingest.Status.Queue).To(Equal(updateQueue), "Ingestor queue status is not the same as provided as input") - - // Ensure that Indexer Cluster is in Ready phase - testcaseEnvInst.Log.Info("Ensure that Indexer Cluster is in Ready phase") - testenv.SingleSiteIndexersReady(ctx, deployment, testcaseEnvInst) - - // Get instance of current Indexer Cluster CR with latest config - testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") - index := &enterpriseApi.IndexerCluster{} - err = deployment.GetInstance(ctx, 
deployment.GetName()+"-idxc", index) - Expect(err).To(Succeed(), "Failed to get instance of Indexer Cluster") - - // Verify Indexer Cluster Status - testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(*index.Status.Queue).To(Equal(updateQueue), "Indexer queue status is not the same as provided as input") - - // Verify conf files - testcaseEnvInst.Log.Info("Verify conf files") - pods := testenv.DumpGetPods(deployment.GetName()) - for _, pod := range pods { - defaultsConf := "" - - if strings.Contains(pod, "ingest") || strings.Contains(pod, "idxc") { - // Verify outputs.conf - testcaseEnvInst.Log.Info("Verify outputs.conf") - outputsPath := "opt/splunk/etc/system/local/outputs.conf" - outputsConf, err := testenv.GetConfFile(pod, outputsPath, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get outputs.conf from Ingestor Cluster pod") - testenv.ValidateContent(outputsConf, updatedOutputs, true) - testenv.ValidateContent(outputsConf, outputsShouldNotContain, false) - - // Verify default-mode.conf - testcaseEnvInst.Log.Info("Verify default-mode.conf") - defaultsPath := "opt/splunk/etc/system/local/default-mode.conf" - defaultsConf, err := testenv.GetConfFile(pod, defaultsPath, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get default-mode.conf from Ingestor Cluster pod") - testenv.ValidateContent(defaultsConf, defaultsAll, true) - - // Verify AWS env variables - testcaseEnvInst.Log.Info("Verify AWS env variables") - envVars, err := testenv.GetAWSEnv(pod, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get AWS env variables from Ingestor Cluster pod") - testenv.ValidateContent(envVars, awsEnvVars, true) - } - - if strings.Contains(pod, "ingest") { - // Verify default-mode.conf - testcaseEnvInst.Log.Info("Verify default-mode.conf") - testenv.ValidateContent(defaultsConf, defaultsIngest, true) - } else if strings.Contains(pod, "idxc") { - // Verify inputs.conf - testcaseEnvInst.Log.Info("Verify inputs.conf") - 
inputsPath := "opt/splunk/etc/system/local/inputs.conf" - inputsConf, err := testenv.GetConfFile(pod, inputsPath, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get inputs.conf from Indexer Cluster pod") - testenv.ValidateContent(inputsConf, updatedInputs, true) - testenv.ValidateContent(inputsConf, inputsShouldNotContain, false) - } - } - - // Verify conf files - testcaseEnvInst.Log.Info("Verify conf files") - pods = testenv.DumpGetPods(deployment.GetName()) - for _, pod := range pods { - defaultsConf := "" - - if strings.Contains(pod, "ingest") || strings.Contains(pod, "idxc") { - // Verify outputs.conf - testcaseEnvInst.Log.Info("Verify outputs.conf") - outputsPath := "opt/splunk/etc/system/local/outputs.conf" - outputsConf, err := testenv.GetConfFile(pod, outputsPath, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get outputs.conf from Ingestor Cluster pod") - testenv.ValidateContent(outputsConf, updatedOutputs, true) - testenv.ValidateContent(outputsConf, outputsShouldNotContain, false) - - // Verify default-mode.conf - testcaseEnvInst.Log.Info("Verify default-mode.conf") - defaultsPath := "opt/splunk/etc/system/local/default-mode.conf" - defaultsConf, err := testenv.GetConfFile(pod, defaultsPath, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get default-mode.conf from Ingestor Cluster pod") - testenv.ValidateContent(defaultsConf, updatedDefaultsAll, true) - - // Verify AWS env variables - testcaseEnvInst.Log.Info("Verify AWS env variables") - envVars, err := testenv.GetAWSEnv(pod, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get AWS env variables from Ingestor Cluster pod") - testenv.ValidateContent(envVars, awsEnvVars, true) - } - - if strings.Contains(pod, "ingest") { - // Verify default-mode.conf - testcaseEnvInst.Log.Info("Verify default-mode.conf") - testenv.ValidateContent(defaultsConf, updatedDefaultsIngest, true) - } else if strings.Contains(pod, "idxc") { - // Verify inputs.conf - 
testcaseEnvInst.Log.Info("Verify inputs.conf") - inputsPath := "opt/splunk/etc/system/local/inputs.conf" - inputsConf, err := testenv.GetConfFile(pod, inputsPath, deployment.GetName()) - Expect(err).To(Succeed(), "Failed to get inputs.conf from Indexer Cluster pod") - testenv.ValidateContent(inputsConf, updatedInputs, true) - testenv.ValidateContent(inputsConf, inputsShouldNotContain, false) - } - } - }) - }) }) From 5c6e7867024169603458cee81d99b1ef6958fd59 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Tue, 13 Jan 2026 13:04:14 +0100 Subject: [PATCH 20/25] CSPL-4360 Addressing secret value change and removing redundant controllers --- api/v4/indexercluster_types.go | 7 +- api/v4/ingestorcluster_types.go | 7 +- api/v4/objectstorage_types.go | 30 -- api/v4/queue_types.go | 30 -- api/v4/zz_generated.deepcopy.go | 20 -- cmd/main.go | 14 - ...enterprise.splunk.com_indexerclusters.yaml | 112 +------- ...nterprise.splunk.com_ingestorclusters.yaml | 112 +------- config/rbac/role.yaml | 6 - .../controller/objectstorage_controller.go | 120 -------- .../objectstorage_controller_test.go | 260 ----------------- internal/controller/queue_controller.go | 120 -------- internal/controller/queue_controller_test.go | 269 ------------------ internal/controller/suite_test.go | 12 - pkg/splunk/enterprise/indexercluster.go | 82 +++--- pkg/splunk/enterprise/indexercluster_test.go | 18 +- pkg/splunk/enterprise/ingestorcluster.go | 57 ++-- pkg/splunk/enterprise/ingestorcluster_test.go | 18 +- pkg/splunk/enterprise/util.go | 12 +- .../index_and_ingestion_separation_test.go | 6 +- 20 files changed, 117 insertions(+), 1195 deletions(-) delete mode 100644 internal/controller/objectstorage_controller.go delete mode 100644 internal/controller/objectstorage_controller_test.go delete mode 100644 internal/controller/queue_controller.go delete mode 100644 internal/controller/queue_controller_test.go diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 
34eb0ba3e..f1332d8c4 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -123,11 +123,8 @@ type IndexerClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Queue - Queue *QueueSpec `json:"queue,omitempty"` - - // Object Storage - ObjectStorage *ObjectStorageSpec `json:"objectStorage,omitempty"` + // Queue and bucket access secret version + QueueBucketAccessSecretVersion string `json:"queueBucketAccessSecretVersion,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index 15dc47640..9ce919809 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -76,11 +76,8 @@ type IngestorClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Queue - Queue *QueueSpec `json:"queue,omitempty"` - - // Object Storage - ObjectStorage *ObjectStorageSpec `json:"objectStorage,omitempty"` + // Queue and bucket access secret version + QueueBucketAccessSecretVersion string `json:"queueBucketAccessSecretVersion,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v4/objectstorage_types.go b/api/v4/objectstorage_types.go index 587738d20..7712e81d6 100644 --- a/api/v4/objectstorage_types.go +++ b/api/v4/objectstorage_types.go @@ -17,7 +17,6 @@ limitations under the License. 
package v4 import ( - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -109,32 +108,3 @@ type ObjectStorageList struct { func init() { SchemeBuilder.Register(&ObjectStorage{}, &ObjectStorageList{}) } - -// NewEvent creates a new event associated with the object and ready -// to be published to Kubernetes API -func (os *ObjectStorage) NewEvent(eventType, reason, message string) corev1.Event { - t := metav1.Now() - return corev1.Event{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: reason + "-", - Namespace: os.ObjectMeta.Namespace, - }, - InvolvedObject: corev1.ObjectReference{ - Kind: "ObjectStorage", - Namespace: os.Namespace, - Name: os.Name, - UID: os.UID, - APIVersion: GroupVersion.String(), - }, - Reason: reason, - Message: message, - Source: corev1.EventSource{ - Component: "splunk-object-storage-controller", - }, - FirstTimestamp: t, - LastTimestamp: t, - Count: 1, - Type: eventType, - ReportingController: "enterprise.splunk.com/object-storage-controller", - } -} diff --git a/api/v4/queue_types.go b/api/v4/queue_types.go index d689a4acd..999eaccc8 100644 --- a/api/v4/queue_types.go +++ b/api/v4/queue_types.go @@ -17,7 +17,6 @@ limitations under the License. 
package v4 import ( - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) @@ -123,32 +122,3 @@ type QueueList struct { func init() { SchemeBuilder.Register(&Queue{}, &QueueList{}) } - -// NewEvent creates a new event associated with the object and ready -// to be published to Kubernetes API -func (os *Queue) NewEvent(eventType, reason, message string) corev1.Event { - t := metav1.Now() - return corev1.Event{ - ObjectMeta: metav1.ObjectMeta{ - GenerateName: reason + "-", - Namespace: os.ObjectMeta.Namespace, - }, - InvolvedObject: corev1.ObjectReference{ - Kind: "Queue", - Namespace: os.Namespace, - Name: os.Name, - UID: os.UID, - APIVersion: GroupVersion.String(), - }, - Reason: reason, - Message: message, - Source: corev1.EventSource{ - Component: "splunk-queue-controller", - }, - FirstTimestamp: t, - LastTimestamp: t, - Count: 1, - Type: eventType, - ReportingController: "enterprise.splunk.com/queue-controller", - } -} diff --git a/api/v4/zz_generated.deepcopy.go b/api/v4/zz_generated.deepcopy.go index 1f2215a9a..c7759fa58 100644 --- a/api/v4/zz_generated.deepcopy.go +++ b/api/v4/zz_generated.deepcopy.go @@ -545,16 +545,6 @@ func (in *IndexerClusterStatus) DeepCopyInto(out *IndexerClusterStatus) { *out = make([]IndexerClusterMemberStatus, len(*in)) copy(*out, *in) } - if in.Queue != nil { - in, out := &in.Queue, &out.Queue - *out = new(QueueSpec) - (*in).DeepCopyInto(*out) - } - if in.ObjectStorage != nil { - in, out := &in.ObjectStorage, &out.ObjectStorage - *out = new(ObjectStorageSpec) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IndexerClusterStatus. 
@@ -648,16 +638,6 @@ func (in *IngestorClusterStatus) DeepCopyInto(out *IngestorClusterStatus) { } } in.AppContext.DeepCopyInto(&out.AppContext) - if in.Queue != nil { - in, out := &in.Queue, &out.Queue - *out = new(QueueSpec) - (*in).DeepCopyInto(*out) - } - if in.ObjectStorage != nil { - in, out := &in.ObjectStorage, &out.ObjectStorage - *out = new(ObjectStorageSpec) - **out = **in - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngestorClusterStatus. diff --git a/cmd/main.go b/cmd/main.go index dfb9c87e1..a037f87b1 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -230,20 +230,6 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "IngestorCluster") os.Exit(1) } - if err := (&controller.QueueReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "Queue") - os.Exit(1) - } - if err := (&controller.ObjectStorageReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - }).SetupWithManager(mgr); err != nil { - setupLog.Error(err, "unable to create controller", "controller", "ObjectStorage") - os.Exit(1) - } //+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 2d01798e3..9b3f50bc8 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -8383,39 +8383,6 @@ spec: namespace_scoped_secret_resource_version: description: Indicates resource version of namespace scoped secret type: string - objectStorage: - description: Object Storage - properties: - provider: - description: Provider of queue resources - enum: - - s3 - type: string - s3: - description: s3 specific inputs - properties: - endpoint: - 
description: S3-compatible Service endpoint - pattern: ^https?://[^\s/$.?#].[^\s]*$ - type: string - path: - description: S3 bucket path - pattern: ^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$ - type: string - required: - - path - type: object - required: - - provider - - s3 - type: object - x-kubernetes-validations: - - message: provider is immutable once created - rule: self.provider == oldSelf.provider - - message: s3 is immutable once created - rule: self.s3 == oldSelf.s3 - - message: s3 must be provided when provider is s3 - rule: self.provider != 's3' || has(self.s3) peers: description: status of each indexer cluster peer items: @@ -8457,82 +8424,9 @@ spec: - Terminating - Error type: string - queue: - description: Queue - properties: - provider: - description: Provider of queue resources - enum: - - sqs - type: string - sqs: - description: sqs specific inputs - properties: - authRegion: - description: Auth Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string - dlq: - description: Name of the dead letter queue resource - minLength: 1 - type: string - endpoint: - description: Amazon SQS Service endpoint - pattern: ^https?://[^\s/$.?#].[^\s]*$ - type: string - name: - description: Name of the queue - minLength: 1 - type: string - volumes: - description: List of remote storage volumes - items: - description: VolumeSpec defines remote volume config - properties: - endpoint: - description: Remote volume URI - type: string - name: - description: Remote volume name - type: string - path: - description: Remote volume path - type: string - provider: - description: 'App Package Remote Store provider. Supported - values: aws, minio, azure, gcp.' - type: string - region: - description: Region of the remote storage volume where - apps reside. Used for aws, if provided. Not used for - minio and azure. - type: string - secretRef: - description: Secret object name - type: string - storageType: - description: 'Remote Storage type. 
Supported values: - s3, blob, gcs. s3 works with aws or minio providers, - whereas blob works with azure provider, gcs works - for gcp.' - type: string - type: object - type: array - required: - - dlq - - name - type: object - required: - - provider - - sqs - type: object - x-kubernetes-validations: - - message: provider is immutable once created - rule: self.provider == oldSelf.provider - - message: sqs is immutable once created - rule: self.sqs == oldSelf.sqs - - message: sqs must be provided when provider is sqs - rule: self.provider != 'sqs' || has(self.sqs) + queueBucketAccessSecretVersion: + description: Queue and bucket access secret version + type: string readyReplicas: description: current number of ready indexer peers format: int32 diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 194fdac86..e04e1a021 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -4594,39 +4594,6 @@ spec: message: description: Auxillary message describing CR status type: string - objectStorage: - description: Object Storage - properties: - provider: - description: Provider of queue resources - enum: - - s3 - type: string - s3: - description: s3 specific inputs - properties: - endpoint: - description: S3-compatible Service endpoint - pattern: ^https?://[^\s/$.?#].[^\s]*$ - type: string - path: - description: S3 bucket path - pattern: ^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$ - type: string - required: - - path - type: object - required: - - provider - - s3 - type: object - x-kubernetes-validations: - - message: provider is immutable once created - rule: self.provider == oldSelf.provider - - message: s3 is immutable once created - rule: self.s3 == oldSelf.s3 - - message: s3 must be provided when provider is s3 - rule: self.provider != 's3' || has(self.s3) phase: description: Phase of the ingestor pods enum: 
@@ -4638,82 +4605,9 @@ spec: - Terminating - Error type: string - queue: - description: Queue - properties: - provider: - description: Provider of queue resources - enum: - - sqs - type: string - sqs: - description: sqs specific inputs - properties: - authRegion: - description: Auth Region of the resources - pattern: ^(?:us|ap|eu|me|af|sa|ca|cn|il)(?:-[a-z]+){1,3}-\d$ - type: string - dlq: - description: Name of the dead letter queue resource - minLength: 1 - type: string - endpoint: - description: Amazon SQS Service endpoint - pattern: ^https?://[^\s/$.?#].[^\s]*$ - type: string - name: - description: Name of the queue - minLength: 1 - type: string - volumes: - description: List of remote storage volumes - items: - description: VolumeSpec defines remote volume config - properties: - endpoint: - description: Remote volume URI - type: string - name: - description: Remote volume name - type: string - path: - description: Remote volume path - type: string - provider: - description: 'App Package Remote Store provider. Supported - values: aws, minio, azure, gcp.' - type: string - region: - description: Region of the remote storage volume where - apps reside. Used for aws, if provided. Not used for - minio and azure. - type: string - secretRef: - description: Secret object name - type: string - storageType: - description: 'Remote Storage type. Supported values: - s3, blob, gcs. s3 works with aws or minio providers, - whereas blob works with azure provider, gcs works - for gcp.' 
- type: string - type: object - type: array - required: - - dlq - - name - type: object - required: - - provider - - sqs - type: object - x-kubernetes-validations: - - message: provider is immutable once created - rule: self.provider == oldSelf.provider - - message: sqs is immutable once created - rule: self.sqs == oldSelf.sqs - - message: sqs must be provided when provider is sqs - rule: self.provider != 'sqs' || has(self.sqs) + queueBucketAccessSecretVersion: + description: Queue and bucket access secret version + type: string readyReplicas: description: Number of ready ingestor pods format: int32 diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 973105d16..fc8513023 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -54,8 +54,6 @@ rules: - licensemanagers - licensemasters - monitoringconsoles - - objectstorages - - queues - searchheadclusters - standalones verbs: @@ -76,8 +74,6 @@ rules: - licensemanagers/finalizers - licensemasters/finalizers - monitoringconsoles/finalizers - - objectstorages/finalizers - - queues/finalizers - searchheadclusters/finalizers - standalones/finalizers verbs: @@ -92,8 +88,6 @@ rules: - licensemanagers/status - licensemasters/status - monitoringconsoles/status - - objectstorages/status - - queues/status - searchheadclusters/status - standalones/status verbs: diff --git a/internal/controller/objectstorage_controller.go b/internal/controller/objectstorage_controller.go deleted file mode 100644 index 4ae36b1a2..000000000 --- a/internal/controller/objectstorage_controller.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "time" - - k8serrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/pkg/errors" - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/splunk/splunk-operator/internal/controller/common" - metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics" - enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" -) - -// ObjectStorageReconciler reconciles a ObjectStorage object -type ObjectStorageReconciler struct { - client.Client - Scheme *runtime.Scheme -} - -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=objectstorages,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=objectstorages/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=objectstorages/finalizers,verbs=update - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the ObjectStorage object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. 
-// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile -func (r *ObjectStorageReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "ObjectStorage")).Inc() - defer recordInstrumentionData(time.Now(), req, "controller", "ObjectStorage") - - reqLogger := log.FromContext(ctx) - reqLogger = reqLogger.WithValues("objectstorage", req.NamespacedName) - - // Fetch the ObjectStorage - instance := &enterpriseApi.ObjectStorage{} - err := r.Get(ctx, req.NamespacedName, instance) - if err != nil { - if k8serrors.IsNotFound(err) { - // Request object not found, could have been deleted after - // reconcile request. Owned objects are automatically - // garbage collected. For additional cleanup logic use - // finalizers. Return and don't requeue - return ctrl.Result{}, nil - } - // Error reading the object - requeue the request. - return ctrl.Result{}, errors.Wrap(err, "could not load objectstorage data") - } - - // If the reconciliation is paused, requeue - annotations := instance.GetAnnotations() - if annotations != nil { - if _, ok := annotations[enterpriseApi.ObjectStoragePausedAnnotation]; ok { - return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil - } - } - - reqLogger.Info("start", "CR version", instance.GetResourceVersion()) - - result, err := ApplyObjectStorage(ctx, r.Client, instance) - if result.Requeue && result.RequeueAfter != 0 { - reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) - } - - return result, err -} - -var ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { - return enterprise.ApplyObjectStorage(ctx, client, instance) -} - -// SetupWithManager sets up the controller with the Manager. 
-func (r *ObjectStorageReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&enterpriseApi.ObjectStorage{}). - WithEventFilter(predicate.Or( - common.GenerationChangedPredicate(), - common.AnnotationChangedPredicate(), - common.LabelChangedPredicate(), - common.SecretChangedPredicate(), - common.ConfigMapChangedPredicate(), - common.StatefulsetChangedPredicate(), - common.PodChangedPredicate(), - common.CrdChangedPredicate(), - )). - WithOptions(controller.Options{ - MaxConcurrentReconciles: enterpriseApi.TotalWorker, - }). - Complete(r) -} diff --git a/internal/controller/objectstorage_controller_test.go b/internal/controller/objectstorage_controller_test.go deleted file mode 100644 index 6d7dec87a..000000000 --- a/internal/controller/objectstorage_controller_test.go +++ /dev/null @@ -1,260 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "fmt" - "time" - - . "github.com/onsi/ginkgo/v2" - . 
"github.com/onsi/gomega" - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/splunk/splunk-operator/internal/controller/testutils" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -var _ = Describe("ObjectStorage Controller", func() { - BeforeEach(func() { - time.Sleep(2 * time.Second) - }) - - AfterEach(func() { - - }) - - Context("ObjectStorage Management", func() { - - It("Get ObjectStorage custom resource should fail", func() { - namespace := "ns-splunk-objectstorage-1" - ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - _, err := GetObjectStorage("test", nsSpecs.Name) - Expect(err.Error()).Should(Equal("objectstorages.enterprise.splunk.com \"test\" not found")) - Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) - }) - - It("Create ObjectStorage custom resource with annotations should pause", func() { - namespace := "ns-splunk-objectstorage-2" - annotations := make(map[string]string) - annotations[enterpriseApi.ObjectStoragePausedAnnotation] = "" - ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - spec := enterpriseApi.ObjectStorageSpec{ - Provider: "s3", - S3: enterpriseApi.S3Spec{ - Endpoint: 
"https://s3.us-west-2.amazonaws.com", - Path: "s3://ingestion/smartbus-test", - }, - } - CreateObjectStorage("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) - osSpec, _ := GetObjectStorage("test", nsSpecs.Name) - annotations = map[string]string{} - osSpec.Annotations = annotations - osSpec.Status.Phase = "Ready" - UpdateObjectStorage(osSpec, enterpriseApi.PhaseReady, spec) - DeleteObjectStorage("test", nsSpecs.Name) - Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) - }) - - It("Create ObjectStorage custom resource should succeeded", func() { - namespace := "ns-splunk-objectstorage-3" - ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - annotations := make(map[string]string) - spec := enterpriseApi.ObjectStorageSpec{ - Provider: "s3", - S3: enterpriseApi.S3Spec{ - Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: "s3://ingestion/smartbus-test", - }, - } - CreateObjectStorage("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) - DeleteObjectStorage("test", nsSpecs.Name) - Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) - }) - - It("Cover Unused methods", func() { - namespace := "ns-splunk-objectstorage-4" - ApplyObjectStorage = func(ctx context.Context, client client.Client, instance *enterpriseApi.ObjectStorage) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - ctx := context.TODO() - builder := fake.NewClientBuilder() - c := builder.Build() - instance := ObjectStorageReconciler{ - Client: c, - Scheme: scheme.Scheme, - } - 
request := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: "test", - Namespace: namespace, - }, - } - _, err := instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - - spec := enterpriseApi.ObjectStorageSpec{ - Provider: "s3", - S3: enterpriseApi.S3Spec{ - Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: "s3://ingestion/smartbus-test", - }, - } - osSpec := testutils.NewObjectStorage("test", namespace, spec) - Expect(c.Create(ctx, osSpec)).Should(Succeed()) - - annotations := make(map[string]string) - annotations[enterpriseApi.ObjectStoragePausedAnnotation] = "" - osSpec.Annotations = annotations - Expect(c.Update(ctx, osSpec)).Should(Succeed()) - - _, err = instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - - annotations = map[string]string{} - osSpec.Annotations = annotations - Expect(c.Update(ctx, osSpec)).Should(Succeed()) - - _, err = instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - - osSpec.DeletionTimestamp = &metav1.Time{} - _, err = instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - }) - - }) -}) - -func GetObjectStorage(name string, namespace string) (*enterpriseApi.ObjectStorage, error) { - By("Expecting ObjectStorage custom resource to be retrieved successfully") - - key := types.NamespacedName{ - Name: name, - Namespace: namespace, - } - os := &enterpriseApi.ObjectStorage{} - - err := k8sClient.Get(context.Background(), key, os) - if err != nil { - return nil, err - } - - return os, err -} - -func CreateObjectStorage(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage { - By("Expecting ObjectStorage custom resource to be created successfully") - key := types.NamespacedName{ - Name: name, - Namespace: namespace, - } - osSpec := &enterpriseApi.ObjectStorage{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: 
annotations, - }, - Spec: spec, - } - - Expect(k8sClient.Create(context.Background(), osSpec)).Should(Succeed()) - time.Sleep(2 * time.Second) - - os := &enterpriseApi.ObjectStorage{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, os) - if status != "" { - fmt.Printf("status is set to %v", status) - os.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), os)).Should(Succeed()) - time.Sleep(2 * time.Second) - } - return true - }, timeout, interval).Should(BeTrue()) - - return os -} - -func UpdateObjectStorage(instance *enterpriseApi.ObjectStorage, status enterpriseApi.Phase, spec enterpriseApi.ObjectStorageSpec) *enterpriseApi.ObjectStorage { - By("Expecting ObjectStorage custom resource to be updated successfully") - key := types.NamespacedName{ - Name: instance.Name, - Namespace: instance.Namespace, - } - - osSpec := testutils.NewObjectStorage(instance.Name, instance.Namespace, spec) - osSpec.ResourceVersion = instance.ResourceVersion - Expect(k8sClient.Update(context.Background(), osSpec)).Should(Succeed()) - time.Sleep(2 * time.Second) - - os := &enterpriseApi.ObjectStorage{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, os) - if status != "" { - fmt.Printf("status is set to %v", status) - os.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), os)).Should(Succeed()) - time.Sleep(2 * time.Second) - } - return true - }, timeout, interval).Should(BeTrue()) - - return os -} - -func DeleteObjectStorage(name string, namespace string) { - By("Expecting ObjectStorage custom resource to be deleted successfully") - key := types.NamespacedName{ - Name: name, - Namespace: namespace, - } - - Eventually(func() error { - os := &enterpriseApi.ObjectStorage{} - _ = k8sClient.Get(context.Background(), key, os) - err := k8sClient.Delete(context.Background(), os) - return err - }, timeout, interval).Should(Succeed()) -} diff --git a/internal/controller/queue_controller.go 
b/internal/controller/queue_controller.go deleted file mode 100644 index 6fff662b9..000000000 --- a/internal/controller/queue_controller.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "time" - - k8serrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/runtime" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - "github.com/pkg/errors" - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/splunk/splunk-operator/internal/controller/common" - metrics "github.com/splunk/splunk-operator/pkg/splunk/client/metrics" - enterprise "github.com/splunk/splunk-operator/pkg/splunk/enterprise" -) - -// QueueReconciler reconciles a Queue object -type QueueReconciler struct { - client.Client - Scheme *runtime.Scheme -} - -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues,verbs=get;list;watch;create;update;patch;delete -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/status,verbs=get;update;patch -// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/finalizers,verbs=update - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the 
cluster closer to the desired state. -// TODO(user): Modify the Reconcile function to compare the state specified by -// the Queue object against the actual cluster state, and then -// perform operations to make the cluster state reflect the state specified by -// the user. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.22.1/pkg/reconcile -func (r *QueueReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - metrics.ReconcileCounters.With(metrics.GetPrometheusLabels(req, "Queue")).Inc() - defer recordInstrumentionData(time.Now(), req, "controller", "Queue") - - reqLogger := log.FromContext(ctx) - reqLogger = reqLogger.WithValues("queue", req.NamespacedName) - - // Fetch the Queue - instance := &enterpriseApi.Queue{} - err := r.Get(ctx, req.NamespacedName, instance) - if err != nil { - if k8serrors.IsNotFound(err) { - // Request object not found, could have been deleted after - // reconcile request. Owned objects are automatically - // garbage collected. For additional cleanup logic use - // finalizers. Return and don't requeue - return ctrl.Result{}, nil - } - // Error reading the object - requeue the request. 
- return ctrl.Result{}, errors.Wrap(err, "could not load queue data") - } - - // If the reconciliation is paused, requeue - annotations := instance.GetAnnotations() - if annotations != nil { - if _, ok := annotations[enterpriseApi.QueuePausedAnnotation]; ok { - return ctrl.Result{Requeue: true, RequeueAfter: pauseRetryDelay}, nil - } - } - - reqLogger.Info("start", "CR version", instance.GetResourceVersion()) - - result, err := ApplyQueue(ctx, r.Client, instance) - if result.Requeue && result.RequeueAfter != 0 { - reqLogger.Info("Requeued", "period(seconds)", int(result.RequeueAfter/time.Second)) - } - - return result, err -} - -var ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { - return enterprise.ApplyQueue(ctx, client, instance) -} - -// SetupWithManager sets up the controller with the Manager. -func (r *QueueReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&enterpriseApi.Queue{}). - WithEventFilter(predicate.Or( - common.GenerationChangedPredicate(), - common.AnnotationChangedPredicate(), - common.LabelChangedPredicate(), - common.SecretChangedPredicate(), - common.ConfigMapChangedPredicate(), - common.StatefulsetChangedPredicate(), - common.PodChangedPredicate(), - common.CrdChangedPredicate(), - )). - WithOptions(controller.Options{ - MaxConcurrentReconciles: enterpriseApi.TotalWorker, - }). - Complete(r) -} diff --git a/internal/controller/queue_controller_test.go b/internal/controller/queue_controller_test.go deleted file mode 100644 index b04a5d4b3..000000000 --- a/internal/controller/queue_controller_test.go +++ /dev/null @@ -1,269 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controller - -import ( - "context" - "fmt" - "time" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - enterpriseApi "github.com/splunk/splunk-operator/api/v4" - "github.com/splunk/splunk-operator/internal/controller/testutils" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/client/fake" - "sigs.k8s.io/controller-runtime/pkg/reconcile" -) - -var _ = Describe("Queue Controller", func() { - BeforeEach(func() { - time.Sleep(2 * time.Second) - }) - - AfterEach(func() { - - }) - - Context("Queue Management", func() { - - It("Get Queue custom resource should fail", func() { - namespace := "ns-splunk-queue-1" - ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - _, err := GetQueue("test", nsSpecs.Name) - Expect(err.Error()).Should(Equal("queues.enterprise.splunk.com \"test\" not found")) - Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) - }) - - It("Create Queue custom resource with annotations should pause", func() { - namespace := "ns-splunk-queue-2" - annotations := make(map[string]string) - annotations[enterpriseApi.QueuePausedAnnotation] = "" - ApplyQueue = func(ctx 
context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - spec := enterpriseApi.QueueSpec{ - Provider: "sqs", - SQS: enterpriseApi.SQSSpec{ - Name: "smartbus-queue", - AuthRegion: "us-west-2", - DLQ: "smartbus-dlq", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - }, - } - CreateQueue("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) - icSpec, _ := GetQueue("test", nsSpecs.Name) - annotations = map[string]string{} - icSpec.Annotations = annotations - icSpec.Status.Phase = "Ready" - UpdateQueue(icSpec, enterpriseApi.PhaseReady, spec) - DeleteQueue("test", nsSpecs.Name) - Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) - }) - - It("Create Queue custom resource should succeeded", func() { - namespace := "ns-splunk-queue-3" - ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { - return reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - annotations := make(map[string]string) - spec := enterpriseApi.QueueSpec{ - Provider: "sqs", - SQS: enterpriseApi.SQSSpec{ - Name: "smartbus-queue", - AuthRegion: "us-west-2", - DLQ: "smartbus-dlq", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - }, - } - CreateQueue("test", nsSpecs.Name, annotations, enterpriseApi.PhaseReady, spec) - DeleteQueue("test", nsSpecs.Name) - Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) - }) - - It("Cover Unused methods", func() { - namespace := "ns-splunk-queue-4" - ApplyQueue = func(ctx context.Context, client client.Client, instance *enterpriseApi.Queue) (reconcile.Result, error) { - return 
reconcile.Result{}, nil - } - nsSpecs := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}} - - Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) - - ctx := context.TODO() - builder := fake.NewClientBuilder() - c := builder.Build() - instance := QueueReconciler{ - Client: c, - Scheme: scheme.Scheme, - } - request := reconcile.Request{ - NamespacedName: types.NamespacedName{ - Name: "test", - Namespace: namespace, - }, - } - _, err := instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - - spec := enterpriseApi.QueueSpec{ - Provider: "sqs", - SQS: enterpriseApi.SQSSpec{ - Name: "smartbus-queue", - AuthRegion: "us-west-2", - DLQ: "smartbus-dlq", - Endpoint: "https://sqs.us-west-2.amazonaws.com", - }, - } - bcSpec := testutils.NewQueue("test", namespace, spec) - Expect(c.Create(ctx, bcSpec)).Should(Succeed()) - - annotations := make(map[string]string) - annotations[enterpriseApi.QueuePausedAnnotation] = "" - bcSpec.Annotations = annotations - Expect(c.Update(ctx, bcSpec)).Should(Succeed()) - - _, err = instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - - annotations = map[string]string{} - bcSpec.Annotations = annotations - Expect(c.Update(ctx, bcSpec)).Should(Succeed()) - - _, err = instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - - bcSpec.DeletionTimestamp = &metav1.Time{} - _, err = instance.Reconcile(ctx, request) - Expect(err).ToNot(HaveOccurred()) - }) - - }) -}) - -func GetQueue(name string, namespace string) (*enterpriseApi.Queue, error) { - By("Expecting Queue custom resource to be retrieved successfully") - - key := types.NamespacedName{ - Name: name, - Namespace: namespace, - } - b := &enterpriseApi.Queue{} - - err := k8sClient.Get(context.Background(), key, b) - if err != nil { - return nil, err - } - - return b, err -} - -func CreateQueue(name string, namespace string, annotations map[string]string, status enterpriseApi.Phase, spec enterpriseApi.QueueSpec) 
*enterpriseApi.Queue { - By("Expecting Queue custom resource to be created successfully") - - key := types.NamespacedName{ - Name: name, - Namespace: namespace, - } - ingSpec := &enterpriseApi.Queue{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Annotations: annotations, - }, - Spec: spec, - } - - Expect(k8sClient.Create(context.Background(), ingSpec)).Should(Succeed()) - time.Sleep(2 * time.Second) - - b := &enterpriseApi.Queue{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, b) - if status != "" { - fmt.Printf("status is set to %v", status) - b.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), b)).Should(Succeed()) - time.Sleep(2 * time.Second) - } - return true - }, timeout, interval).Should(BeTrue()) - - return b -} - -func UpdateQueue(instance *enterpriseApi.Queue, status enterpriseApi.Phase, spec enterpriseApi.QueueSpec) *enterpriseApi.Queue { - By("Expecting Queue custom resource to be updated successfully") - - key := types.NamespacedName{ - Name: instance.Name, - Namespace: instance.Namespace, - } - - bSpec := testutils.NewQueue(instance.Name, instance.Namespace, spec) - bSpec.ResourceVersion = instance.ResourceVersion - Expect(k8sClient.Update(context.Background(), bSpec)).Should(Succeed()) - time.Sleep(2 * time.Second) - - b := &enterpriseApi.Queue{} - Eventually(func() bool { - _ = k8sClient.Get(context.Background(), key, b) - if status != "" { - fmt.Printf("status is set to %v", status) - b.Status.Phase = status - Expect(k8sClient.Status().Update(context.Background(), b)).Should(Succeed()) - time.Sleep(2 * time.Second) - } - return true - }, timeout, interval).Should(BeTrue()) - - return b -} - -func DeleteQueue(name string, namespace string) { - By("Expecting Queue custom resource to be deleted successfully") - - key := types.NamespacedName{ - Name: name, - Namespace: namespace, - } - - Eventually(func() error { - b := &enterpriseApi.Queue{} - _ = 
k8sClient.Get(context.Background(), key, b) - err := k8sClient.Delete(context.Background(), b) - return err - }, timeout, interval).Should(Succeed()) -} diff --git a/internal/controller/suite_test.go b/internal/controller/suite_test.go index 8454d15b5..142a8720c 100644 --- a/internal/controller/suite_test.go +++ b/internal/controller/suite_test.go @@ -98,12 +98,6 @@ var _ = BeforeSuite(func(ctx context.Context) { Scheme: clientgoscheme.Scheme, }) Expect(err).ToNot(HaveOccurred()) - if err := (&QueueReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) - } if err := (&ClusterManagerReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), @@ -128,12 +122,6 @@ var _ = BeforeSuite(func(ctx context.Context) { }).SetupWithManager(k8sManager); err != nil { Expect(err).NotTo(HaveOccurred()) } - if err := (&ObjectStorageReconciler{ - Client: k8sManager.GetClient(), - Scheme: k8sManager.GetScheme(), - }).SetupWithManager(k8sManager); err != nil { - Expect(err).NotTo(HaveOccurred()) - } if err := (&LicenseManagerReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index af981be2c..42b714924 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -76,8 +76,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // updates status after function completes cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.Queue = nil - cr.Status.ObjectStorage = nil + cr.Status.QueueBucketAccessSecretVersion = "0" } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -286,11 +285,27 @@ func ApplyIndexerClusterManager(ctx context.Context, 
client splcommon.Controller } } + // Secret reference + accessKey, secretKey, version := "", "", "" + if queue.Spec.Provider == "sqs" && cr.Spec.ServiceAccount == "" { + for _, vol := range queue.Spec.SQS.VolList { + if vol.SecretRef != "" { + accessKey, secretKey, version, err = GetQueueRemoteVolumeSecrets(ctx, vol, client, cr) + if err != nil { + scopedLog.Error(err, "Failed to get queue remote volume secrets") + return result, err + } + } + } + } + + secretChanged := cr.Status.QueueBucketAccessSecretVersion != version + // If queue is updated if cr.Spec.QueueRef.Name != "" { - if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil { + if secretChanged { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) - err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, client) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") @@ -306,8 +321,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller scopedLog.Info("Restarted splunk", "indexer", i) } - cr.Status.Queue = &queue.Spec - cr.Status.ObjectStorage = &os.Spec + cr.Status.QueueBucketAccessSecretVersion = version } } @@ -400,8 +414,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, cr.Status.Phase = enterpriseApi.PhaseError cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.Queue = nil - cr.Status.ObjectStorage = nil + cr.Status.QueueBucketAccessSecretVersion = "0" } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) 
@@ -613,10 +626,26 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } } + // Secret reference + accessKey, secretKey, version := "", "", "" + if queue.Spec.Provider == "sqs" && cr.Spec.ServiceAccount == "" { + for _, vol := range queue.Spec.SQS.VolList { + if vol.SecretRef != "" { + accessKey, secretKey, version, err = GetQueueRemoteVolumeSecrets(ctx, vol, client, cr) + if err != nil { + scopedLog.Error(err, "Failed to get queue remote volume secrets") + return result, err + } + } + } + } + + secretChanged := cr.Status.QueueBucketAccessSecretVersion != version + if cr.Spec.QueueRef.Name != "" { - if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil { + if secretChanged { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) - err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, client) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIndexerClusterManager", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") @@ -632,8 +661,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, scopedLog.Info("Restarted splunk", "indexer", i) } - cr.Status.Queue = &queue.Spec - cr.Status.ObjectStorage = &os.Spec + cr.Status.QueueBucketAccessSecretVersion = version } } @@ -1304,7 +1332,7 @@ func getSiteName(ctx context.Context, c splcommon.ControllerClient, cr *enterpri var newSplunkClientForQueuePipeline = splclient.NewSplunkClient // updateIndexerConfFiles checks if Queue or Pipeline inputs are created for the first time and updates the conf file if so -func (mgr *indexerClusterPodManager) updateIndexerConfFiles(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue 
*enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, k8s rclient.Client) error { +func (mgr *indexerClusterPodManager) updateIndexerConfFiles(ctx context.Context, newCR *enterpriseApi.IndexerCluster, queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string, k8s rclient.Client) error { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("updateIndexerConfFiles").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) @@ -1322,21 +1350,7 @@ func (mgr *indexerClusterPodManager) updateIndexerConfFiles(ctx context.Context, } splunkClient := newSplunkClientForQueuePipeline(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) - // Secret reference - s3AccessKey, s3SecretKey := "", "" - if queue.Provider == "sqs" && newCR.Spec.ServiceAccount == "" { - for _, vol := range queue.SQS.VolList { - if vol.SecretRef != "" { - s3AccessKey, s3SecretKey, err = GetQueueRemoteVolumeSecrets(ctx, vol, k8s, newCR) - if err != nil { - scopedLog.Error(err, "Failed to get queue remote volume secrets") - return err - } - } - } - } - - queueInputs, queueOutputs, pipelineInputs := getQueueAndPipelineInputsForIndexerConfFiles(queue, os, s3AccessKey, s3SecretKey) + queueInputs, queueOutputs, pipelineInputs := getQueueAndPipelineInputsForIndexerConfFiles(queue, os, accessKey, secretKey) for _, pbVal := range queueOutputs { if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{pbVal}); err != nil { @@ -1361,9 +1375,9 @@ func (mgr *indexerClusterPodManager) updateIndexerConfFiles(ctx context.Context, } // getQueueAndPipelineInputsForIndexerConfFiles returns a list of queue and pipeline inputs for indexer pods conf files -func getQueueAndPipelineInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, s3AccessKey, s3SecretKey string) (queueInputs, queueOutputs, pipelineInputs [][]string) { +func 
getQueueAndPipelineInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (queueInputs, queueOutputs, pipelineInputs [][]string) { // Queue Inputs - queueInputs, queueOutputs = getQueueAndObjectStorageInputsForIndexerConfFiles(queue, os, s3AccessKey, s3SecretKey) + queueInputs, queueOutputs = getQueueAndObjectStorageInputsForIndexerConfFiles(queue, os, accessKey, secretKey) // Pipeline inputs pipelineInputs = getPipelineInputsForConfFile(true) @@ -1383,7 +1397,7 @@ func imageUpdatedTo9(previousImage string, currentImage string) bool { } // getQueueAndObjectStorageInputsForIndexerConfFiles returns a list of queue and object storage inputs for conf files -func getQueueAndObjectStorageInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, s3AccessKey, s3SecretKey string) (inputs, outputs [][]string) { +func getQueueAndObjectStorageInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (inputs, outputs [][]string) { queueProvider := "" if queue.Provider == "sqs" { queueProvider = "sqs_smartbus" @@ -1405,9 +1419,9 @@ func getQueueAndObjectStorageInputsForIndexerConfFiles(queue *enterpriseApi.Queu ) // TODO: Handle credentials change - if s3AccessKey != "" && s3SecretKey != "" { - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), s3AccessKey}) - inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), s3SecretKey}) + if accessKey != "" && secretKey != "" { + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), accessKey}) + inputs = append(inputs, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), secretKey}) } outputs = inputs diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index 9d1bf0118..ac9e59554 100644 --- 
a/pkg/splunk/enterprise/indexercluster_test.go +++ b/pkg/splunk/enterprise/indexercluster_test.go @@ -2139,6 +2139,9 @@ func TestUpdateIndexerConfFiles(t *testing.T) { // Object definitions provider := "sqs_smartbus" + accessKey := "accessKey" + secretKey := "secretKey" + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ Kind: "Queue", @@ -2197,9 +2200,8 @@ func TestUpdateIndexerConfFiles(t *testing.T) { }, }, Status: enterpriseApi.IndexerClusterStatus{ - ReadyReplicas: 3, - Queue: &enterpriseApi.QueueSpec{}, - ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, + ReadyReplicas: 3, + QueueBucketAccessSecretVersion: "123", }, } c.Create(ctx, cr) @@ -2260,7 +2262,7 @@ func TestUpdateIndexerConfFiles(t *testing.T) { // Negative test case: secret not found mgr := &indexerClusterPodManager{} - err := mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) + err := mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // Mock secret @@ -2271,7 +2273,7 @@ func TestUpdateIndexerConfFiles(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // outputs.conf @@ -2295,7 +2297,7 @@ func TestUpdateIndexerConfFiles(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // inputs.conf @@ -2305,7 +2307,7 @@ func TestUpdateIndexerConfFiles(t *testing.T) { // Negative test case: failure in updating remote queue stanza mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = 
mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // default-mode.conf @@ -2333,7 +2335,7 @@ func TestUpdateIndexerConfFiles(t *testing.T) { mgr = newTestIndexerQueuePipelineManager(mockHTTPClient) - err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) + err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.Nil(t, err) } diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index 55f0e7d35..fb4c9474a 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -72,8 +72,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // Update the CR Status defer updateCRStatus(ctx, client, cr, &err) if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.Queue = nil - cr.Status.ObjectStorage = nil + cr.Status.QueueBucketAccessSecretVersion = "0" } cr.Status.Replicas = cr.Spec.Replicas @@ -252,10 +251,26 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } } + // Secret reference + accessKey, secretKey, version := "", "", "" + if queue.Spec.Provider == "sqs" && cr.Spec.ServiceAccount == "" { + for _, vol := range queue.Spec.SQS.VolList { + if vol.SecretRef != "" { + accessKey, secretKey, version, err = GetQueueRemoteVolumeSecrets(ctx, vol, client, cr) + if err != nil { + scopedLog.Error(err, "Failed to get queue remote volume secrets") + return result, err + } + } + } + } + + secretChanged := cr.Status.QueueBucketAccessSecretVersion != version + // If queue is updated - if cr.Status.Queue == nil || cr.Status.ObjectStorage == nil { + if secretChanged { mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) - err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, client) + err = 
mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, client) if err != nil { eventPublisher.Warning(ctx, "ApplyIngestorCluster", fmt.Sprintf("Failed to update conf file for Queue/Pipeline config change after pod creation: %s", err.Error())) scopedLog.Error(err, "Failed to update conf file for Queue/Pipeline config change after pod creation") @@ -271,8 +286,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr scopedLog.Info("Restarted splunk", "ingestor", i) } - cr.Status.Queue = &queue.Spec - cr.Status.ObjectStorage = &os.Spec + cr.Status.QueueBucketAccessSecretVersion = version } // Upgrade fron automated MC to MC CRD @@ -367,7 +381,7 @@ func getIngestorStatefulSet(ctx context.Context, client splcommon.ControllerClie } // updateIngestorConfFiles checks if Queue or Pipeline inputs are created for the first time and updates the conf file if so -func (mgr *ingestorClusterPodManager) updateIngestorConfFiles(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, k8s client.Client) error { +func (mgr *ingestorClusterPodManager) updateIngestorConfFiles(ctx context.Context, newCR *enterpriseApi.IngestorCluster, queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string, k8s client.Client) error { reqLogger := log.FromContext(ctx) scopedLog := reqLogger.WithName("updateIngestorConfFiles").WithValues("name", newCR.GetName(), "namespace", newCR.GetNamespace()) @@ -385,21 +399,7 @@ func (mgr *ingestorClusterPodManager) updateIngestorConfFiles(ctx context.Contex } splunkClient := mgr.newSplunkClient(fmt.Sprintf("https://%s:8089", fqdnName), "admin", string(adminPwd)) - // Secret reference - s3AccessKey, s3SecretKey := "", "" - if queue.Provider == "sqs" && newCR.Spec.ServiceAccount == "" { - for _, vol := range queue.SQS.VolList { - if vol.SecretRef != "" { - s3AccessKey, s3SecretKey, err = 
GetQueueRemoteVolumeSecrets(ctx, vol, k8s, newCR) - if err != nil { - scopedLog.Error(err, "Failed to get queue remote volume secrets") - return err - } - } - } - } - - queueInputs, pipelineInputs := getQueueAndPipelineInputsForIngestorConfFiles(queue, os, s3AccessKey, s3SecretKey) + queueInputs, pipelineInputs := getQueueAndPipelineInputsForIngestorConfFiles(queue, os, accessKey, secretKey) for _, input := range queueInputs { if err := splunkClient.UpdateConfFile(scopedLog, "outputs", fmt.Sprintf("remote_queue:%s", queue.SQS.Name), [][]string{input}); err != nil { @@ -418,9 +418,9 @@ func (mgr *ingestorClusterPodManager) updateIngestorConfFiles(ctx context.Contex } // getQueueAndPipelineInputsForIngestorConfFiles returns a list of queue and pipeline inputs for ingestor pods conf files -func getQueueAndPipelineInputsForIngestorConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, s3AccessKey, s3SecretKey string) (queueInputs, pipelineInputs [][]string) { +func getQueueAndPipelineInputsForIngestorConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (queueInputs, pipelineInputs [][]string) { // Queue Inputs - queueInputs = getQueueAndObjectStorageInputsForIngestorConfFiles(queue, os, s3AccessKey, s3SecretKey) + queueInputs = getQueueAndObjectStorageInputsForIngestorConfFiles(queue, os, accessKey, secretKey) // Pipeline inputs pipelineInputs = getPipelineInputsForConfFile(false) @@ -464,7 +464,7 @@ func getPipelineInputsForConfFile(isIndexer bool) (config [][]string) { } // getQueueAndObjectStorageInputsForConfFiles returns a list of queue and object storage inputs for conf files -func getQueueAndObjectStorageInputsForIngestorConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, s3AccessKey, s3SecretKey string) (config [][]string) { +func getQueueAndObjectStorageInputsForIngestorConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, 
accessKey, secretKey string) (config [][]string) { queueProvider := "" if queue.Provider == "sqs" { queueProvider = "sqs_smartbus" @@ -486,10 +486,9 @@ func getQueueAndObjectStorageInputsForIngestorConfFiles(queue *enterpriseApi.Que []string{fmt.Sprintf("remote_queue.%s.send_interval", queueProvider), "5s"}, ) - // TODO: Handle credentials change - if s3AccessKey != "" && s3SecretKey != "" { - config = append(config, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), s3AccessKey}) - config = append(config, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), s3SecretKey}) + if accessKey != "" && secretKey != "" { + config = append(config, []string{fmt.Sprintf("remote_queue.%s.access_key", queueProvider), accessKey}) + config = append(config, []string{fmt.Sprintf("remote_queue.%s.secret_key", queueProvider), secretKey}) } return diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index e79bbaa94..f7dd54b39 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -484,6 +484,9 @@ func TestUpdateIngestorConfFiles(t *testing.T) { // Object definitions provider := "sqs_smartbus" + accessKey := "accessKey" + secretKey := "secretKey" + queue := &enterpriseApi.Queue{ TypeMeta: metav1.TypeMeta{ Kind: "Queue", @@ -537,10 +540,9 @@ func TestUpdateIngestorConfFiles(t *testing.T) { }, }, Status: enterpriseApi.IngestorClusterStatus{ - Replicas: 3, - ReadyReplicas: 3, - Queue: &enterpriseApi.QueueSpec{}, - ObjectStorage: &enterpriseApi.ObjectStorageSpec{}, + Replicas: 3, + ReadyReplicas: 3, + QueueBucketAccessSecretVersion: "123", }, } @@ -601,7 +603,7 @@ func TestUpdateIngestorConfFiles(t *testing.T) { // Negative test case: secret not found mgr := &ingestorClusterPodManager{} - err := mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) + err := mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) 
assert.NotNil(t, err) // Mock secret @@ -612,7 +614,7 @@ func TestUpdateIngestorConfFiles(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestIngestorQueuePipelineManager(mockHTTPClient) - err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // outputs.conf @@ -634,7 +636,7 @@ func TestUpdateIngestorConfFiles(t *testing.T) { // Negative test case: failure in creating remote queue stanza mgr = newTestIngestorQueuePipelineManager(mockHTTPClient) - err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.NotNil(t, err) // default-mode.conf @@ -663,7 +665,7 @@ func TestUpdateIngestorConfFiles(t *testing.T) { mgr = newTestIngestorQueuePipelineManager(mockHTTPClient) - err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, c) + err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, c) assert.Nil(t, err) } diff --git a/pkg/splunk/enterprise/util.go b/pkg/splunk/enterprise/util.go index 882a96ff3..88a85b448 100644 --- a/pkg/splunk/enterprise/util.go +++ b/pkg/splunk/enterprise/util.go @@ -418,22 +418,24 @@ func GetSmartstoreRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi. 
} // GetQueueRemoteVolumeSecrets is used to retrieve access key and secrete key for Index & Ingestion separation -func GetQueueRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.VolumeSpec, client splcommon.ControllerClient, cr splcommon.MetaObject) (string, string, error) { +func GetQueueRemoteVolumeSecrets(ctx context.Context, volume enterpriseApi.VolumeSpec, client splcommon.ControllerClient, cr splcommon.MetaObject) (string, string, string, error) { namespaceScopedSecret, err := splutil.GetSecretByName(ctx, client, cr.GetNamespace(), cr.GetName(), volume.SecretRef) if err != nil { - return "", "", err + return "", "", "", err } accessKey := string(namespaceScopedSecret.Data[s3AccessKey]) secretKey := string(namespaceScopedSecret.Data[s3SecretKey]) + version := namespaceScopedSecret.ResourceVersion + if accessKey == "" { - return "", "", errors.New("access Key is missing") + return "", "", "", errors.New("access Key is missing") } else if secretKey == "" { - return "", "", errors.New("secret Key is missing") + return "", "", "", errors.New("secret Key is missing") } - return accessKey, secretKey, nil + return accessKey, secretKey, version, nil } // getLocalAppFileName generates the local app file name diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index 4314124cc..17b5bd8da 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -317,7 +317,8 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(*ingest.Status.Queue).To(Equal(queue), "Ingestor queue status is not the same as provided as input") + Expect(ingest.Status.QueueBucketAccessSecretVersion).To(Not(Equal("")), "Ingestor queue status queue bucket access secret version is 
empty") + Expect(ingest.Status.QueueBucketAccessSecretVersion).To(Not(Equal("0")), "Ingestor queue status queue bucket access secret version is 0") // Get instance of current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -327,7 +328,8 @@ var _ = Describe("indingsep test", func() { // Verify Indexer Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(*index.Status.Queue).To(Equal(queue), "Indexer queue status is not the same as provided as input") + Expect(index.Status.QueueBucketAccessSecretVersion).To(Not(Equal("")), "Indexer queue status queue bucket access secret version is empty") + Expect(index.Status.QueueBucketAccessSecretVersion).To(Not(Equal("0")), "Indexer queue status queue bucket access secret version is 0") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files") From 42dc8e8ab139a21ffe15cafdcbac762e04d87829 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Tue, 13 Jan 2026 15:07:27 +0100 Subject: [PATCH 21/25] CSPL-4360 Update of docs, helm tests and validations --- api/v4/indexercluster_types.go | 4 +-- api/v4/ingestorcluster_types.go | 4 +-- api/v4/queue_types.go | 5 ++- ...enterprise.splunk.com_indexerclusters.yaml | 4 +++ ...nterprise.splunk.com_ingestorclusters.yaml | 5 +++ .../bases/enterprise.splunk.com_queues.yaml | 10 ++++-- docs/IndexIngestionSeparation.md | 36 ++++++++++--------- .../02-assert.yaml | 24 ------------- .../03-assert.yaml | 12 ------- 9 files changed, 45 insertions(+), 59 deletions(-) diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index f1332d8c4..02cf1562d 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -35,17 +35,17 @@ const ( ) // +kubebuilder:validation:XValidation:rule="has(self.queueRef) == has(self.objectStorageRef)",message="queueRef and objectStorageRef must both be set or both be empty" +// 
+kubebuilder:validation:XValidation:rule="self.queueRef == oldSelf.queueRef",message="queueRef is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.objectStorageRef == oldSelf.objectStorageRef",message="objectStorageRef is immutable once created" // IndexerClusterSpec defines the desired state of a Splunk Enterprise indexer cluster type IndexerClusterSpec struct { CommonSplunkSpec `json:",inline"` // +optional - // +kubebuilder:validation:Immutable // Queue reference QueueRef corev1.ObjectReference `json:"queueRef"` // +optional - // +kubebuilder:validation:Immutable // Object Storage reference ObjectStorageRef corev1.ObjectReference `json:"objectStorageRef"` diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index 9ce919809..021acd025 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -28,6 +28,8 @@ const ( IngestorClusterPausedAnnotation = "ingestorcluster.enterprise.splunk.com/paused" ) +// +kubebuilder:validation:XValidation:rule="self.queueRef == oldSelf.queueRef",message="queueRef is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.objectStorageRef == oldSelf.objectStorageRef",message="objectStorageRef is immutable once created" // IngestorClusterSpec defines the spec of Ingestor Cluster type IngestorClusterSpec struct { // Common Splunk spec @@ -40,12 +42,10 @@ type IngestorClusterSpec struct { AppFrameworkConfig AppFrameworkSpec `json:"appRepo,omitempty"` // +kubebuilder:validation:Required - // +kubebuilder:validation:Immutable // Queue reference QueueRef corev1.ObjectReference `json:"queueRef"` // +kubebuilder:validation:Required - // +kubebuilder:validation:Immutable // Object Storage reference ObjectStorageRef corev1.ObjectReference `json:"objectStorageRef"` } diff --git a/api/v4/queue_types.go b/api/v4/queue_types.go index 999eaccc8..2139f43dd 100644 --- a/api/v4/queue_types.go +++ b/api/v4/queue_types.go @@ -28,7 +28,10 @@ const ( ) // 
+kubebuilder:validation:XValidation:rule="self.provider == oldSelf.provider",message="provider is immutable once created" -// +kubebuilder:validation:XValidation:rule="self.sqs == oldSelf.sqs",message="sqs is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.sqs.name == oldSelf.sqs.name",message="sqs.name is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.sqs.authRegion == oldSelf.sqs.authRegion",message="sqs.authRegion is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.sqs.dlq == oldSelf.sqs.dlq",message="sqs.dlq is immutable once created" +// +kubebuilder:validation:XValidation:rule="self.sqs.endpoint == oldSelf.sqs.endpoint",message="sqs.endpoint is immutable once created" // +kubebuilder:validation:XValidation:rule="self.provider != 'sqs' || has(self.sqs)",message="sqs must be provided when provider is sqs" // QueueSpec defines the desired state of Queue type QueueSpec struct { diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 9b3f50bc8..3ea073d7d 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -8331,6 +8331,10 @@ spec: x-kubernetes-validations: - message: queueRef and objectStorageRef must both be set or both be empty rule: has(self.queueRef) == has(self.objectStorageRef) + - message: queueRef is immutable once created + rule: self.queueRef == oldSelf.queueRef + - message: objectStorageRef is immutable once created + rule: self.objectStorageRef == oldSelf.objectStorageRef status: description: IndexerClusterStatus defines the observed state of a Splunk Enterprise indexer cluster diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index e04e1a021..703af01e6 100644 --- 
a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -4306,6 +4306,11 @@ spec: - objectStorageRef - queueRef type: object + x-kubernetes-validations: + - message: queueRef is immutable once created + rule: self.queueRef == oldSelf.queueRef + - message: objectStorageRef is immutable once created + rule: self.objectStorageRef == oldSelf.objectStorageRef status: description: IngestorClusterStatus defines the observed state of Ingestor Cluster diff --git a/config/crd/bases/enterprise.splunk.com_queues.yaml b/config/crd/bases/enterprise.splunk.com_queues.yaml index 454d1700b..e10ee536a 100644 --- a/config/crd/bases/enterprise.splunk.com_queues.yaml +++ b/config/crd/bases/enterprise.splunk.com_queues.yaml @@ -122,8 +122,14 @@ spec: x-kubernetes-validations: - message: provider is immutable once created rule: self.provider == oldSelf.provider - - message: sqs is immutable once created - rule: self.sqs == oldSelf.sqs + - message: sqs.name is immutable once created + rule: self.sqs.name == oldSelf.sqs.name + - message: sqs.authRegion is immutable once created + rule: self.sqs.authRegion == oldSelf.sqs.authRegion + - message: sqs.dlq is immutable once created + rule: self.sqs.dlq == oldSelf.sqs.dlq + - message: sqs.endpoint is immutable once created + rule: self.sqs.endpoint == oldSelf.sqs.endpoint - message: sqs must be provided when provider is sqs rule: self.provider != 'sqs' || has(self.sqs) status: diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index c7b05dcae..ab6f789c7 100644 --- a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -43,8 +43,9 @@ SQS message queue inputs can be found in the table below. 
| region | string | [Required] Region where the queue is located | | endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint | dlq | string | [Required] Name of the dead letter queue | +| volumes | []VolumeSpec | [Optional] List of remote storage volumes used to mount the credentials for queue and bucket access (must contain s3_access_key and s3_secret_key) | -**First provisioning or update of any of the queue inputs requires Ingestor Cluster and Indexer Cluster Splunkd restart, but this restart is implemented automatically and done by SOK.** +**SOK doesn't support update of any of the Queue inputs except from the volumes which allow the change of secrets.** ## Example ``` @@ -59,6 +60,9 @@ spec: region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com dlq: sqs-dlq-test + volumes: + - name: s3-sqs-volume + secretRef: s3-secret ``` # ObjectStorage @@ -81,7 +85,7 @@ S3 object storage inputs can be found in the table below. | path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | | endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint -Change of any of the object storage inputs triggers the restart of Splunk so that appropriate .conf files are correctly refreshed and consumed. 
+**SOK doesn't support update of any of the ObjectStorage inputs.** ## Example ``` @@ -110,9 +114,13 @@ In addition to common spec inputs, the IngestorCluster resource provides the fol | queueRef | corev1.ObjectReference | Message queue reference | | objectStorageRef | corev1.ObjectReference | Object storage reference | +**SOK doesn't support update of queueRef and objectStorageRef.** + +**First provisioning or scaling up the number of replicas requires Ingestor Cluster Splunkd restart, but this restart is implemented automatically and done by SOK.** + ## Example -The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the ingestion process. +The example presented below configures IngestorCluster named ingestor with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the ingestion traffic. This IngestorCluster custom resource is set up with the s3-secret credentials allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the ingestion process. In this case, the setup uses the SQS and S3 based configuration where the messages are stored in sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The object storage is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf and outputs.conf files are configured accordingly. 
@@ -147,9 +155,13 @@ In addition to common spec inputs, the IndexerCluster resource provides the foll | queueRef | corev1.ObjectReference | Message queue reference | | objectStorageRef | corev1.ObjectReference | Object storage reference | +**SOK doesn't support update of queueRef and objectStorageRef.** + +**First provisioning or scaling up the number of replicas requires Indexer Cluster Splunkd restart, but this restart is implemented automatically and done by SOK.** + ## Example -The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the service account named ingestor-sa allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the indexing process. +The example presented below configures IndexerCluster named indexer with Splunk ${SPLUNK_IMAGE_VERSION} image that resides in a default namespace and is scaled to 3 replicas that serve the indexing traffic. This IndexerCluster custom resource is set up with the s3-secret credentials allowing it to perform SQS and S3 operations. Queue and ObjectStorage references allow the user to specify queue and bucket settings for the indexing process. In this case, the setup uses the SQS and S3 based configuration where the messages are stored in and retrieved from sqs-test queue in us-west-2 region with dead letter queue set to sqs-dlq-test queue. The object storage is set to ingestion bucket in smartbus-test directory. Based on these inputs, default-mode.conf, inputs.conf and outputs.conf files are configured accordingly. 
@@ -204,6 +216,9 @@ queue: region: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com dlq: sqs-dlq-test + volumes: + - name: s3-sqs-volume + secretRef: s3-secret ``` ``` @@ -734,18 +749,7 @@ Status: Is Deployment In Progress: false Last App Info Check Time: 0 Version: 0 - Queue: - Sqs: - Region: us-west-2 - DLQ: sqs-dlq-test - Endpoint: https://sqs.us-west-2.amazonaws.com - Name: sqs-test - Provider: sqs - Object Storage: - S3: - Endpoint: https://s3.us-west-2.amazonaws.com - Path: s3://ingestion/smartbus-test - Provider: s3 + Queue Bucket Access Secret Version: 33744270 Message: Phase: Ready Ready Replicas: 3 diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index ca56ca5ef..5848da973 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -67,18 +67,6 @@ spec: name: os status: phase: Ready - queue: - provider: sqs - sqs: - name: index-ingest-separation-test-q - authRegion: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - dlq: index-ingest-separation-test-dlq - objectStorage: - provider: s3 - s3: - endpoint: https://s3.us-west-2.amazonaws.com - path: s3://index-ingest-separation-test-bucket/smartbus-test --- # check for stateful set and replicas as configured @@ -110,18 +98,6 @@ spec: name: os status: phase: Ready - queue: - provider: sqs - sqs: - name: index-ingest-separation-test-q - authRegion: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - dlq: index-ingest-separation-test-dlq - objectStorage: - provider: s3 - s3: - endpoint: https://s3.us-west-2.amazonaws.com - path: s3://index-ingest-separation-test-bucket/smartbus-test --- # check for stateful set and replicas as configured diff --git a/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml index 765a22192..8bf619148 100644 --- 
a/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/03-assert.yaml @@ -12,18 +12,6 @@ spec: name: os status: phase: Ready - queue: - provider: sqs - sqs: - name: index-ingest-separation-test-q - authRegion: us-west-2 - endpoint: https://sqs.us-west-2.amazonaws.com - dlq: index-ingest-separation-test-dlq - objectStorage: - provider: s3 - s3: - endpoint: https://s3.us-west-2.amazonaws.com - path: s3://index-ingest-separation-test-bucket/smartbus-test --- # check for stateful sets and replicas updated From c50984b38724fde0a92ecaa13a2697c5d4ddb2bd Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Tue, 13 Jan 2026 15:56:22 +0100 Subject: [PATCH 22/25] CSPL-4360 Add secret watch and fix controller tests --- api/v4/indexercluster_types.go | 4 +- ...enterprise.splunk.com_indexerclusters.yaml | 4 +- config/rbac/role.yaml | 6 ++ .../templates/rbac/clusterrole.yaml | 78 +++++++++++++++++++ .../rbac/objectstorage_editor_role.yaml | 55 +++++++++++++ .../rbac/objectstorage_viewer_role.yaml | 47 +++++++++++ .../controller/indexercluster_controller.go | 51 ++++++++++++ .../controller/ingestorcluster_controller.go | 55 +++++++++++++ .../ingestorcluster_controller_test.go | 37 ++++++++- internal/controller/testutils/new.go | 9 ++- .../02-assert.yaml | 4 - 11 files changed, 336 insertions(+), 14 deletions(-) create mode 100644 helm-chart/splunk-operator/templates/rbac/objectstorage_editor_role.yaml create mode 100644 helm-chart/splunk-operator/templates/rbac/objectstorage_viewer_role.yaml diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 02cf1562d..4c2bc47d2 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -35,8 +35,8 @@ const ( ) // +kubebuilder:validation:XValidation:rule="has(self.queueRef) == has(self.objectStorageRef)",message="queueRef and objectStorageRef must both be set or both be empty" -// +kubebuilder:validation:XValidation:rule="self.queueRef 
== oldSelf.queueRef",message="queueRef is immutable once created" -// +kubebuilder:validation:XValidation:rule="self.objectStorageRef == oldSelf.objectStorageRef",message="objectStorageRef is immutable once created" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.queueRef) || self.queueRef == oldSelf.queueRef",message="queueRef is immutable once created" +// +kubebuilder:validation:XValidation:rule="!has(oldSelf.objectStorageRef) || self.objectStorageRef == oldSelf.objectStorageRef",message="objectStorageRef is immutable once created" // IndexerClusterSpec defines the desired state of a Splunk Enterprise indexer cluster type IndexerClusterSpec struct { CommonSplunkSpec `json:",inline"` diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 3ea073d7d..2dbb09925 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -8332,9 +8332,9 @@ spec: - message: queueRef and objectStorageRef must both be set or both be empty rule: has(self.queueRef) == has(self.objectStorageRef) - message: queueRef is immutable once created - rule: self.queueRef == oldSelf.queueRef + rule: '!has(oldSelf.queueRef) || self.queueRef == oldSelf.queueRef' - message: objectStorageRef is immutable once created - rule: self.objectStorageRef == oldSelf.objectStorageRef + rule: '!has(oldSelf.objectStorageRef) || self.objectStorageRef == oldSelf.objectStorageRef' status: description: IndexerClusterStatus defines the observed state of a Splunk Enterprise indexer cluster diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index fc8513023..973105d16 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -54,6 +54,8 @@ rules: - licensemanagers - licensemasters - monitoringconsoles + - objectstorages + - queues - searchheadclusters - standalones verbs: @@ -74,6 +76,8 @@ rules: - licensemanagers/finalizers - 
licensemasters/finalizers - monitoringconsoles/finalizers + - objectstorages/finalizers + - queues/finalizers - searchheadclusters/finalizers - standalones/finalizers verbs: @@ -88,6 +92,8 @@ rules: - licensemanagers/status - licensemasters/status - monitoringconsoles/status + - objectstorages/status + - queues/status - searchheadclusters/status - standalones/status verbs: diff --git a/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml b/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml index 2b5d51ec9..a952b174c 100644 --- a/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml +++ b/helm-chart/splunk-operator/templates/rbac/clusterrole.yaml @@ -222,6 +222,32 @@ rules: - get - patch - update +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - ingestorclusters/status + verbs: + - get + - patch + - update - apiGroups: - enterprise.splunk.com resources: @@ -300,6 +326,58 @@ rules: - get - patch - update +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/status + verbs: + - get + - patch + - update +- apiGroups: + - enterprise.splunk.com + resources: + - queues + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - queues/finalizers + verbs: + - update +- apiGroups: + - enterprise.splunk.com + resources: + - queues/status + verbs: + - get + - patch + - update - apiGroups: - enterprise.splunk.com resources: diff --git 
a/helm-chart/splunk-operator/templates/rbac/objectstorage_editor_role.yaml b/helm-chart/splunk-operator/templates/rbac/objectstorage_editor_role.yaml new file mode 100644 index 000000000..d90f7673b --- /dev/null +++ b/helm-chart/splunk-operator/templates/rbac/objectstorage_editor_role.yaml @@ -0,0 +1,55 @@ +# This rule is not used by the project splunk-operator itself. +# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants permissions to create, update, and delete resources within the enterprise.splunk.com. +# This role is intended for users who need to manage these resources +# but should not control RBAC or manage permissions for others. +{{- if .Values.splunkOperator.clusterWideAccess }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-objectstorage-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/status + verbs: + - get +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-objectstorage-editor-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/status + verbs: + - get +{{- end }} \ No newline at end of file diff --git a/helm-chart/splunk-operator/templates/rbac/objectstorage_viewer_role.yaml b/helm-chart/splunk-operator/templates/rbac/objectstorage_viewer_role.yaml new file mode 100644 index 000000000..ec9358b8d --- /dev/null +++ b/helm-chart/splunk-operator/templates/rbac/objectstorage_viewer_role.yaml @@ -0,0 +1,47 @@ +# This rule is not used by the project splunk-operator itself. 
+# It is provided to allow the cluster admin to help manage permissions for users. +# +# Grants read-only access to enterprise.splunk.com resources. +# This role is intended for users who need visibility into these resources +# without permissions to modify them. It is ideal for monitoring purposes and limited-access viewing. +{{- if .Values.splunkOperator.clusterWideAccess }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-objectstorage-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/status + verbs: + - get +{{- else }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "splunk-operator.operator.fullname" . }}-objectstorage-viewer-role +rules: +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages + verbs: + - get + - list + - watch +- apiGroups: + - enterprise.splunk.com + resources: + - objectstorages/status + verbs: + - get +{{- end }} \ No newline at end of file diff --git a/internal/controller/indexercluster_controller.go b/internal/controller/indexercluster_controller.go index 7efb6e1b8..4f83f5abe 100644 --- a/internal/controller/indexercluster_controller.go +++ b/internal/controller/indexercluster_controller.go @@ -148,6 +148,57 @@ func (r *IndexerClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IndexerCluster{}, )). 
+ Watches(&corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + secret, ok := obj.(*corev1.Secret) + if !ok { + return nil + } + + // Only consider indexer clusters in the same namespace as the Secret + var list enterpriseApi.IndexerClusterList + if err := r.Client.List(ctx, &list, client.InNamespace(secret.Namespace)); err != nil { + return nil + } + + var reqs []reconcile.Request + for _, ic := range list.Items { + if ic.Spec.QueueRef.Name == "" { + continue + } + + queueNS := ic.Spec.QueueRef.Namespace + if queueNS == "" { + queueNS = ic.Namespace + } + + queue := &enterpriseApi.Queue{} + if err := r.Client.Get(ctx, types.NamespacedName{ + Name: ic.Spec.QueueRef.Name, + Namespace: queueNS, + }, queue); err != nil { + continue + } + + if queue.Spec.Provider != "sqs" { + continue + } + + for _, vol := range queue.Spec.SQS.VolList { + if vol.SecretRef == secret.Name { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + break + } + } + } + return reqs + }), + ). 
Watches(&corev1.Pod{}, handler.EnqueueRequestForOwner( mgr.GetScheme(), diff --git a/internal/controller/ingestorcluster_controller.go b/internal/controller/ingestorcluster_controller.go index 0d8117bd2..b5aa3d911 100644 --- a/internal/controller/ingestorcluster_controller.go +++ b/internal/controller/ingestorcluster_controller.go @@ -50,6 +50,10 @@ type IngestorClusterReconciler struct { // +kubebuilder:rbac:groups=enterprise.splunk.com,resources=ingestorclusters/status,verbs=get;update;patch // +kubebuilder:rbac:groups=enterprise.splunk.com,resources=ingestorclusters/finalizers,verbs=update +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues;objectstorages,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/status;objectstorages/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=enterprise.splunk.com,resources=queues/finalizers;objectstorages/finalizers,verbs=update + // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. // TODO(user): Modify the Reconcile function to compare the state specified by @@ -129,6 +133,57 @@ func (r *IngestorClusterReconciler) SetupWithManager(mgr ctrl.Manager) error { mgr.GetRESTMapper(), &enterpriseApi.IngestorCluster{}, )). 
+ Watches(&corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { + secret, ok := obj.(*corev1.Secret) + if !ok { + return nil + } + + // Only consider ingestor clusters in the same namespace as the Secret + var list enterpriseApi.IngestorClusterList + if err := r.Client.List(ctx, &list, client.InNamespace(secret.Namespace)); err != nil { + return nil + } + + var reqs []reconcile.Request + for _, ic := range list.Items { + if ic.Spec.QueueRef.Name == "" { + continue + } + + queueNS := ic.Spec.QueueRef.Namespace + if queueNS == "" { + queueNS = ic.Namespace + } + + queue := &enterpriseApi.Queue{} + if err := r.Client.Get(ctx, types.NamespacedName{ + Name: ic.Spec.QueueRef.Name, + Namespace: queueNS, + }, queue); err != nil { + continue + } + + if queue.Spec.Provider != "sqs" { + continue + } + + for _, vol := range queue.Spec.SQS.VolList { + if vol.SecretRef == secret.Name { + reqs = append(reqs, reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: ic.Name, + Namespace: ic.Namespace, + }, + }) + break + } + } + } + return reqs + }), + ). 
Watches(&corev1.Pod{}, handler.EnqueueRequestForOwner( mgr.GetScheme(), diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go index 38e7cbb4e..49d59e608 100644 --- a/internal/controller/ingestorcluster_controller_test.go +++ b/internal/controller/ingestorcluster_controller_test.go @@ -104,7 +104,7 @@ var _ = Describe("IngestorCluster Controller", func() { annotations = map[string]string{} icSpec.Annotations = annotations icSpec.Status.Phase = "Ready" - UpdateIngestorCluster(icSpec, enterpriseApi.PhaseReady) + UpdateIngestorCluster(icSpec, enterpriseApi.PhaseReady, os, queue) DeleteIngestorCluster("test", nsSpecs.Name) Expect(k8sClient.Delete(context.Background(), nsSpecs)).Should(Succeed()) }) @@ -161,6 +161,35 @@ var _ = Describe("IngestorCluster Controller", func() { Expect(k8sClient.Create(context.Background(), nsSpecs)).Should(Succeed()) + queue := &enterpriseApi.Queue{ + ObjectMeta: metav1.ObjectMeta{ + Name: "queue", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.QueueSpec{ + Provider: "sqs", + SQS: enterpriseApi.SQSSpec{ + Name: "smartbus-queue", + AuthRegion: "us-west-2", + DLQ: "smartbus-dlq", + Endpoint: "https://sqs.us-west-2.amazonaws.com", + }, + }, + } + os := &enterpriseApi.ObjectStorage{ + ObjectMeta: metav1.ObjectMeta{ + Name: "os", + Namespace: nsSpecs.Name, + }, + Spec: enterpriseApi.ObjectStorageSpec{ + Provider: "s3", + S3: enterpriseApi.S3Spec{ + Endpoint: "https://s3.us-west-2.amazonaws.com", + Path: "s3://ingestion/smartbus-test", + }, + }, + } + ctx := context.TODO() builder := fake.NewClientBuilder() c := builder.Build() @@ -177,7 +206,7 @@ var _ = Describe("IngestorCluster Controller", func() { _, err := instance.Reconcile(ctx, request) Expect(err).ToNot(HaveOccurred()) - icSpec := testutils.NewIngestorCluster("test", namespace, "image") + icSpec := testutils.NewIngestorCluster("test", namespace, "image", os, queue) Expect(c.Create(ctx, 
icSpec)).Should(Succeed()) annotations := make(map[string]string) @@ -269,7 +298,7 @@ func CreateIngestorCluster(name string, namespace string, annotations map[string return ic } -func UpdateIngestorCluster(instance *enterpriseApi.IngestorCluster, status enterpriseApi.Phase) *enterpriseApi.IngestorCluster { +func UpdateIngestorCluster(instance *enterpriseApi.IngestorCluster, status enterpriseApi.Phase, os *enterpriseApi.ObjectStorage, queue *enterpriseApi.Queue) *enterpriseApi.IngestorCluster { By("Expecting IngestorCluster custom resource to be updated successfully") key := types.NamespacedName{ @@ -277,7 +306,7 @@ func UpdateIngestorCluster(instance *enterpriseApi.IngestorCluster, status enter Namespace: instance.Namespace, } - icSpec := testutils.NewIngestorCluster(instance.Name, instance.Namespace, "image") + icSpec := testutils.NewIngestorCluster(instance.Name, instance.Namespace, "image", os, queue) icSpec.ResourceVersion = instance.ResourceVersion Expect(k8sClient.Update(context.Background(), icSpec)).Should(Succeed()) time.Sleep(2 * time.Second) diff --git a/internal/controller/testutils/new.go b/internal/controller/testutils/new.go index aa47e8092..63a291a1d 100644 --- a/internal/controller/testutils/new.go +++ b/internal/controller/testutils/new.go @@ -46,7 +46,7 @@ func NewStandalone(name, ns, image string) *enterpriseApi.Standalone { } // NewIngestorCluster returns new IngestorCluster instance with its config hash -func NewIngestorCluster(name, ns, image string) *enterpriseApi.IngestorCluster { +func NewIngestorCluster(name, ns, image string, os *enterpriseApi.ObjectStorage, queue *enterpriseApi.Queue) *enterpriseApi.IngestorCluster { return &enterpriseApi.IngestorCluster{ ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns}, Spec: enterpriseApi.IngestorClusterSpec{ @@ -55,7 +55,12 @@ func NewIngestorCluster(name, ns, image string) *enterpriseApi.IngestorCluster { }, Replicas: 3, QueueRef: corev1.ObjectReference{ - Name: "queue", + Name: queue.Name, 
+ Namespace: queue.Namespace, + }, + ObjectStorageRef: corev1.ObjectReference{ + Name: os.Name, + Namespace: os.Namespace, }, }, } diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index 5848da973..c6cc343d8 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -11,8 +11,6 @@ spec: authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com dlq: index-ingest-separation-test-dlq -status: - phase: Ready --- # assert for object storage custom resource to be ready @@ -25,8 +23,6 @@ spec: s3: endpoint: https://s3.us-west-2.amazonaws.com path: s3://index-ingest-separation-test-bucket/smartbus-test -status: - phase: Ready --- # assert for cluster manager custom resource to be ready From e81280c8278d173e445044ba66ba1cf23d5b9661 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Wed, 28 Jan 2026 13:52:45 +0100 Subject: [PATCH 23/25] CSPL-4360 Update docs --- docs/IndexIngestionSeparation.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index ab6f789c7..6ffd859f1 100644 --- a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -40,8 +40,8 @@ SQS message queue inputs can be found in the table below. 
| Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | name | string | [Required] Name of the queue | -| region | string | [Required] Region where the queue is located | -| endpoint | string | [Optional, if not provided formed based on region] AWS SQS Service endpoint +| authRegion | string | [Required] Region where the queue is located | +| endpoint | string | [Optional, if not provided formed based on authRegion] AWS SQS Service endpoint | dlq | string | [Required] Name of the dead letter queue | | volumes | []VolumeSpec | [Optional] List of remote storage volumes used to mount the credentials for queue and bucket access (must contain s3_access_key and s3_secret_key) | @@ -57,7 +57,7 @@ spec: provider: sqs sqs: name: sqs-test - region: us-west-2 + authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com dlq: sqs-dlq-test volumes: @@ -83,7 +83,7 @@ S3 object storage inputs can be found in the table below. | Key | Type | Description | | ---------- | ------- | ------------------------------------------------- | | path | string | [Required] Remote storage location for messages that are larger than the underlying maximum message size | -| endpoint | string | [Optional, if not provided formed based on region] S3-compatible service endpoint +| endpoint | string | [Optional, if not provided formed based on authRegion] S3-compatible service endpoint **SOK doesn't support update of any of the ObjectStorage inputs.** @@ -213,7 +213,7 @@ queue: provider: sqs sqs: name: sqs-test - region: us-west-2 + authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com dlq: sqs-dlq-test volumes: @@ -584,7 +584,7 @@ spec: provider: sqs sqs: name: sqs-test - region: us-west-2 + authRegion: us-west-2 endpoint: https://sqs.us-west-2.amazonaws.com dlq: sqs-dlq-test ``` @@ -616,7 +616,7 @@ Metadata: UID: 12345678-1234-5678-1234-012345678911 Spec: Sqs: - Region: us-west-2 + Auth Region: us-west-2 DLQ: sqs-dlq-test 
Endpoint: https://sqs.us-west-2.amazonaws.com Name: sqs-test From d29aed1a73d3542f5282490fd5bfc6328fc29cc9 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Thu, 29 Jan 2026 09:53:40 +0100 Subject: [PATCH 24/25] CSPL-4360 Restart Splunk when SA changed --- api/v4/indexercluster_types.go | 3 +++ api/v4/ingestorcluster_types.go | 3 +++ .../bases/enterprise.splunk.com_indexerclusters.yaml | 3 +++ .../bases/enterprise.splunk.com_ingestorclusters.yaml | 3 +++ pkg/splunk/enterprise/indexercluster.go | 10 ++++++++-- pkg/splunk/enterprise/ingestorcluster.go | 5 ++++- 6 files changed, 24 insertions(+), 3 deletions(-) diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 4c2bc47d2..3fbcf3dd6 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -125,6 +125,9 @@ type IndexerClusterStatus struct { // Queue and bucket access secret version QueueBucketAccessSecretVersion string `json:"queueBucketAccessSecretVersion,omitempty"` + + // Queue and bucket access service account + QueueBucketAccessServiceAccount string `json:"queueBucketAccessServiceAccount,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index 021acd025..3a8d8a940 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -78,6 +78,9 @@ type IngestorClusterStatus struct { // Queue and bucket access secret version QueueBucketAccessSecretVersion string `json:"queueBucketAccessSecretVersion,omitempty"` + + // Queue and bucket access service account + QueueBucketAccessServiceAccount string `json:"queueBucketAccessServiceAccount,omitempty"` } // +kubebuilder:object:root=true diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 2dbb09925..17717e5af 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ 
b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -8431,6 +8431,9 @@ spec: queueBucketAccessSecretVersion: description: Queue and bucket access secret version type: string + queueBucketAccessServiceAccount: + description: Queue and bucket access service account + type: string readyReplicas: description: current number of ready indexer peers format: int32 diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 703af01e6..171392367 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -4613,6 +4613,9 @@ spec: queueBucketAccessSecretVersion: description: Queue and bucket access secret version type: string + queueBucketAccessServiceAccount: + description: Queue and bucket access service account + type: string readyReplicas: description: Number of ready ingestor pods format: int32 diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 42b714924..3f4b507f7 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -77,6 +77,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { cr.Status.QueueBucketAccessSecretVersion = "0" + cr.Status.QueueBucketAccessServiceAccount = "" } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -300,10 +301,11 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } secretChanged := cr.Status.QueueBucketAccessSecretVersion != version + serviceAccountChanged := cr.Status.QueueBucketAccessServiceAccount != cr.Spec.ServiceAccount // If queue is updated if cr.Spec.QueueRef.Name != "" { - if secretChanged { + if secretChanged || 
serviceAccountChanged { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, client) if err != nil { @@ -322,6 +324,7 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } cr.Status.QueueBucketAccessSecretVersion = version + cr.Status.QueueBucketAccessServiceAccount = cr.Spec.ServiceAccount } } @@ -415,6 +418,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { cr.Status.QueueBucketAccessSecretVersion = "0" + cr.Status.QueueBucketAccessServiceAccount = "" } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -641,9 +645,10 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } secretChanged := cr.Status.QueueBucketAccessSecretVersion != version + serviceAccountChanged := cr.Status.QueueBucketAccessServiceAccount != cr.Spec.ServiceAccount if cr.Spec.QueueRef.Name != "" { - if secretChanged { + if secretChanged || serviceAccountChanged { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, client) if err != nil { @@ -662,6 +667,7 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } cr.Status.QueueBucketAccessSecretVersion = version + cr.Status.QueueBucketAccessServiceAccount = cr.Spec.ServiceAccount } } diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index fb4c9474a..de4f0c55c 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -73,6 +73,7 @@ func ApplyIngestorCluster(ctx 
context.Context, client client.Client, cr *enterpr defer updateCRStatus(ctx, client, cr, &err) if cr.Status.Replicas < cr.Spec.Replicas { cr.Status.QueueBucketAccessSecretVersion = "0" + cr.Status.QueueBucketAccessServiceAccount = "" } cr.Status.Replicas = cr.Spec.Replicas @@ -266,9 +267,10 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } secretChanged := cr.Status.QueueBucketAccessSecretVersion != version + serviceAccountChanged := cr.Status.QueueBucketAccessServiceAccount != cr.Spec.ServiceAccount // If queue is updated - if secretChanged { + if secretChanged || serviceAccountChanged { mgr := newIngestorClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) err = mgr.updateIngestorConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, client) if err != nil { @@ -287,6 +289,7 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } cr.Status.QueueBucketAccessSecretVersion = version + cr.Status.QueueBucketAccessServiceAccount = cr.Spec.ServiceAccount } // Upgrade fron automated MC to MC CRD From 4ebe9d8701369ffa4003345f421a00518f2d42c5 Mon Sep 17 00:00:00 2001 From: Kasia Koziol Date: Mon, 9 Feb 2026 11:17:00 +0100 Subject: [PATCH 25/25] Addressing comments --- api/v4/indexercluster_types.go | 8 +-- api/v4/ingestorcluster_types.go | 10 ++-- api/v4/objectstorage_types.go | 2 +- ...enterprise.splunk.com_indexerclusters.yaml | 16 +++--- ...nterprise.splunk.com_ingestorclusters.yaml | 18 ++++--- .../enterprise.splunk.com_objectstorages.yaml | 2 +- docs/CustomResources.md | 2 +- docs/IndexIngestionSeparation.md | 10 ++-- .../ingestorcluster_controller_test.go | 6 +-- .../02-assert.yaml | 2 +- .../splunk_index_ingest_sep.yaml | 2 +- pkg/splunk/enterprise/indexercluster.go | 50 ++++++++++++------- pkg/splunk/enterprise/indexercluster_test.go | 14 +++--- pkg/splunk/enterprise/ingestorcluster.go | 38 ++++++++++---- pkg/splunk/enterprise/ingestorcluster_test.go | 14 
+++--- pkg/splunk/enterprise/objectstorage_test.go | 2 +- ...dex_and_ingestion_separation_suite_test.go | 2 +- .../index_and_ingestion_separation_test.go | 8 +-- 18 files changed, 124 insertions(+), 82 deletions(-) diff --git a/api/v4/indexercluster_types.go b/api/v4/indexercluster_types.go index 3fbcf3dd6..5035c3cff 100644 --- a/api/v4/indexercluster_types.go +++ b/api/v4/indexercluster_types.go @@ -123,11 +123,11 @@ type IndexerClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Queue and bucket access secret version - QueueBucketAccessSecretVersion string `json:"queueBucketAccessSecretVersion,omitempty"` + // Credential secret version to track changes to the secret and trigger rolling restart of indexer cluster peers when the secret is updated + CredentialSecretVersion string `json:"credentialSecretVersion,omitempty"` - // Queue and bucket access service account - QueueBucketAccessServiceAccount string `json:"queueBucketAccessServiceAccount,omitempty"` + // Service account to track changes to the service account and trigger rolling restart of indexer cluster peers when the service account is updated + ServiceAccount string `json:"serviceAccount,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/api/v4/ingestorcluster_types.go b/api/v4/ingestorcluster_types.go index 3a8d8a940..0dc3ce844 100644 --- a/api/v4/ingestorcluster_types.go +++ b/api/v4/ingestorcluster_types.go @@ -36,6 +36,8 @@ type IngestorClusterSpec struct { CommonSplunkSpec `json:",inline"` // Number of ingestor pods + // +kubebuilder:validation:Minimum=3 + // +kubebuilder:default=3 Replicas int32 `json:"replicas"` // Splunk Enterprise app repository that specifies remote app location and scope for Splunk app management @@ -76,11 +78,11 @@ type IngestorClusterStatus struct { // Auxillary message describing CR status Message string `json:"message"` - // Queue and bucket access secret version - 
QueueBucketAccessSecretVersion string `json:"queueBucketAccessSecretVersion,omitempty"` + // Credential secret version to track changes to the secret and trigger rolling restart of ingestor cluster peers when the secret is updated + CredentialSecretVersion string `json:"credentialSecretVersion,omitempty"` - // Queue and bucket access service account - QueueBucketAccessServiceAccount string `json:"queueBucketAccessServiceAccount,omitempty"` + // Service account to track changes to the service account and trigger rolling restart of ingestor cluster peers when the service account is updated + ServiceAccount string `json:"serviceAccount,omitempty"` } // +kubebuilder:object:root=true diff --git a/api/v4/objectstorage_types.go b/api/v4/objectstorage_types.go index 7712e81d6..b041ef466 100644 --- a/api/v4/objectstorage_types.go +++ b/api/v4/objectstorage_types.go @@ -49,7 +49,7 @@ type S3Spec struct { Endpoint string `json:"endpoint"` // +kubebuilder:validation:Required - // +kubebuilder:validation:Pattern=`^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$` + // +kubebuilder:validation:Pattern=`^(?:s3://)?[a-z0-9.-]{3,63}(?:/[^\s]+)?$` // S3 bucket path Path string `json:"path"` } diff --git a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml index 17717e5af..819c2f971 100644 --- a/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_indexerclusters.yaml @@ -8366,6 +8366,11 @@ spec: - Terminating - Error type: string + credentialSecretVersion: + description: Credential secret version to track changes to the secret + and trigger rolling restart of indexer cluster peers when the secret + is updated + type: string indexer_secret_changed_flag: description: Indicates when the idxc_secret has been changed for a peer @@ -8428,12 +8433,6 @@ spec: - Terminating - Error type: string - queueBucketAccessSecretVersion: - description: Queue and bucket access secret version - 
type: string - queueBucketAccessServiceAccount: - description: Queue and bucket access service account - type: string readyReplicas: description: current number of ready indexer peers format: int32 @@ -8449,6 +8448,11 @@ spec: description: Indicates whether the manager is ready to begin servicing, based on whether it is initialized. type: boolean + serviceAccount: + description: Service account to track changes to the service account + and trigger rolling restart of indexer cluster peers when the service + account is updated + type: string type: object type: object served: true diff --git a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml index 171392367..5f0e3dd88 100644 --- a/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml +++ b/config/crd/bases/enterprise.splunk.com_ingestorclusters.yaml @@ -1699,8 +1699,10 @@ spec: type: integer type: object replicas: + default: 3 description: Number of ingestor pods format: int32 + minimum: 3 type: integer resources: description: resource requirements for the pod containers @@ -4596,6 +4598,11 @@ spec: description: App Framework version info for future use type: integer type: object + credentialSecretVersion: + description: Credential secret version to track changes to the secret + and trigger rolling restart of ingestor cluster peers when the secret + is updated + type: string message: description: Auxillary message describing CR status type: string @@ -4610,12 +4617,6 @@ spec: - Terminating - Error type: string - queueBucketAccessSecretVersion: - description: Queue and bucket access secret version - type: string - queueBucketAccessServiceAccount: - description: Queue and bucket access service account - type: string readyReplicas: description: Number of ready ingestor pods format: int32 @@ -4632,6 +4633,11 @@ spec: selector: description: Selector for pods used by HorizontalPodAutoscaler type: string + serviceAccount: + description: Service 
account to track changes to the service account + and trigger rolling restart of ingestor cluster peers when the service + account is updated + type: string telAppInstalled: description: Telemetry App installation flag type: boolean diff --git a/config/crd/bases/enterprise.splunk.com_objectstorages.yaml b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml index 23d5b437b..015d5797d 100644 --- a/config/crd/bases/enterprise.splunk.com_objectstorages.yaml +++ b/config/crd/bases/enterprise.splunk.com_objectstorages.yaml @@ -68,7 +68,7 @@ spec: type: string path: description: S3 bucket path - pattern: ^s3://[a-z0-9.-]{3,63}(?:/[^\s]+)?$ + pattern: ^(?:s3://)?[a-z0-9.-]{3,63}(?:/[^\s]+)?$ type: string required: - path diff --git a/docs/CustomResources.md b/docs/CustomResources.md index bd85c05ca..cd29cde79 100644 --- a/docs/CustomResources.md +++ b/docs/CustomResources.md @@ -400,7 +400,7 @@ metadata: spec: provider: s3 s3: - path: s3://ingestion/smartbus-test + path: ingestion/smartbus-test endpoint: https://s3.us-west-2.amazonaws.com ``` diff --git a/docs/IndexIngestionSeparation.md b/docs/IndexIngestionSeparation.md index 6ffd859f1..0dfdc3eb0 100644 --- a/docs/IndexIngestionSeparation.md +++ b/docs/IndexIngestionSeparation.md @@ -96,7 +96,7 @@ metadata: spec: provider: s3 s3: - path: s3://ingestion/smartbus-test + path: ingestion/smartbus-test endpoint: https://s3.us-west-2.amazonaws.com ``` @@ -228,7 +228,7 @@ objectStorage: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com - path: s3://ingestion/smartbus-test + path: ingestion/smartbus-test ``` ``` @@ -642,7 +642,7 @@ spec: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com - path: s3://ingestion/smartbus-test + path: ingestion/smartbus-test ``` ``` @@ -673,7 +673,7 @@ Metadata: Spec: S3: Endpoint: https://s3.us-west-2.amazonaws.com - Path: s3://ingestion/smartbus-test + Path: ingestion/smartbus-test Provider: s3 Status: Message: @@ -749,7 +749,7 @@ Status: Is Deployment In Progress: 
false Last App Info Check Time: 0 Version: 0 - Queue Bucket Access Secret Version: 33744270 + Credential Secret Version: 33744270 Message: Phase: Ready Ready Replicas: 3 diff --git a/internal/controller/ingestorcluster_controller_test.go b/internal/controller/ingestorcluster_controller_test.go index 49d59e608..52519f724 100644 --- a/internal/controller/ingestorcluster_controller_test.go +++ b/internal/controller/ingestorcluster_controller_test.go @@ -95,7 +95,7 @@ var _ = Describe("IngestorCluster Controller", func() { Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: "s3://ingestion/smartbus-test", + Path: "ingestion/smartbus-test", }, }, } @@ -143,7 +143,7 @@ var _ = Describe("IngestorCluster Controller", func() { Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: "s3://ingestion/smartbus-test", + Path: "ingestion/smartbus-test", }, }, } @@ -185,7 +185,7 @@ var _ = Describe("IngestorCluster Controller", func() { Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: "s3://ingestion/smartbus-test", + Path: "ingestion/smartbus-test", }, }, } diff --git a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml index c6cc343d8..99669acf1 100644 --- a/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/02-assert.yaml @@ -22,7 +22,7 @@ spec: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com - path: s3://index-ingest-separation-test-bucket/smartbus-test + path: index-ingest-separation-test-bucket/smartbus-test --- # assert for cluster manager custom resource to be ready diff --git a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml index 1cdbc33b8..8c733e53b 100644 --- 
a/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml +++ b/kuttl/tests/helm/index-and-ingest-separation/splunk_index_ingest_sep.yaml @@ -24,7 +24,7 @@ objectStorage: provider: s3 s3: endpoint: https://s3.us-west-2.amazonaws.com - path: s3://index-ingest-separation-test-bucket/smartbus-test + path: index-ingest-separation-test-bucket/smartbus-test ingestorCluster: enabled: true diff --git a/pkg/splunk/enterprise/indexercluster.go b/pkg/splunk/enterprise/indexercluster.go index 3f4b507f7..c3fb51615 100644 --- a/pkg/splunk/enterprise/indexercluster.go +++ b/pkg/splunk/enterprise/indexercluster.go @@ -76,8 +76,8 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller // updates status after function completes cr.Status.ClusterManagerPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.QueueBucketAccessSecretVersion = "0" - cr.Status.QueueBucketAccessServiceAccount = "" + cr.Status.CredentialSecretVersion = "0" + cr.Status.ServiceAccount = "" } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -300,12 +300,12 @@ func ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller } } - secretChanged := cr.Status.QueueBucketAccessSecretVersion != version - serviceAccountChanged := cr.Status.QueueBucketAccessServiceAccount != cr.Spec.ServiceAccount + secretChanged := cr.Status.CredentialSecretVersion != version + serviceAccountChanged := cr.Status.ServiceAccount != cr.Spec.ServiceAccount // If queue is updated if cr.Spec.QueueRef.Name != "" { - if secretChanged || serviceAccountChanged { + if secretChanged || serviceAccountChanged { mgr := newIndexerClusterPodManager(scopedLog, cr, namespaceScopedSecret, splclient.NewSplunkClient, client) err = mgr.updateIndexerConfFiles(ctx, cr, &queue.Spec, &os.Spec, accessKey, secretKey, client) if err != nil { @@ -323,8 +323,8 @@ func 
ApplyIndexerClusterManager(ctx context.Context, client splcommon.Controller scopedLog.Info("Restarted splunk", "indexer", i) } - cr.Status.QueueBucketAccessSecretVersion = version - cr.Status.QueueBucketAccessServiceAccount = cr.Spec.ServiceAccount + cr.Status.CredentialSecretVersion = version + cr.Status.ServiceAccount = cr.Spec.ServiceAccount } } @@ -417,8 +417,8 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, cr.Status.Phase = enterpriseApi.PhaseError cr.Status.ClusterMasterPhase = enterpriseApi.PhaseError if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.QueueBucketAccessSecretVersion = "0" - cr.Status.QueueBucketAccessServiceAccount = "" + cr.Status.CredentialSecretVersion = "0" + cr.Status.ServiceAccount = "" } cr.Status.Replicas = cr.Spec.Replicas cr.Status.Selector = fmt.Sprintf("app.kubernetes.io/instance=splunk-%s-indexer", cr.GetName()) @@ -644,8 +644,8 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, } } - secretChanged := cr.Status.QueueBucketAccessSecretVersion != version - serviceAccountChanged := cr.Status.QueueBucketAccessServiceAccount != cr.Spec.ServiceAccount + secretChanged := cr.Status.CredentialSecretVersion != version + serviceAccountChanged := cr.Status.ServiceAccount != cr.Spec.ServiceAccount if cr.Spec.QueueRef.Name != "" { if secretChanged || serviceAccountChanged { @@ -666,8 +666,8 @@ func ApplyIndexerCluster(ctx context.Context, client splcommon.ControllerClient, scopedLog.Info("Restarted splunk", "indexer", i) } - cr.Status.QueueBucketAccessSecretVersion = version - cr.Status.QueueBucketAccessServiceAccount = cr.Spec.ServiceAccount + cr.Status.CredentialSecretVersion = version + cr.Status.ServiceAccount = cr.Spec.ServiceAccount } } @@ -1405,21 +1405,35 @@ func imageUpdatedTo9(previousImage string, currentImage string) bool { // getQueueAndObjectStorageInputsForIndexerConfFiles returns a list of queue and object storage inputs for conf files func 
getQueueAndObjectStorageInputsForIndexerConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (inputs, outputs [][]string) { queueProvider := "" + authRegion := "" + endpoint := "" + dlq := "" if queue.Provider == "sqs" { queueProvider = "sqs_smartbus" + authRegion = queue.SQS.AuthRegion + endpoint = queue.SQS.Endpoint + dlq = queue.SQS.DLQ } + + path := "" + osEndpoint := "" osProvider := "" if os.Provider == "s3" { osProvider = "sqs_smartbus" + osEndpoint = os.S3.Endpoint + path = os.S3.Path + if !strings.HasPrefix(path, "s3://") { + path = "s3://" + path + } } inputs = append(inputs, []string{"remote_queue.type", queueProvider}, - []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), queue.SQS.AuthRegion}, - []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), queue.SQS.Endpoint}, - []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), os.S3.Endpoint}, - []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), os.S3.Path}, - []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), queue.SQS.DLQ}, + []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), authRegion}, + []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), endpoint}, + []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), osEndpoint}, + []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), path}, + []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), dlq}, []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", queueProvider), "4"}, []string{fmt.Sprintf("remote_queue.%s.retry_policy", queueProvider), "max_count"}, ) diff --git a/pkg/splunk/enterprise/indexercluster_test.go b/pkg/splunk/enterprise/indexercluster_test.go index ac9e59554..86f6d2074 100644 --- a/pkg/splunk/enterprise/indexercluster_test.go +++ 
b/pkg/splunk/enterprise/indexercluster_test.go @@ -2083,7 +2083,7 @@ func TestGetQueueAndPipelineInputsForIndexerConfFiles(t *testing.T) { Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: "s3://bucket/key", + Path: "bucket/key", }, }, } @@ -2098,7 +2098,7 @@ func TestGetQueueAndPipelineInputsForIndexerConfFiles(t *testing.T) { {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), "s3://" + os.Spec.S3.Path}, {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, @@ -2112,7 +2112,7 @@ func TestGetQueueAndPipelineInputsForIndexerConfFiles(t *testing.T) { {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), "s3://" + os.Spec.S3.Path}, {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, {fmt.Sprintf("remote_queue.%s.retry_policy", provider), "max_count"}, @@ -2176,7 +2176,7 @@ func TestUpdateIndexerConfFiles(t *testing.T) { Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: 
"s3://bucket/key", + Path: "bucket/key", }, }, } @@ -2200,8 +2200,8 @@ func TestUpdateIndexerConfFiles(t *testing.T) { }, }, Status: enterpriseApi.IndexerClusterStatus{ - ReadyReplicas: 3, - QueueBucketAccessSecretVersion: "123", + ReadyReplicas: 3, + CredentialSecretVersion: "123", }, } c.Create(ctx, cr) @@ -2431,7 +2431,7 @@ func TestApplyIndexerClusterManager_Queue_Success(t *testing.T) { Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: "s3://bucket/key", + Path: "bucket/key", }, }, } diff --git a/pkg/splunk/enterprise/ingestorcluster.go b/pkg/splunk/enterprise/ingestorcluster.go index de4f0c55c..8280ddc0d 100644 --- a/pkg/splunk/enterprise/ingestorcluster.go +++ b/pkg/splunk/enterprise/ingestorcluster.go @@ -20,6 +20,7 @@ import ( "context" "fmt" "reflect" + "strings" "time" "github.com/go-logr/logr" @@ -72,8 +73,8 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr // Update the CR Status defer updateCRStatus(ctx, client, cr, &err) if cr.Status.Replicas < cr.Spec.Replicas { - cr.Status.QueueBucketAccessSecretVersion = "0" - cr.Status.QueueBucketAccessServiceAccount = "" + cr.Status.CredentialSecretVersion = "0" + cr.Status.ServiceAccount = "" } cr.Status.Replicas = cr.Spec.Replicas @@ -266,8 +267,8 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr } } - secretChanged := cr.Status.QueueBucketAccessSecretVersion != version - serviceAccountChanged := cr.Status.QueueBucketAccessServiceAccount != cr.Spec.ServiceAccount + secretChanged := cr.Status.CredentialSecretVersion != version + serviceAccountChanged := cr.Status.ServiceAccount != cr.Spec.ServiceAccount // If queue is updated if secretChanged || serviceAccountChanged { @@ -288,8 +289,8 @@ func ApplyIngestorCluster(ctx context.Context, client client.Client, cr *enterpr scopedLog.Info("Restarted splunk", "ingestor", i) } - cr.Status.QueueBucketAccessSecretVersion = version - 
cr.Status.QueueBucketAccessServiceAccount = cr.Spec.ServiceAccount + cr.Status.CredentialSecretVersion = version + cr.Status.ServiceAccount = cr.Spec.ServiceAccount } // Upgrade fron automated MC to MC CRD @@ -469,20 +470,35 @@ func getPipelineInputsForConfFile(isIndexer bool) (config [][]string) { // getQueueAndObjectStorageInputsForConfFiles returns a list of queue and object storage inputs for conf files func getQueueAndObjectStorageInputsForIngestorConfFiles(queue *enterpriseApi.QueueSpec, os *enterpriseApi.ObjectStorageSpec, accessKey, secretKey string) (config [][]string) { queueProvider := "" + authRegion := "" + endpoint := "" + dlq := "" if queue.Provider == "sqs" { queueProvider = "sqs_smartbus" + authRegion = queue.SQS.AuthRegion + endpoint = queue.SQS.Endpoint + dlq = queue.SQS.DLQ } + + path := "" + osEndpoint := "" osProvider := "" if os.Provider == "s3" { osProvider = "sqs_smartbus" + osEndpoint = os.S3.Endpoint + path = os.S3.Path + if !strings.HasPrefix(path, "s3://") { + path = "s3://" + path + } } + config = append(config, []string{"remote_queue.type", queueProvider}, - []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), queue.SQS.AuthRegion}, - []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), queue.SQS.Endpoint}, - []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), os.S3.Endpoint}, - []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), os.S3.Path}, - []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", queueProvider), queue.SQS.DLQ}, + []string{fmt.Sprintf("remote_queue.%s.auth_region", queueProvider), authRegion}, + []string{fmt.Sprintf("remote_queue.%s.endpoint", queueProvider), endpoint}, + []string{fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", osProvider), osEndpoint}, + []string{fmt.Sprintf("remote_queue.%s.large_message_store.path", osProvider), path}, + []string{fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", 
queueProvider), dlq}, []string{fmt.Sprintf("remote_queue.%s.encoding_format", queueProvider), "s2s"}, []string{fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", queueProvider), "4"}, []string{fmt.Sprintf("remote_queue.%s.retry_policy", queueProvider), "max_count"}, diff --git a/pkg/splunk/enterprise/ingestorcluster_test.go b/pkg/splunk/enterprise/ingestorcluster_test.go index f7dd54b39..cdeaa14ac 100644 --- a/pkg/splunk/enterprise/ingestorcluster_test.go +++ b/pkg/splunk/enterprise/ingestorcluster_test.go @@ -99,7 +99,7 @@ func TestApplyIngestorCluster(t *testing.T) { Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: "s3://bucket/key", + Path: "bucket/key", }, }, } @@ -440,7 +440,7 @@ func TestGetQueueAndPipelineInputsForIngestorConfFiles(t *testing.T) { Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: "s3://bucket/key", + Path: "bucket/key", }, }, } @@ -456,7 +456,7 @@ func TestGetQueueAndPipelineInputsForIngestorConfFiles(t *testing.T) { {fmt.Sprintf("remote_queue.%s.auth_region", provider), queue.Spec.SQS.AuthRegion}, {fmt.Sprintf("remote_queue.%s.endpoint", provider), queue.Spec.SQS.Endpoint}, {fmt.Sprintf("remote_queue.%s.large_message_store.endpoint", provider), os.Spec.S3.Endpoint}, - {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), os.Spec.S3.Path}, + {fmt.Sprintf("remote_queue.%s.large_message_store.path", provider), "s3://" + os.Spec.S3.Path}, {fmt.Sprintf("remote_queue.%s.dead_letter_queue.name", provider), queue.Spec.SQS.DLQ}, {fmt.Sprintf("remote_queue.%s.encoding_format", provider), "s2s"}, {fmt.Sprintf("remote_queue.%s.max_count.max_retries_per_part", provider), "4"}, @@ -518,7 +518,7 @@ func TestUpdateIngestorConfFiles(t *testing.T) { Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: "s3://bucket/key", + Path: "bucket/key", }, }, } @@ -540,9 +540,9 @@ func 
TestUpdateIngestorConfFiles(t *testing.T) { }, }, Status: enterpriseApi.IngestorClusterStatus{ - Replicas: 3, - ReadyReplicas: 3, - QueueBucketAccessSecretVersion: "123", + Replicas: 3, + ReadyReplicas: 3, + CredentialSecretVersion: "123", }, } diff --git a/pkg/splunk/enterprise/objectstorage_test.go b/pkg/splunk/enterprise/objectstorage_test.go index a3511af69..1c91f131d 100644 --- a/pkg/splunk/enterprise/objectstorage_test.go +++ b/pkg/splunk/enterprise/objectstorage_test.go @@ -68,7 +68,7 @@ func TestApplyObjectStorage(t *testing.T) { Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: "s3://bucket/key", + Path: "bucket/key", }, }, } diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go index 3e18b669c..8bc789ac7 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_suite_test.go @@ -52,7 +52,7 @@ var ( Provider: "s3", S3: enterpriseApi.S3Spec{ Endpoint: "https://s3.us-west-2.amazonaws.com", - Path: "s3://index-ingest-separation-test-bucket/smartbus-test", + Path: "index-ingest-separation-test-bucket/smartbus-test", }, } serviceAccountName = "index-ingest-sa" diff --git a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go index 17b5bd8da..f3eac42e5 100644 --- a/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go +++ b/test/index_and_ingestion_separation/index_and_ingestion_separation_test.go @@ -317,8 +317,8 @@ var _ = Describe("indingsep test", func() { // Verify Ingestor Cluster Status testcaseEnvInst.Log.Info("Verify Ingestor Cluster Status") - Expect(ingest.Status.QueueBucketAccessSecretVersion).To(Not(Equal("")), "Ingestor queue status queue bucket access 
secret version is empty") - Expect(ingest.Status.QueueBucketAccessSecretVersion).To(Not(Equal("0")), "Ingestor queue status queue bucket access secret version is 0") + Expect(ingest.Status.CredentialSecretVersion).To(Not(Equal("")), "Ingestor queue status credential access secret version is empty") + Expect(ingest.Status.CredentialSecretVersion).To(Not(Equal("0")), "Ingestor queue status credential access secret version is 0") // Get instance of current Indexer Cluster CR with latest config testcaseEnvInst.Log.Info("Get instance of current Indexer Cluster CR with latest config") @@ -328,8 +328,8 @@ var _ = Describe("indingsep test", func() { // Verify Indexer Cluster Status testcaseEnvInst.Log.Info("Verify Indexer Cluster Status") - Expect(index.Status.QueueBucketAccessSecretVersion).To(Not(Equal("")), "Indexer queue status queue bucket access secret version is empty") - Expect(index.Status.QueueBucketAccessSecretVersion).To(Not(Equal("0")), "Indexer queue status queue bucket access secret version is 0") + Expect(index.Status.CredentialSecretVersion).To(Not(Equal("")), "Indexer queue status credential access secret version is empty") + Expect(index.Status.CredentialSecretVersion).To(Not(Equal("0")), "Indexer queue status credential access secret version is 0") // Verify conf files testcaseEnvInst.Log.Info("Verify conf files")