diff --git a/CHANGELOG.md b/CHANGELOG.md index 02d26f3c..17112fcf 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,13 @@ All notable changes to this project will be documented in this file. ## [Unreleased] +### Changed + +- OLM deployer doesn't add owner references to cluster scoped objects anymore ([#360]). + Owner references ensure that objects are garbage collected by OpenShift upon operator removal but they cause problems when the operator is updated. + This means that cluster wide objects are not removed anymore when the operator is uninstalled. + This behaviour is in line with the default behaviour of Helm and OLM. + ## [25.11.0] - 2025-11-07 ## [25.11.0-rc1] - 2025-11-06 diff --git a/rust/olm-deployer/src/owner/mod.rs b/rust/olm-deployer/src/owner/mod.rs index 50361ebe..cf7cb4bd 100644 --- a/rust/olm-deployer/src/owner/mod.rs +++ b/rust/olm-deployer/src/owner/mod.rs @@ -11,9 +11,16 @@ use stackable_operator::{ }, }; -/// Updates the owner list of the `target` according to it's scope. -/// For namespaced objects it uses the `ns_owner` whereas for cluster wide -/// objects it uses the `cluster_owner`. +/// Updates owner references of objects created by this deployer so that when an operator is +/// uninstalled by OLM, all created namespaced objects are also removed by Kubernetes garbage +/// collection. +/// +/// Namespaced objects' owner references are updated in place with the value of `ns_owner`. +/// +/// A previous version of this function also updated cluster scoped objects to set the owner +/// reference to `cluster_owner`, but this turned out to be problematic. First, this is not how OLM +/// and Helm behave by default. Second, when updating the listener operator, deleting and recreating +/// the CSI driver definition left the cluster in a broken state.
+/// See this comment for more details: https://github.com/stackabletech/issues/issues/799#issuecomment-3601121617 pub(super) fn maybe_update_owner( target: &mut DynamicObject, scope: &Scope, @@ -21,9 +28,13 @@ pub(super) fn maybe_update_owner( cluster_owner: &ClusterRole, ) -> Result<()> { let owner_ref = owner_ref(scope, ns_owner, cluster_owner)?; - match target.metadata.owner_references { - Some(ref mut ors) => ors.push(owner_ref), - None => target.metadata.owner_references = Some(vec![owner_ref]), + // 2025-12-10: do not set owner references for cluster scoped objects anymore to prevent them from being + // deleted upon operator upgrades. + if scope == &Scope::Namespaced { + match target.metadata.owner_references { + Some(ref mut ors) => ors.push(owner_ref), + None => target.metadata.owner_references = Some(vec![owner_ref]), + } } Ok(()) } @@ -147,13 +158,7 @@ rules: let mut daemonset = DAEMONSET.clone(); maybe_update_owner(&mut daemonset, &Scope::Cluster, &DEPLOYMENT, &CLUSTER_ROLE)?; - let expected = Some(vec![OwnerReference { - uid: "d9287d0a-3069-47c3-8c90-b714dc6dddaa".to_string(), - name: "listener-operator-clusterrole".to_string(), - kind: "ClusterRole".to_string(), - api_version: "rbac.authorization.k8s.io/v1".to_string(), - ..OwnerReference::default() - }]); + let expected = None; assert_eq!(daemonset.metadata.owner_references, expected); Ok(()) }