From 9b7a62096fc5ee017519ed24079dd20729c722fe Mon Sep 17 00:00:00 2001
From: Martin Dekov
Date: Mon, 13 Oct 2025 13:37:04 +0300
Subject: [PATCH 1/7] Validate disk removals

Validate disk removals when the disk contains a volume's last healthy
replica. Replica health is determined by the FailedAt field: if it is
empty, the replica is deemed healthy. Removal is also rejected when the
disk hosts the last usable backing image, where a "usable backing image"
is one that is in the ready state.

Signed-off-by: Martin Dekov
---
 cmd/node-disk-manager-webhook/main.go | 20 +-
 .../templates/rbac.yaml | 5 +-
 pkg/utils/utils.go | 2 +
 pkg/webhook/blockdevice/validator.go | 200 ++++++++++++++++-
 .../controllers/longhorn.io/factory.go | 72 ++++++
 .../controllers/longhorn.io/interface.go | 43 ++++
 .../longhorn.io/v1beta2/backingimage.go | 208 ++++++++++++++++++
 .../v1beta2/backingimagedatasource.go | 208 ++++++++++++++++++
 .../controllers/longhorn.io/v1beta2/backup.go | 208 ++++++++++++++++++
 .../longhorn.io/v1beta2/backupbackingimage.go | 208 ++++++++++++++++++
 .../longhorn.io/v1beta2/backuptarget.go | 208 ++++++++++++++++++
 .../longhorn.io/v1beta2/backupvolume.go | 208 ++++++++++++++++++
 .../controllers/longhorn.io/v1beta2/engine.go | 208 ++++++++++++++++++
 .../longhorn.io/v1beta2/interface.go | 104 +++++++++
 .../controllers/longhorn.io/v1beta2/node.go | 208 ++++++++++++++++++
 .../longhorn.io/v1beta2/replica.go | 208 ++++++++++++++++++
 .../longhorn.io/v1beta2/setting.go | 208 ++++++++++++++++++
 .../longhorn.io/v1beta2/snapshot.go | 208 ++++++++++++++++++
 .../controllers/longhorn.io/v1beta2/volume.go | 208 ++++++++++++++++++
 vendor/modules.txt | 2 +
 20 files changed, 2939 insertions(+), 5 deletions(-)
 create mode 100644 vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/factory.go
 create mode 100644 vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/interface.go
 create mode 100644 vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backingimage.go
 create mode 100644 vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backingimagedatasource.go
 create mode 100644 vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backup.go
 create mode 100644 vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backupbackingimage.go
 create mode 100644 vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backuptarget.go
 create mode 100644 vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backupvolume.go
 create mode 100644 vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/engine.go
 create mode 100644 vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/interface.go
 create mode 100644 vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/node.go
 create mode 100644 vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/replica.go
 create mode 100644 vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/setting.go
 create mode 100644 vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/snapshot.go
 create mode 100644 vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/volume.go

diff --git a/cmd/node-disk-manager-webhook/main.go b/cmd/node-disk-manager-webhook/main.go
index 
30e764701..bea1baea9 100644 --- a/cmd/node-disk-manager-webhook/main.go +++ b/cmd/node-disk-manager-webhook/main.go @@ -5,6 +5,8 @@ import ( "fmt" "os" + ctrllh "github.com/harvester/harvester/pkg/generated/controllers/longhorn.io" + lhv1beta2 "github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2" "github.com/harvester/webhook/pkg/config" "github.com/harvester/webhook/pkg/server" "github.com/harvester/webhook/pkg/server/admission" @@ -32,6 +34,10 @@ type resourceCaches struct { lvmVGCache ctldiskv1.LVMVolumeGroupCache storageClassCache ctlstoragev1.StorageClassCache pvCache ctlcorev1.PersistentVolumeCache + volumeCache lhv1beta2.VolumeCache + backingImageCache lhv1beta2.BackingImageCache + lhNodeCache lhv1beta2.NodeCache + replicaCache lhv1beta2.ReplicaCache } func main() { @@ -117,7 +123,8 @@ func runWebhookServer(ctx context.Context, cfg *rest.Config, options *config.Opt bdMutator, } - bdValidator := blockdevice.NewBlockdeviceValidator(resourceCaches.bdCache, resourceCaches.storageClassCache, resourceCaches.pvCache) + bdValidator := blockdevice.NewBlockdeviceValidator(resourceCaches.bdCache, resourceCaches.storageClassCache, resourceCaches.pvCache, + resourceCaches.volumeCache, resourceCaches.backingImageCache, resourceCaches.lhNodeCache, resourceCaches.replicaCache) scValidator := storageclass.NewStorageClassValidator(resourceCaches.lvmVGCache) var validators = []admission.Validator{ bdValidator, @@ -156,12 +163,21 @@ func newCaches(ctx context.Context, cfg *rest.Config, threadiness int) (*resourc if err != nil { return nil, err } - starters = append(starters, disks, storageFactory, coreFactory) + lhFactory, err := ctrllh.NewFactoryFromConfig(cfg) + if err != nil { + return nil, err + } + + starters = append(starters, disks, storageFactory, coreFactory, lhFactory) resourceCaches := &resourceCaches{ bdCache: disks.Harvesterhci().V1beta1().BlockDevice().Cache(), lvmVGCache: disks.Harvesterhci().V1beta1().LVMVolumeGroup().Cache(), storageClassCache: storageFactory.Storage().V1().StorageClass().Cache(), pvCache: coreFactory.Core().V1().PersistentVolume().Cache(), + volumeCache: lhFactory.Longhorn().V1beta2().Volume().Cache(), + backingImageCache: lhFactory.Longhorn().V1beta2().BackingImage().Cache(), + lhNodeCache: lhFactory.Longhorn().V1beta2().Node().Cache(), + replicaCache: lhFactory.Longhorn().V1beta2().Replica().Cache(), } if err := start.All(ctx, threadiness, starters...); err != nil { diff --git a/deploy/charts/harvester-node-disk-manager/templates/rbac.yaml b/deploy/charts/harvester-node-disk-manager/templates/rbac.yaml index 593ee1466..69d067c9d 100644 --- a/deploy/charts/harvester-node-disk-manager/templates/rbac.yaml +++ b/deploy/charts/harvester-node-disk-manager/templates/rbac.yaml @@ -57,7 +57,7 @@ rules: resources: [ "storageclasses" ] verbs: [ "*" ] - apiGroups: [ "harvesterhci.io" ] - resources: [ "blockdevices", "lvmvolumegroups", "lvmvolumegroups/status" ] + resources: [ "blockdevices", "lvmvolumegroups", "lvmvolumegroups/status"] verbs: [ "*" ] - apiGroups: [ "apiregistration.k8s.io" ] resources: [ "apiservices" ] @@ -68,6 +68,9 @@ rules: - apiGroups: [ "admissionregistration.k8s.io" ] resources: [ "validatingwebhookconfigurations", "mutatingwebhookconfigurations" ] verbs: [ "*" ] + - apiGroups: ["longhorn.io"] + resources: ["volumes", "nodes", "backingimages", "replicas"] + verbs: [ "get", "watch", "list" ] --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index 
3a749962b..7cace3bbd 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -25,6 +25,8 @@ const ( LVMCSIDriver = "lvm.driver.harvesterhci.io" // LVMTopologyNodeKey is the key of LVM topology node LVMTopologyNodeKey = "topology.lvm.csi/node" + // LonghornSystemNamespaceName is the namespace containing longhorn components + LonghornSystemNamespaceName = "longhorn-system" ) var CmdTimeoutError error diff --git a/pkg/webhook/blockdevice/validator.go b/pkg/webhook/blockdevice/validator.go index c7e2afccf..2ba541268 100644 --- a/pkg/webhook/blockdevice/validator.go +++ b/pkg/webhook/blockdevice/validator.go @@ -2,9 +2,12 @@ package blockdevice import ( "fmt" + "strings" + lhv1beta2 "github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2" werror "github.com/harvester/webhook/pkg/error" "github.com/harvester/webhook/pkg/server/admission" + lhv1 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" ctlcorev1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" ctlstoragev1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/v1" "github.com/sirupsen/logrus" @@ -18,19 +21,40 @@ import ( "github.com/harvester/node-disk-manager/pkg/utils" ) +const ( + BackingImageByDiskUUID = "longhorn.io/backingimage-by-diskuuid" + NodeByBlockDeviceName = "longhorn.io/node-by-blockdevice-name" + ReplicaByDiskUUID = "longhorn.io/replica-by-disk-uuid" + ReplicaByVolume = "longhorn.io/replica-by-volume" +) + type Validator struct { admission.DefaultValidator BlockdeviceCache ctldiskv1.BlockDeviceCache storageClassCache ctlstoragev1.StorageClassCache pvCache ctlcorev1.PersistentVolumeCache + volumeCache lhv1beta2.VolumeCache + backingImageCache lhv1beta2.BackingImageCache + lhNodeCache lhv1beta2.NodeCache + replicaCache lhv1beta2.ReplicaCache } -func NewBlockdeviceValidator(blockdeviceCache ctldiskv1.BlockDeviceCache, storageClassCache ctlstoragev1.StorageClassCache, pvCache ctlcorev1.PersistentVolumeCache) *Validator { +func NewBlockdeviceValidator(blockdeviceCache ctldiskv1.BlockDeviceCache, storageClassCache ctlstoragev1.StorageClassCache, + pvCache ctlcorev1.PersistentVolumeCache, volumeCache lhv1beta2.VolumeCache, backingImageCache lhv1beta2.BackingImageCache, + lhNodeCache lhv1beta2.NodeCache, replicaCache lhv1beta2.ReplicaCache) *Validator { + backingImageCache.AddIndexer(BackingImageByDiskUUID, backingImageByDiskUUIDIndexer) + lhNodeCache.AddIndexer(NodeByBlockDeviceName, nodeByBlockDeviceNameIndexer) + replicaCache.AddIndexer(ReplicaByDiskUUID, replicaByDiskUUIDIndexer) + replicaCache.AddIndexer(ReplicaByVolume, replicaByVolumeIndexer) return &Validator{ BlockdeviceCache: blockdeviceCache, storageClassCache: storageClassCache, pvCache: pvCache, + volumeCache: volumeCache, + backingImageCache: backingImageCache, + lhNodeCache: lhNodeCache, + replicaCache: replicaCache, } } @@ -45,10 +69,14 @@ func (v *Validator) Create(_ *admission.Request, newObj runtime.Object) error { func (v *Validator) Update(_ *admission.Request, oldObj, newObj runtime.Object) error { newBd := newObj.(*diskv1.BlockDevice) oldBd := oldObj.(*diskv1.BlockDevice) + if err := v.validateProvisioner(newBd); err != nil { return err } - return v.validateLVMProvisioner(oldBd, newBd) + if err := v.validateLVMProvisioner(oldBd, newBd); err != nil { + return err + } + return v.validateLHDisk(oldBd, newBd) } func (v *Validator) validateProvisioner(bd *diskv1.BlockDevice) error { @@ -62,6 +90,22 @@ func (v *Validator) validateProvisioner(bd *diskv1.BlockDevice) error { return nil } 
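+// validateLHDisk validates removal of a Longhorn-provisioned disk (Spec.Provision
+// flipped from true to false): the update is rejected when the disk still hosts the
+// last healthy replica of any volume or the last ready copy of any backing image.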
+func (v *Validator) validateLHDisk(oldBd, newBd *diskv1.BlockDevice) error { + if oldBd.Spec.Provisioner != nil && newBd.Spec.Provisioner != nil && + oldBd.Spec.Provisioner.Longhorn != nil && newBd.Spec.Provisioner.Longhorn != nil && + oldBd.Spec.Provision && !newBd.Spec.Provision { + err := v.validateVolumes(oldBd) + if err != nil { + return err + } + err = v.validateBackingImages(oldBd) + if err != nil { + return err + } + } + return nil +} + // validateLVMProvisioner will check the block device with LVM provisioner and block // if there is already have any pvc created with in the target volume group func (v *Validator) validateLVMProvisioner(oldbd, newbd *diskv1.BlockDevice) error { @@ -127,6 +171,122 @@ func (v *Validator) validateVGIsAlreadyUsed(bd *diskv1.BlockDevice) error { return nil } +func (v *Validator) validateVolumes(old *diskv1.BlockDevice) error { + lhNode, err := v.lhNodeCache.Get(utils.LonghornSystemNamespaceName, old.Spec.NodeName) + if err != nil { + errStr := fmt.Sprintf("Failed to get longhorn node %s: %s", old.Spec.NodeName, err.Error()) + return werror.NewBadRequest(errStr) + } + + diskStatus, ok := lhNode.Status.DiskStatus[old.Name] + if !ok || diskStatus == nil { + return nil + } + + targetDiskUUID := diskStatus.DiskUUID + replicaObjs, err := v.replicaCache.GetByIndex(ReplicaByDiskUUID, targetDiskUUID) + if err != nil { + errStr := fmt.Sprintf("Failed to get replicas by disk UUID %s: %s", targetDiskUUID, err.Error()) + return werror.NewBadRequest(errStr) + } + if len(replicaObjs) == 0 { + return nil + } + + volumesToCheck := make(map[string]struct{}) + for _, replicaObj := range replicaObjs { + volumesToCheck[replicaObj.Spec.VolumeName] = struct{}{} + } + + var unsafeVolumes []string + for volName := range volumesToCheck { + replicaObjsForVolume, err := v.replicaCache.GetByIndex(ReplicaByVolume, volName) + if err != nil { + errStr := fmt.Sprintf("Failed to get replicas for volume %s from index: %s", volName, err.Error()) + return werror.NewBadRequest(errStr) + } + + var healthyReplicaCount int + var replicaOnTargetDiskIsHealthy bool + for _, rep := range replicaObjsForVolume { + if rep.Spec.FailedAt == "" { + healthyReplicaCount++ + if rep.Spec.DiskID == targetDiskUUID { + replicaOnTargetDiskIsHealthy = true + } + } + } + + if healthyReplicaCount == 1 && replicaOnTargetDiskIsHealthy { + unsafeVolumes = append(unsafeVolumes, volName) + } + } + + if len(unsafeVolumes) > 0 { + errStr := fmt.Sprintf("Cannot remove disk %s because it hosts the only healthy replica for the following volumes: %s", + old.Spec.DevPath, strings.Join(unsafeVolumes, ", ")) + return werror.NewBadRequest(errStr) + } + + return nil +} + +func (v *Validator) validateBackingImages(old *diskv1.BlockDevice) error { + lhNodes, err := v.lhNodeCache.GetByIndex(NodeByBlockDeviceName, old.Name) + if err != nil { + errStr := fmt.Sprintf("Error looking up node by blockdevice name %s: %s", old.Name, err.Error()) + return werror.NewBadRequest(errStr) + } + if len(lhNodes) != 1 || lhNodes[0] == nil { + return nil + } + + lhNode := lhNodes[0] + diskStatus, ok := lhNode.Status.DiskStatus[old.Name] + if !ok || diskStatus.DiskUUID == "" { + return nil + } + + uuid := diskStatus.DiskUUID + backingImages, err := v.backingImageCache.GetByIndex(BackingImageByDiskUUID, uuid) + if err != nil { + errStr := fmt.Sprintf("Error looking up backing images by disk UUID %s: %s", uuid, err.Error()) + return werror.NewBadRequest(errStr) + } + if len(backingImages) == 0 { + return nil + } + + var unsafeToRemoveBackingImages 
[]string + for _, backingImage := range backingImages { + if backingImage == nil { + continue + } + readyCount := 0 + var readyDiskUUID string + for diskUUID, fileStatus := range backingImage.Status.DiskFileStatusMap { + if fileStatus == nil { + continue + } + if fileStatus.State == lhv1.BackingImageStateReady { + readyCount++ + readyDiskUUID = diskUUID + } + } + if readyCount == 1 && readyDiskUUID == uuid { + unsafeToRemoveBackingImages = append(unsafeToRemoveBackingImages, backingImage.Name) + } + } + + if len(unsafeToRemoveBackingImages) > 0 { + errStr := fmt.Sprintf("Cannot remove disk %s as it contains the only ready copy for the following backing images: %s", + old.Name, strings.Join(unsafeToRemoveBackingImages, ", ")) + return werror.NewBadRequest(errStr) + } + + return nil +} + func (v *Validator) Resource() admission.Resource { return admission.Resource{ Names: []string{"blockdevices"}, @@ -151,3 +311,39 @@ func getLVMTopologyNodes(sc *storagev1.StorageClass) string { } return "" } + +func backingImageByDiskUUIDIndexer(bi *lhv1.BackingImage) ([]string, error) { + if bi.Spec.DiskFileSpecMap == nil { + return []string{}, nil + } + diskUUIDs := make([]string, 0, len(bi.Status.DiskFileStatusMap)) + for key := range bi.Status.DiskFileStatusMap { + diskUUIDs = append(diskUUIDs, key) + } + return diskUUIDs, nil +} + +func nodeByBlockDeviceNameIndexer(node *lhv1.Node) ([]string, error) { + if node.Status.DiskStatus == nil { + return []string{}, nil + } + blockDeviceNames := make([]string, 0, len(node.Status.DiskStatus)) + for key := range node.Status.DiskStatus { + blockDeviceNames = append(blockDeviceNames, key) + } + return blockDeviceNames, nil +} + +func replicaByDiskUUIDIndexer(replica *lhv1.Replica) ([]string, error) { + if replica.Spec.DiskID == "" { + return []string{}, nil + } + return []string{replica.Spec.DiskID}, nil +} + +func replicaByVolumeIndexer(replica *lhv1.Replica) ([]string, error) { + if replica.Spec.VolumeName == "" { + return []string{}, nil + } + return []string{replica.Spec.VolumeName}, nil +} diff --git a/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/factory.go b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/factory.go new file mode 100644 index 000000000..8304f4252 --- /dev/null +++ b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/factory.go @@ -0,0 +1,72 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. 
+ +package longhorn + +import ( + "github.com/rancher/lasso/pkg/controller" + "github.com/rancher/wrangler/v3/pkg/generic" + "k8s.io/client-go/rest" +) + +type Factory struct { + *generic.Factory +} + +func NewFactoryFromConfigOrDie(config *rest.Config) *Factory { + f, err := NewFactoryFromConfig(config) + if err != nil { + panic(err) + } + return f +} + +func NewFactoryFromConfig(config *rest.Config) (*Factory, error) { + return NewFactoryFromConfigWithOptions(config, nil) +} + +func NewFactoryFromConfigWithNamespace(config *rest.Config, namespace string) (*Factory, error) { + return NewFactoryFromConfigWithOptions(config, &FactoryOptions{ + Namespace: namespace, + }) +} + +type FactoryOptions = generic.FactoryOptions + +func NewFactoryFromConfigWithOptions(config *rest.Config, opts *FactoryOptions) (*Factory, error) { + f, err := generic.NewFactoryFromConfigWithOptions(config, opts) + return &Factory{ + Factory: f, + }, err +} + +func NewFactoryFromConfigWithOptionsOrDie(config *rest.Config, opts *FactoryOptions) *Factory { + f, err := NewFactoryFromConfigWithOptions(config, opts) + if err != nil { + panic(err) + } + return f +} + +func (c *Factory) Longhorn() Interface { + return New(c.ControllerFactory()) +} + +func (c *Factory) WithAgent(userAgent string) Interface { + return New(controller.NewSharedControllerFactoryWithAgent(userAgent, c.ControllerFactory())) +} diff --git a/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/interface.go b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/interface.go new file mode 100644 index 000000000..fac85872d --- /dev/null +++ b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/interface.go @@ -0,0 +1,43 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package longhorn + +import ( + v1beta2 "github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2" + "github.com/rancher/lasso/pkg/controller" +) + +type Interface interface { + V1beta2() v1beta2.Interface +} + +type group struct { + controllerFactory controller.SharedControllerFactory +} + +// New returns a new Interface. +func New(controllerFactory controller.SharedControllerFactory) Interface { + return &group{ + controllerFactory: controllerFactory, + } +} + +func (g *group) V1beta2() v1beta2.Interface { + return v1beta2.New(g.controllerFactory) +} diff --git a/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backingimage.go b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backingimage.go new file mode 100644 index 000000000..3be057e2e --- /dev/null +++ b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backingimage.go @@ -0,0 +1,208 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + "sync" + "time" + + v1beta2 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// BackingImageController interface for managing BackingImage resources. +type BackingImageController interface { + generic.ControllerInterface[*v1beta2.BackingImage, *v1beta2.BackingImageList] +} + +// BackingImageClient interface for managing BackingImage resources in Kubernetes. +type BackingImageClient interface { + generic.ClientInterface[*v1beta2.BackingImage, *v1beta2.BackingImageList] +} + +// BackingImageCache interface for retrieving BackingImage resources in memory. +type BackingImageCache interface { + generic.CacheInterface[*v1beta2.BackingImage] +} + +// BackingImageStatusHandler is executed for every added or modified BackingImage. Should return the new status to be updated +type BackingImageStatusHandler func(obj *v1beta2.BackingImage, status v1beta2.BackingImageStatus) (v1beta2.BackingImageStatus, error) + +// BackingImageGeneratingHandler is the top-level handler that is executed for every BackingImage event. It extends BackingImageStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type BackingImageGeneratingHandler func(obj *v1beta2.BackingImage, status v1beta2.BackingImageStatus) ([]runtime.Object, v1beta2.BackingImageStatus, error) + +// RegisterBackingImageStatusHandler configures a BackingImageController to execute a BackingImageStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterBackingImageStatusHandler(ctx context.Context, controller BackingImageController, condition condition.Cond, name string, handler BackingImageStatusHandler) { + statusHandler := &backingImageStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterBackingImageGeneratingHandler configures a BackingImageController to execute a BackingImageGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. 
+// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterBackingImageGeneratingHandler(ctx context.Context, controller BackingImageController, apply apply.Apply, + condition condition.Cond, name string, handler BackingImageGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &backingImageGeneratingHandler{ + BackingImageGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterBackingImageStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type backingImageStatusHandler struct { + client BackingImageClient + condition condition.Cond + handler BackingImageStatusHandler +} + +// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *backingImageStatusHandler) sync(key string, obj *v1beta2.BackingImage) (*v1beta2.BackingImage, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type backingImageGeneratingHandler struct { + BackingImageGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *backingImageGeneratingHandler) Remove(key string, obj *v1beta2.BackingImage) (*v1beta2.BackingImage, error) { + if obj != nil { + return obj, nil + } + + obj = &v1beta2.BackingImage{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects() +} + +// Handle executes the configured BackingImageGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *backingImageGeneratingHandler) Handle(obj *v1beta2.BackingImage, status v1beta2.BackingImageStatus) (v1beta2.BackingImageStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.BackingImageGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) 
+ if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *backingImageGeneratingHandler) isNewResourceVersion(obj *v1beta2.BackingImage) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *backingImageGeneratingHandler) storeResourceVersion(obj *v1beta2.BackingImage) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +} diff --git a/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backingimagedatasource.go b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backingimagedatasource.go new file mode 100644 index 000000000..cc192114d --- /dev/null +++ b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backingimagedatasource.go @@ -0,0 +1,208 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + "sync" + "time" + + v1beta2 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// BackingImageDataSourceController interface for managing BackingImageDataSource resources. +type BackingImageDataSourceController interface { + generic.ControllerInterface[*v1beta2.BackingImageDataSource, *v1beta2.BackingImageDataSourceList] +} + +// BackingImageDataSourceClient interface for managing BackingImageDataSource resources in Kubernetes. +type BackingImageDataSourceClient interface { + generic.ClientInterface[*v1beta2.BackingImageDataSource, *v1beta2.BackingImageDataSourceList] +} + +// BackingImageDataSourceCache interface for retrieving BackingImageDataSource resources in memory. +type BackingImageDataSourceCache interface { + generic.CacheInterface[*v1beta2.BackingImageDataSource] +} + +// BackingImageDataSourceStatusHandler is executed for every added or modified BackingImageDataSource. 
Should return the new status to be updated +type BackingImageDataSourceStatusHandler func(obj *v1beta2.BackingImageDataSource, status v1beta2.BackingImageDataSourceStatus) (v1beta2.BackingImageDataSourceStatus, error) + +// BackingImageDataSourceGeneratingHandler is the top-level handler that is executed for every BackingImageDataSource event. It extends BackingImageDataSourceStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type BackingImageDataSourceGeneratingHandler func(obj *v1beta2.BackingImageDataSource, status v1beta2.BackingImageDataSourceStatus) ([]runtime.Object, v1beta2.BackingImageDataSourceStatus, error) + +// RegisterBackingImageDataSourceStatusHandler configures a BackingImageDataSourceController to execute a BackingImageDataSourceStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterBackingImageDataSourceStatusHandler(ctx context.Context, controller BackingImageDataSourceController, condition condition.Cond, name string, handler BackingImageDataSourceStatusHandler) { + statusHandler := &backingImageDataSourceStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterBackingImageDataSourceGeneratingHandler configures a BackingImageDataSourceController to execute a BackingImageDataSourceGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterBackingImageDataSourceGeneratingHandler(ctx context.Context, controller BackingImageDataSourceController, apply apply.Apply, + condition condition.Cond, name string, handler BackingImageDataSourceGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &backingImageDataSourceGeneratingHandler{ + BackingImageDataSourceGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterBackingImageDataSourceStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type backingImageDataSourceStatusHandler struct { + client BackingImageDataSourceClient + condition condition.Cond + handler BackingImageDataSourceStatusHandler +} + +// sync is executed on every resource addition or modification. 
Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *backingImageDataSourceStatusHandler) sync(key string, obj *v1beta2.BackingImageDataSource) (*v1beta2.BackingImageDataSource, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type backingImageDataSourceGeneratingHandler struct { + BackingImageDataSourceGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *backingImageDataSourceGeneratingHandler) Remove(key string, obj *v1beta2.BackingImageDataSource) (*v1beta2.BackingImageDataSource, error) { + if obj != nil { + return obj, nil + } + + obj = &v1beta2.BackingImageDataSource{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects() +} + +// Handle executes the configured BackingImageDataSourceGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *backingImageDataSourceGeneratingHandler) Handle(obj *v1beta2.BackingImageDataSource, status v1beta2.BackingImageDataSourceStatus) (v1beta2.BackingImageDataSourceStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.BackingImageDataSourceGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. 
+// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *backingImageDataSourceGeneratingHandler) isNewResourceVersion(obj *v1beta2.BackingImageDataSource) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *backingImageDataSourceGeneratingHandler) storeResourceVersion(obj *v1beta2.BackingImageDataSource) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +} diff --git a/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backup.go b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backup.go new file mode 100644 index 000000000..7e20ee82d --- /dev/null +++ b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backup.go @@ -0,0 +1,208 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + "sync" + "time" + + v1beta2 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// BackupController interface for managing Backup resources. +type BackupController interface { + generic.ControllerInterface[*v1beta2.Backup, *v1beta2.BackupList] +} + +// BackupClient interface for managing Backup resources in Kubernetes. +type BackupClient interface { + generic.ClientInterface[*v1beta2.Backup, *v1beta2.BackupList] +} + +// BackupCache interface for retrieving Backup resources in memory. +type BackupCache interface { + generic.CacheInterface[*v1beta2.Backup] +} + +// BackupStatusHandler is executed for every added or modified Backup. Should return the new status to be updated +type BackupStatusHandler func(obj *v1beta2.Backup, status v1beta2.BackupStatus) (v1beta2.BackupStatus, error) + +// BackupGeneratingHandler is the top-level handler that is executed for every Backup event. It extends BackupStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type BackupGeneratingHandler func(obj *v1beta2.Backup, status v1beta2.BackupStatus) ([]runtime.Object, v1beta2.BackupStatus, error) + +// RegisterBackupStatusHandler configures a BackupController to execute a BackupStatusHandler for every events observed. 
+// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterBackupStatusHandler(ctx context.Context, controller BackupController, condition condition.Cond, name string, handler BackupStatusHandler) { + statusHandler := &backupStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterBackupGeneratingHandler configures a BackupController to execute a BackupGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterBackupGeneratingHandler(ctx context.Context, controller BackupController, apply apply.Apply, + condition condition.Cond, name string, handler BackupGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &backupGeneratingHandler{ + BackupGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterBackupStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type backupStatusHandler struct { + client BackupClient + condition condition.Cond + handler BackupStatusHandler +} + +// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *backupStatusHandler) sync(key string, obj *v1beta2.Backup) (*v1beta2.Backup, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type backupGeneratingHandler struct { + BackupGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *backupGeneratingHandler) Remove(key string, obj *v1beta2.Backup) (*v1beta2.Backup, error) { + if obj != nil { + return obj, nil + } + + obj = &v1beta2.Backup{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). 
+ ApplyObjects() +} + +// Handle executes the configured BackupGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *backupGeneratingHandler) Handle(obj *v1beta2.Backup, status v1beta2.BackupStatus) (v1beta2.BackupStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.BackupGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *backupGeneratingHandler) isNewResourceVersion(obj *v1beta2.Backup) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *backupGeneratingHandler) storeResourceVersion(obj *v1beta2.Backup) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +} diff --git a/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backupbackingimage.go b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backupbackingimage.go new file mode 100644 index 000000000..7b8d1f84b --- /dev/null +++ b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backupbackingimage.go @@ -0,0 +1,208 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + "sync" + "time" + + v1beta2 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// BackupBackingImageController interface for managing BackupBackingImage resources. 
+type BackupBackingImageController interface { + generic.ControllerInterface[*v1beta2.BackupBackingImage, *v1beta2.BackupBackingImageList] +} + +// BackupBackingImageClient interface for managing BackupBackingImage resources in Kubernetes. +type BackupBackingImageClient interface { + generic.ClientInterface[*v1beta2.BackupBackingImage, *v1beta2.BackupBackingImageList] +} + +// BackupBackingImageCache interface for retrieving BackupBackingImage resources in memory. +type BackupBackingImageCache interface { + generic.CacheInterface[*v1beta2.BackupBackingImage] +} + +// BackupBackingImageStatusHandler is executed for every added or modified BackupBackingImage. Should return the new status to be updated +type BackupBackingImageStatusHandler func(obj *v1beta2.BackupBackingImage, status v1beta2.BackupBackingImageStatus) (v1beta2.BackupBackingImageStatus, error) + +// BackupBackingImageGeneratingHandler is the top-level handler that is executed for every BackupBackingImage event. It extends BackupBackingImageStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type BackupBackingImageGeneratingHandler func(obj *v1beta2.BackupBackingImage, status v1beta2.BackupBackingImageStatus) ([]runtime.Object, v1beta2.BackupBackingImageStatus, error) + +// RegisterBackupBackingImageStatusHandler configures a BackupBackingImageController to execute a BackupBackingImageStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterBackupBackingImageStatusHandler(ctx context.Context, controller BackupBackingImageController, condition condition.Cond, name string, handler BackupBackingImageStatusHandler) { + statusHandler := &backupBackingImageStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterBackupBackingImageGeneratingHandler configures a BackupBackingImageController to execute a BackupBackingImageGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterBackupBackingImageGeneratingHandler(ctx context.Context, controller BackupBackingImageController, apply apply.Apply, + condition condition.Cond, name string, handler BackupBackingImageGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &backupBackingImageGeneratingHandler{ + BackupBackingImageGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterBackupBackingImageStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type backupBackingImageStatusHandler struct { + client BackupBackingImageClient + condition condition.Cond + handler BackupBackingImageStatusHandler +} + +// sync is executed on every resource addition or modification. 
Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *backupBackingImageStatusHandler) sync(key string, obj *v1beta2.BackupBackingImage) (*v1beta2.BackupBackingImage, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type backupBackingImageGeneratingHandler struct { + BackupBackingImageGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *backupBackingImageGeneratingHandler) Remove(key string, obj *v1beta2.BackupBackingImage) (*v1beta2.BackupBackingImage, error) { + if obj != nil { + return obj, nil + } + + obj = &v1beta2.BackupBackingImage{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects() +} + +// Handle executes the configured BackupBackingImageGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *backupBackingImageGeneratingHandler) Handle(obj *v1beta2.BackupBackingImage, status v1beta2.BackupBackingImageStatus) (v1beta2.BackupBackingImageStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.BackupBackingImageGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. 
+// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *backupBackingImageGeneratingHandler) isNewResourceVersion(obj *v1beta2.BackupBackingImage) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *backupBackingImageGeneratingHandler) storeResourceVersion(obj *v1beta2.BackupBackingImage) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +} diff --git a/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backuptarget.go b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backuptarget.go new file mode 100644 index 000000000..7837c390e --- /dev/null +++ b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backuptarget.go @@ -0,0 +1,208 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + "sync" + "time" + + v1beta2 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// BackupTargetController interface for managing BackupTarget resources. +type BackupTargetController interface { + generic.ControllerInterface[*v1beta2.BackupTarget, *v1beta2.BackupTargetList] +} + +// BackupTargetClient interface for managing BackupTarget resources in Kubernetes. +type BackupTargetClient interface { + generic.ClientInterface[*v1beta2.BackupTarget, *v1beta2.BackupTargetList] +} + +// BackupTargetCache interface for retrieving BackupTarget resources in memory. +type BackupTargetCache interface { + generic.CacheInterface[*v1beta2.BackupTarget] +} + +// BackupTargetStatusHandler is executed for every added or modified BackupTarget. Should return the new status to be updated +type BackupTargetStatusHandler func(obj *v1beta2.BackupTarget, status v1beta2.BackupTargetStatus) (v1beta2.BackupTargetStatus, error) + +// BackupTargetGeneratingHandler is the top-level handler that is executed for every BackupTarget event. 
It extends BackupTargetStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type BackupTargetGeneratingHandler func(obj *v1beta2.BackupTarget, status v1beta2.BackupTargetStatus) ([]runtime.Object, v1beta2.BackupTargetStatus, error) + +// RegisterBackupTargetStatusHandler configures a BackupTargetController to execute a BackupTargetStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterBackupTargetStatusHandler(ctx context.Context, controller BackupTargetController, condition condition.Cond, name string, handler BackupTargetStatusHandler) { + statusHandler := &backupTargetStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterBackupTargetGeneratingHandler configures a BackupTargetController to execute a BackupTargetGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterBackupTargetGeneratingHandler(ctx context.Context, controller BackupTargetController, apply apply.Apply, + condition condition.Cond, name string, handler BackupTargetGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &backupTargetGeneratingHandler{ + BackupTargetGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterBackupTargetStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type backupTargetStatusHandler struct { + client BackupTargetClient + condition condition.Cond + handler BackupTargetStatusHandler +} + +// sync is executed on every resource addition or modification. 
Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *backupTargetStatusHandler) sync(key string, obj *v1beta2.BackupTarget) (*v1beta2.BackupTarget, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type backupTargetGeneratingHandler struct { + BackupTargetGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *backupTargetGeneratingHandler) Remove(key string, obj *v1beta2.BackupTarget) (*v1beta2.BackupTarget, error) { + if obj != nil { + return obj, nil + } + + obj = &v1beta2.BackupTarget{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects() +} + +// Handle executes the configured BackupTargetGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *backupTargetGeneratingHandler) Handle(obj *v1beta2.BackupTarget, status v1beta2.BackupTargetStatus) (v1beta2.BackupTargetStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.BackupTargetGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. 
+// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *backupTargetGeneratingHandler) isNewResourceVersion(obj *v1beta2.BackupTarget) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *backupTargetGeneratingHandler) storeResourceVersion(obj *v1beta2.BackupTarget) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +} diff --git a/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backupvolume.go b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backupvolume.go new file mode 100644 index 000000000..f809b8392 --- /dev/null +++ b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/backupvolume.go @@ -0,0 +1,208 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + "sync" + "time" + + v1beta2 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// BackupVolumeController interface for managing BackupVolume resources. +type BackupVolumeController interface { + generic.ControllerInterface[*v1beta2.BackupVolume, *v1beta2.BackupVolumeList] +} + +// BackupVolumeClient interface for managing BackupVolume resources in Kubernetes. +type BackupVolumeClient interface { + generic.ClientInterface[*v1beta2.BackupVolume, *v1beta2.BackupVolumeList] +} + +// BackupVolumeCache interface for retrieving BackupVolume resources in memory. +type BackupVolumeCache interface { + generic.CacheInterface[*v1beta2.BackupVolume] +} + +// BackupVolumeStatusHandler is executed for every added or modified BackupVolume. Should return the new status to be updated +type BackupVolumeStatusHandler func(obj *v1beta2.BackupVolume, status v1beta2.BackupVolumeStatus) (v1beta2.BackupVolumeStatus, error) + +// BackupVolumeGeneratingHandler is the top-level handler that is executed for every BackupVolume event. 
It extends BackupVolumeStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type BackupVolumeGeneratingHandler func(obj *v1beta2.BackupVolume, status v1beta2.BackupVolumeStatus) ([]runtime.Object, v1beta2.BackupVolumeStatus, error) + +// RegisterBackupVolumeStatusHandler configures a BackupVolumeController to execute a BackupVolumeStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterBackupVolumeStatusHandler(ctx context.Context, controller BackupVolumeController, condition condition.Cond, name string, handler BackupVolumeStatusHandler) { + statusHandler := &backupVolumeStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterBackupVolumeGeneratingHandler configures a BackupVolumeController to execute a BackupVolumeGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterBackupVolumeGeneratingHandler(ctx context.Context, controller BackupVolumeController, apply apply.Apply, + condition condition.Cond, name string, handler BackupVolumeGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &backupVolumeGeneratingHandler{ + BackupVolumeGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterBackupVolumeStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type backupVolumeStatusHandler struct { + client BackupVolumeClient + condition condition.Cond + handler BackupVolumeStatusHandler +} + +// sync is executed on every resource addition or modification. 
Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *backupVolumeStatusHandler) sync(key string, obj *v1beta2.BackupVolume) (*v1beta2.BackupVolume, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type backupVolumeGeneratingHandler struct { + BackupVolumeGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *backupVolumeGeneratingHandler) Remove(key string, obj *v1beta2.BackupVolume) (*v1beta2.BackupVolume, error) { + if obj != nil { + return obj, nil + } + + obj = &v1beta2.BackupVolume{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects() +} + +// Handle executes the configured BackupVolumeGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *backupVolumeGeneratingHandler) Handle(obj *v1beta2.BackupVolume, status v1beta2.BackupVolumeStatus) (v1beta2.BackupVolumeStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.BackupVolumeGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. 
+// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *backupVolumeGeneratingHandler) isNewResourceVersion(obj *v1beta2.BackupVolume) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *backupVolumeGeneratingHandler) storeResourceVersion(obj *v1beta2.BackupVolume) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +} diff --git a/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/engine.go b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/engine.go new file mode 100644 index 000000000..dd86ecb07 --- /dev/null +++ b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/engine.go @@ -0,0 +1,208 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + "sync" + "time" + + v1beta2 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// EngineController interface for managing Engine resources. +type EngineController interface { + generic.ControllerInterface[*v1beta2.Engine, *v1beta2.EngineList] +} + +// EngineClient interface for managing Engine resources in Kubernetes. +type EngineClient interface { + generic.ClientInterface[*v1beta2.Engine, *v1beta2.EngineList] +} + +// EngineCache interface for retrieving Engine resources in memory. +type EngineCache interface { + generic.CacheInterface[*v1beta2.Engine] +} + +// EngineStatusHandler is executed for every added or modified Engine. Should return the new status to be updated +type EngineStatusHandler func(obj *v1beta2.Engine, status v1beta2.EngineStatus) (v1beta2.EngineStatus, error) + +// EngineGeneratingHandler is the top-level handler that is executed for every Engine event. It extends EngineStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type EngineGeneratingHandler func(obj *v1beta2.Engine, status v1beta2.EngineStatus) ([]runtime.Object, v1beta2.EngineStatus, error) + +// RegisterEngineStatusHandler configures a EngineController to execute a EngineStatusHandler for every events observed. 
+// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterEngineStatusHandler(ctx context.Context, controller EngineController, condition condition.Cond, name string, handler EngineStatusHandler) { + statusHandler := &engineStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterEngineGeneratingHandler configures a EngineController to execute a EngineGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterEngineGeneratingHandler(ctx context.Context, controller EngineController, apply apply.Apply, + condition condition.Cond, name string, handler EngineGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &engineGeneratingHandler{ + EngineGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterEngineStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type engineStatusHandler struct { + client EngineClient + condition condition.Cond + handler EngineStatusHandler +} + +// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *engineStatusHandler) sync(key string, obj *v1beta2.Engine) (*v1beta2.Engine, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type engineGeneratingHandler struct { + EngineGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *engineGeneratingHandler) Remove(key string, obj *v1beta2.Engine) (*v1beta2.Engine, error) { + if obj != nil { + return obj, nil + } + + obj = &v1beta2.Engine{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). 
+ ApplyObjects() +} + +// Handle executes the configured EngineGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *engineGeneratingHandler) Handle(obj *v1beta2.Engine, status v1beta2.EngineStatus) (v1beta2.EngineStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.EngineGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *engineGeneratingHandler) isNewResourceVersion(obj *v1beta2.Engine) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *engineGeneratingHandler) storeResourceVersion(obj *v1beta2.Engine) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +} diff --git a/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/interface.go b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/interface.go new file mode 100644 index 000000000..85935eabc --- /dev/null +++ b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/interface.go @@ -0,0 +1,104 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. 
+ +package v1beta2 + +import ( + v1beta2 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + "github.com/rancher/lasso/pkg/controller" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/schemes" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +func init() { + schemes.Register(v1beta2.AddToScheme) +} + +type Interface interface { + BackingImage() BackingImageController + BackingImageDataSource() BackingImageDataSourceController + Backup() BackupController + BackupBackingImage() BackupBackingImageController + BackupTarget() BackupTargetController + BackupVolume() BackupVolumeController + Engine() EngineController + Node() NodeController + Replica() ReplicaController + Setting() SettingController + Snapshot() SnapshotController + Volume() VolumeController +} + +func New(controllerFactory controller.SharedControllerFactory) Interface { + return &version{ + controllerFactory: controllerFactory, + } +} + +type version struct { + controllerFactory controller.SharedControllerFactory +} + +func (v *version) BackingImage() BackingImageController { + return generic.NewController[*v1beta2.BackingImage, *v1beta2.BackingImageList](schema.GroupVersionKind{Group: "longhorn.io", Version: "v1beta2", Kind: "BackingImage"}, "backingimages", true, v.controllerFactory) +} + +func (v *version) BackingImageDataSource() BackingImageDataSourceController { + return generic.NewController[*v1beta2.BackingImageDataSource, *v1beta2.BackingImageDataSourceList](schema.GroupVersionKind{Group: "longhorn.io", Version: "v1beta2", Kind: "BackingImageDataSource"}, "backingimagedatasources", true, v.controllerFactory) +} + +func (v *version) Backup() BackupController { + return generic.NewController[*v1beta2.Backup, *v1beta2.BackupList](schema.GroupVersionKind{Group: "longhorn.io", Version: "v1beta2", Kind: "Backup"}, "backups", true, v.controllerFactory) +} + +func (v *version) BackupBackingImage() BackupBackingImageController { + return generic.NewController[*v1beta2.BackupBackingImage, *v1beta2.BackupBackingImageList](schema.GroupVersionKind{Group: "longhorn.io", Version: "v1beta2", Kind: "BackupBackingImage"}, "backupbackingimages", true, v.controllerFactory) +} + +func (v *version) BackupTarget() BackupTargetController { + return generic.NewController[*v1beta2.BackupTarget, *v1beta2.BackupTargetList](schema.GroupVersionKind{Group: "longhorn.io", Version: "v1beta2", Kind: "BackupTarget"}, "backuptargets", true, v.controllerFactory) +} + +func (v *version) BackupVolume() BackupVolumeController { + return generic.NewController[*v1beta2.BackupVolume, *v1beta2.BackupVolumeList](schema.GroupVersionKind{Group: "longhorn.io", Version: "v1beta2", Kind: "BackupVolume"}, "backupvolumes", true, v.controllerFactory) +} + +func (v *version) Engine() EngineController { + return generic.NewController[*v1beta2.Engine, *v1beta2.EngineList](schema.GroupVersionKind{Group: "longhorn.io", Version: "v1beta2", Kind: "Engine"}, "engines", true, v.controllerFactory) +} + +func (v *version) Node() NodeController { + return generic.NewController[*v1beta2.Node, *v1beta2.NodeList](schema.GroupVersionKind{Group: "longhorn.io", Version: "v1beta2", Kind: "Node"}, "nodes", true, v.controllerFactory) +} + +func (v *version) Replica() ReplicaController { + return generic.NewController[*v1beta2.Replica, *v1beta2.ReplicaList](schema.GroupVersionKind{Group: "longhorn.io", Version: "v1beta2", Kind: "Replica"}, "replicas", true, v.controllerFactory) +} + +func (v *version) Setting() SettingController { + return 
generic.NewController[*v1beta2.Setting, *v1beta2.SettingList](schema.GroupVersionKind{Group: "longhorn.io", Version: "v1beta2", Kind: "Setting"}, "settings", true, v.controllerFactory) +} + +func (v *version) Snapshot() SnapshotController { + return generic.NewController[*v1beta2.Snapshot, *v1beta2.SnapshotList](schema.GroupVersionKind{Group: "longhorn.io", Version: "v1beta2", Kind: "Snapshot"}, "snapshots", true, v.controllerFactory) +} + +func (v *version) Volume() VolumeController { + return generic.NewController[*v1beta2.Volume, *v1beta2.VolumeList](schema.GroupVersionKind{Group: "longhorn.io", Version: "v1beta2", Kind: "Volume"}, "volumes", true, v.controllerFactory) +} diff --git a/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/node.go b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/node.go new file mode 100644 index 000000000..8b6a07acd --- /dev/null +++ b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/node.go @@ -0,0 +1,208 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + "sync" + "time" + + v1beta2 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// NodeController interface for managing Node resources. +type NodeController interface { + generic.ControllerInterface[*v1beta2.Node, *v1beta2.NodeList] +} + +// NodeClient interface for managing Node resources in Kubernetes. +type NodeClient interface { + generic.ClientInterface[*v1beta2.Node, *v1beta2.NodeList] +} + +// NodeCache interface for retrieving Node resources in memory. +type NodeCache interface { + generic.CacheInterface[*v1beta2.Node] +} + +// NodeStatusHandler is executed for every added or modified Node. Should return the new status to be updated +type NodeStatusHandler func(obj *v1beta2.Node, status v1beta2.NodeStatus) (v1beta2.NodeStatus, error) + +// NodeGeneratingHandler is the top-level handler that is executed for every Node event. It extends NodeStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type NodeGeneratingHandler func(obj *v1beta2.Node, status v1beta2.NodeStatus) ([]runtime.Object, v1beta2.NodeStatus, error) + +// RegisterNodeStatusHandler configures a NodeController to execute a NodeStatusHandler for every events observed. 
+// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterNodeStatusHandler(ctx context.Context, controller NodeController, condition condition.Cond, name string, handler NodeStatusHandler) { + statusHandler := &nodeStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterNodeGeneratingHandler configures a NodeController to execute a NodeGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterNodeGeneratingHandler(ctx context.Context, controller NodeController, apply apply.Apply, + condition condition.Cond, name string, handler NodeGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &nodeGeneratingHandler{ + NodeGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterNodeStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type nodeStatusHandler struct { + client NodeClient + condition condition.Cond + handler NodeStatusHandler +} + +// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *nodeStatusHandler) sync(key string, obj *v1beta2.Node) (*v1beta2.Node, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type nodeGeneratingHandler struct { + NodeGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *nodeGeneratingHandler) Remove(key string, obj *v1beta2.Node) (*v1beta2.Node, error) { + if obj != nil { + return obj, nil + } + + obj = &v1beta2.Node{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). 
+ ApplyObjects() +} + +// Handle executes the configured NodeGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *nodeGeneratingHandler) Handle(obj *v1beta2.Node, status v1beta2.NodeStatus) (v1beta2.NodeStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.NodeGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *nodeGeneratingHandler) isNewResourceVersion(obj *v1beta2.Node) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *nodeGeneratingHandler) storeResourceVersion(obj *v1beta2.Node) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +} diff --git a/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/replica.go b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/replica.go new file mode 100644 index 000000000..7772fa934 --- /dev/null +++ b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/replica.go @@ -0,0 +1,208 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + "sync" + "time" + + v1beta2 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// ReplicaController interface for managing Replica resources. +type ReplicaController interface { + generic.ControllerInterface[*v1beta2.Replica, *v1beta2.ReplicaList] +} + +// ReplicaClient interface for managing Replica resources in Kubernetes. 
+type ReplicaClient interface { + generic.ClientInterface[*v1beta2.Replica, *v1beta2.ReplicaList] +} + +// ReplicaCache interface for retrieving Replica resources in memory. +type ReplicaCache interface { + generic.CacheInterface[*v1beta2.Replica] +} + +// ReplicaStatusHandler is executed for every added or modified Replica. Should return the new status to be updated +type ReplicaStatusHandler func(obj *v1beta2.Replica, status v1beta2.ReplicaStatus) (v1beta2.ReplicaStatus, error) + +// ReplicaGeneratingHandler is the top-level handler that is executed for every Replica event. It extends ReplicaStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type ReplicaGeneratingHandler func(obj *v1beta2.Replica, status v1beta2.ReplicaStatus) ([]runtime.Object, v1beta2.ReplicaStatus, error) + +// RegisterReplicaStatusHandler configures a ReplicaController to execute a ReplicaStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterReplicaStatusHandler(ctx context.Context, controller ReplicaController, condition condition.Cond, name string, handler ReplicaStatusHandler) { + statusHandler := &replicaStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterReplicaGeneratingHandler configures a ReplicaController to execute a ReplicaGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterReplicaGeneratingHandler(ctx context.Context, controller ReplicaController, apply apply.Apply, + condition condition.Cond, name string, handler ReplicaGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &replicaGeneratingHandler{ + ReplicaGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterReplicaStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type replicaStatusHandler struct { + client ReplicaClient + condition condition.Cond + handler ReplicaStatusHandler +} + +// sync is executed on every resource addition or modification. 
Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *replicaStatusHandler) sync(key string, obj *v1beta2.Replica) (*v1beta2.Replica, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type replicaGeneratingHandler struct { + ReplicaGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *replicaGeneratingHandler) Remove(key string, obj *v1beta2.Replica) (*v1beta2.Replica, error) { + if obj != nil { + return obj, nil + } + + obj = &v1beta2.Replica{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects() +} + +// Handle executes the configured ReplicaGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *replicaGeneratingHandler) Handle(obj *v1beta2.Replica, status v1beta2.ReplicaStatus) (v1beta2.ReplicaStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.ReplicaGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. 
+// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *replicaGeneratingHandler) isNewResourceVersion(obj *v1beta2.Replica) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *replicaGeneratingHandler) storeResourceVersion(obj *v1beta2.Replica) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +} diff --git a/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/setting.go b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/setting.go new file mode 100644 index 000000000..dca865f0f --- /dev/null +++ b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/setting.go @@ -0,0 +1,208 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + "sync" + "time" + + v1beta2 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SettingController interface for managing Setting resources. +type SettingController interface { + generic.ControllerInterface[*v1beta2.Setting, *v1beta2.SettingList] +} + +// SettingClient interface for managing Setting resources in Kubernetes. +type SettingClient interface { + generic.ClientInterface[*v1beta2.Setting, *v1beta2.SettingList] +} + +// SettingCache interface for retrieving Setting resources in memory. +type SettingCache interface { + generic.CacheInterface[*v1beta2.Setting] +} + +// SettingStatusHandler is executed for every added or modified Setting. Should return the new status to be updated +type SettingStatusHandler func(obj *v1beta2.Setting, status v1beta2.SettingStatus) (v1beta2.SettingStatus, error) + +// SettingGeneratingHandler is the top-level handler that is executed for every Setting event. It extends SettingStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type SettingGeneratingHandler func(obj *v1beta2.Setting, status v1beta2.SettingStatus) ([]runtime.Object, v1beta2.SettingStatus, error) + +// RegisterSettingStatusHandler configures a SettingController to execute a SettingStatusHandler for every events observed. 
+// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterSettingStatusHandler(ctx context.Context, controller SettingController, condition condition.Cond, name string, handler SettingStatusHandler) { + statusHandler := &settingStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterSettingGeneratingHandler configures a SettingController to execute a SettingGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterSettingGeneratingHandler(ctx context.Context, controller SettingController, apply apply.Apply, + condition condition.Cond, name string, handler SettingGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &settingGeneratingHandler{ + SettingGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterSettingStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type settingStatusHandler struct { + client SettingClient + condition condition.Cond + handler SettingStatusHandler +} + +// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *settingStatusHandler) sync(key string, obj *v1beta2.Setting) (*v1beta2.Setting, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type settingGeneratingHandler struct { + SettingGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *settingGeneratingHandler) Remove(key string, obj *v1beta2.Setting) (*v1beta2.Setting, error) { + if obj != nil { + return obj, nil + } + + obj = &v1beta2.Setting{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). 
+ ApplyObjects() +} + +// Handle executes the configured SettingGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *settingGeneratingHandler) Handle(obj *v1beta2.Setting, status v1beta2.SettingStatus) (v1beta2.SettingStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.SettingGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *settingGeneratingHandler) isNewResourceVersion(obj *v1beta2.Setting) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *settingGeneratingHandler) storeResourceVersion(obj *v1beta2.Setting) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +} diff --git a/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/snapshot.go b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/snapshot.go new file mode 100644 index 000000000..37e6d3926 --- /dev/null +++ b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/snapshot.go @@ -0,0 +1,208 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + "sync" + "time" + + v1beta2 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SnapshotController interface for managing Snapshot resources. +type SnapshotController interface { + generic.ControllerInterface[*v1beta2.Snapshot, *v1beta2.SnapshotList] +} + +// SnapshotClient interface for managing Snapshot resources in Kubernetes. 
+type SnapshotClient interface { + generic.ClientInterface[*v1beta2.Snapshot, *v1beta2.SnapshotList] +} + +// SnapshotCache interface for retrieving Snapshot resources in memory. +type SnapshotCache interface { + generic.CacheInterface[*v1beta2.Snapshot] +} + +// SnapshotStatusHandler is executed for every added or modified Snapshot. Should return the new status to be updated +type SnapshotStatusHandler func(obj *v1beta2.Snapshot, status v1beta2.SnapshotStatus) (v1beta2.SnapshotStatus, error) + +// SnapshotGeneratingHandler is the top-level handler that is executed for every Snapshot event. It extends SnapshotStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type SnapshotGeneratingHandler func(obj *v1beta2.Snapshot, status v1beta2.SnapshotStatus) ([]runtime.Object, v1beta2.SnapshotStatus, error) + +// RegisterSnapshotStatusHandler configures a SnapshotController to execute a SnapshotStatusHandler for every events observed. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterSnapshotStatusHandler(ctx context.Context, controller SnapshotController, condition condition.Cond, name string, handler SnapshotStatusHandler) { + statusHandler := &snapshotStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterSnapshotGeneratingHandler configures a SnapshotController to execute a SnapshotGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterSnapshotGeneratingHandler(ctx context.Context, controller SnapshotController, apply apply.Apply, + condition condition.Cond, name string, handler SnapshotGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &snapshotGeneratingHandler{ + SnapshotGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterSnapshotStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type snapshotStatusHandler struct { + client SnapshotClient + condition condition.Cond + handler SnapshotStatusHandler +} + +// sync is executed on every resource addition or modification. 
Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *snapshotStatusHandler) sync(key string, obj *v1beta2.Snapshot) (*v1beta2.Snapshot, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type snapshotGeneratingHandler struct { + SnapshotGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *snapshotGeneratingHandler) Remove(key string, obj *v1beta2.Snapshot) (*v1beta2.Snapshot, error) { + if obj != nil { + return obj, nil + } + + obj = &v1beta2.Snapshot{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects() +} + +// Handle executes the configured SnapshotGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *snapshotGeneratingHandler) Handle(obj *v1beta2.Snapshot, status v1beta2.SnapshotStatus) (v1beta2.SnapshotStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.SnapshotGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. 
+// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *snapshotGeneratingHandler) isNewResourceVersion(obj *v1beta2.Snapshot) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *snapshotGeneratingHandler) storeResourceVersion(obj *v1beta2.Snapshot) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +} diff --git a/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/volume.go b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/volume.go new file mode 100644 index 000000000..d8f9ccf2d --- /dev/null +++ b/vendor/github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2/volume.go @@ -0,0 +1,208 @@ +/* +Copyright 2025 Rancher Labs, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by main. DO NOT EDIT. + +package v1beta2 + +import ( + "context" + "sync" + "time" + + v1beta2 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + "github.com/rancher/wrangler/v3/pkg/apply" + "github.com/rancher/wrangler/v3/pkg/condition" + "github.com/rancher/wrangler/v3/pkg/generic" + "github.com/rancher/wrangler/v3/pkg/kv" + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// VolumeController interface for managing Volume resources. +type VolumeController interface { + generic.ControllerInterface[*v1beta2.Volume, *v1beta2.VolumeList] +} + +// VolumeClient interface for managing Volume resources in Kubernetes. +type VolumeClient interface { + generic.ClientInterface[*v1beta2.Volume, *v1beta2.VolumeList] +} + +// VolumeCache interface for retrieving Volume resources in memory. +type VolumeCache interface { + generic.CacheInterface[*v1beta2.Volume] +} + +// VolumeStatusHandler is executed for every added or modified Volume. Should return the new status to be updated +type VolumeStatusHandler func(obj *v1beta2.Volume, status v1beta2.VolumeStatus) (v1beta2.VolumeStatus, error) + +// VolumeGeneratingHandler is the top-level handler that is executed for every Volume event. It extends VolumeStatusHandler by a returning a slice of child objects to be passed to apply.Apply +type VolumeGeneratingHandler func(obj *v1beta2.Volume, status v1beta2.VolumeStatus) ([]runtime.Object, v1beta2.VolumeStatus, error) + +// RegisterVolumeStatusHandler configures a VolumeController to execute a VolumeStatusHandler for every events observed. 
+// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterVolumeStatusHandler(ctx context.Context, controller VolumeController, condition condition.Cond, name string, handler VolumeStatusHandler) { + statusHandler := &volumeStatusHandler{ + client: controller, + condition: condition, + handler: handler, + } + controller.AddGenericHandler(ctx, name, generic.FromObjectHandlerToHandler(statusHandler.sync)) +} + +// RegisterVolumeGeneratingHandler configures a VolumeController to execute a VolumeGeneratingHandler for every events observed, passing the returned objects to the provided apply.Apply. +// If a non-empty condition is provided, it will be updated in the status conditions for every handler execution +func RegisterVolumeGeneratingHandler(ctx context.Context, controller VolumeController, apply apply.Apply, + condition condition.Cond, name string, handler VolumeGeneratingHandler, opts *generic.GeneratingHandlerOptions) { + statusHandler := &volumeGeneratingHandler{ + VolumeGeneratingHandler: handler, + apply: apply, + name: name, + gvk: controller.GroupVersionKind(), + } + if opts != nil { + statusHandler.opts = *opts + } + controller.OnChange(ctx, name, statusHandler.Remove) + RegisterVolumeStatusHandler(ctx, controller, condition, name, statusHandler.Handle) +} + +type volumeStatusHandler struct { + client VolumeClient + condition condition.Cond + handler VolumeStatusHandler +} + +// sync is executed on every resource addition or modification. Executes the configured handlers and sends the updated status to the Kubernetes API +func (a *volumeStatusHandler) sync(key string, obj *v1beta2.Volume) (*v1beta2.Volume, error) { + if obj == nil { + return obj, nil + } + + origStatus := obj.Status.DeepCopy() + obj = obj.DeepCopy() + newStatus, err := a.handler(obj, obj.Status) + if err != nil { + // Revert to old status on error + newStatus = *origStatus.DeepCopy() + } + + if a.condition != "" { + if errors.IsConflict(err) { + a.condition.SetError(&newStatus, "", nil) + } else { + a.condition.SetError(&newStatus, "", err) + } + } + if !equality.Semantic.DeepEqual(origStatus, &newStatus) { + if a.condition != "" { + // Since status has changed, update the lastUpdatedTime + a.condition.LastUpdated(&newStatus, time.Now().UTC().Format(time.RFC3339)) + } + + var newErr error + obj.Status = newStatus + newObj, newErr := a.client.UpdateStatus(obj) + if err == nil { + err = newErr + } + if newErr == nil { + obj = newObj + } + } + return obj, err +} + +type volumeGeneratingHandler struct { + VolumeGeneratingHandler + apply apply.Apply + opts generic.GeneratingHandlerOptions + gvk schema.GroupVersionKind + name string + seen sync.Map +} + +// Remove handles the observed deletion of a resource, cascade deleting every associated resource previously applied +func (a *volumeGeneratingHandler) Remove(key string, obj *v1beta2.Volume) (*v1beta2.Volume, error) { + if obj != nil { + return obj, nil + } + + obj = &v1beta2.Volume{} + obj.Namespace, obj.Name = kv.RSplit(key, "/") + obj.SetGroupVersionKind(a.gvk) + + if a.opts.UniqueApplyForResourceVersion { + a.seen.Delete(key) + } + + return nil, generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). 
+ ApplyObjects() +} + +// Handle executes the configured VolumeGeneratingHandler and pass the resulting objects to apply.Apply, finally returning the new status of the resource +func (a *volumeGeneratingHandler) Handle(obj *v1beta2.Volume, status v1beta2.VolumeStatus) (v1beta2.VolumeStatus, error) { + if !obj.DeletionTimestamp.IsZero() { + return status, nil + } + + objs, newStatus, err := a.VolumeGeneratingHandler(obj, status) + if err != nil { + return newStatus, err + } + if !a.isNewResourceVersion(obj) { + return newStatus, nil + } + + err = generic.ConfigureApplyForObject(a.apply, obj, &a.opts). + WithOwner(obj). + WithSetID(a.name). + ApplyObjects(objs...) + if err != nil { + return newStatus, err + } + a.storeResourceVersion(obj) + return newStatus, nil +} + +// isNewResourceVersion detects if a specific resource version was already successfully processed. +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *volumeGeneratingHandler) isNewResourceVersion(obj *v1beta2.Volume) bool { + if !a.opts.UniqueApplyForResourceVersion { + return true + } + + // Apply once per resource version + key := obj.Namespace + "/" + obj.Name + previous, ok := a.seen.Load(key) + return !ok || previous != obj.ResourceVersion +} + +// storeResourceVersion keeps track of the latest resource version of an object for which Apply was executed +// Only used if UniqueApplyForResourceVersion is set in generic.GeneratingHandlerOptions +func (a *volumeGeneratingHandler) storeResourceVersion(obj *v1beta2.Volume) { + if !a.opts.UniqueApplyForResourceVersion { + return + } + + key := obj.Namespace + "/" + obj.Name + a.seen.Store(key, obj.ResourceVersion) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 6e377bcad..aba3554a4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -87,6 +87,8 @@ github.com/harvester/harvester/pkg/apis/harvesterhci.io github.com/harvester/harvester/pkg/apis/harvesterhci.io/v1beta1 github.com/harvester/harvester/pkg/generated/controllers/harvesterhci.io github.com/harvester/harvester/pkg/generated/controllers/harvesterhci.io/v1beta1 +github.com/harvester/harvester/pkg/generated/controllers/longhorn.io +github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2 # github.com/harvester/webhook v0.1.5 ## explicit; go 1.22.7 github.com/harvester/webhook/pkg/clients From fe49b5f1a64d11878bb0e654afd2655d3532f1c3 Mon Sep 17 00:00:00 2001 From: Martin Dekov Date: Mon, 13 Oct 2025 16:59:33 +0300 Subject: [PATCH 2/7] Add unit tests for blockdevice Adding unit tests for blockdevice validations' Update method. The change requires very basic fake implementation of the cache objects used. Also pipeline has additional step and very basic go test which can be further optimized to output the coverage in file. 
Signed-off-by: Martin Dekov --- .github/workflows/basic-ci.yaml | 3 + pkg/utils/fake/backingimage.go | 33 +++ pkg/utils/fake/blockdevice.go | 52 +++++ pkg/utils/fake/longhornnode.go | 33 +++ pkg/utils/fake/persistentvolumecache.go | 47 ++++ pkg/utils/fake/replicacache.go | 33 +++ pkg/utils/fake/storageclass.go | 48 ++++ pkg/utils/fake/volumecache.go | 50 ++++ pkg/webhook/blockdevice/validator_test.go | 263 ++++++++++++++++++++++ scripts/unit-test | 7 + 10 files changed, 569 insertions(+) create mode 100644 pkg/utils/fake/backingimage.go create mode 100644 pkg/utils/fake/blockdevice.go create mode 100644 pkg/utils/fake/longhornnode.go create mode 100644 pkg/utils/fake/persistentvolumecache.go create mode 100644 pkg/utils/fake/replicacache.go create mode 100644 pkg/utils/fake/storageclass.go create mode 100644 pkg/utils/fake/volumecache.go create mode 100644 pkg/webhook/blockdevice/validator_test.go create mode 100755 scripts/unit-test diff --git a/.github/workflows/basic-ci.yaml b/.github/workflows/basic-ci.yaml index eff113eb7..9b2f57b50 100644 --- a/.github/workflows/basic-ci.yaml +++ b/.github/workflows/basic-ci.yaml @@ -25,6 +25,9 @@ jobs: run: | make validate make validate-ci + - name: "Run unit-tests" + run: | + make unit-test job-new-installation: needs: validation runs-on: diff --git a/pkg/utils/fake/backingimage.go b/pkg/utils/fake/backingimage.go new file mode 100644 index 000000000..491350ac0 --- /dev/null +++ b/pkg/utils/fake/backingimage.go @@ -0,0 +1,33 @@ +package fake + +import ( + lhv1beta2 "github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2" + lhv1 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + "github.com/rancher/wrangler/v3/pkg/generic" + "k8s.io/apimachinery/pkg/labels" +) + +type FakeBackingImageCache struct { + backingImages []*lhv1.BackingImage +} + +func NewBackingImageCache(backingImagesToServe []*lhv1.BackingImage) lhv1beta2.BackingImageCache { + return &FakeBackingImageCache{ + backingImages: backingImagesToServe, + } +} + +func (f *FakeBackingImageCache) AddIndexer(indexName string, indexer generic.Indexer[*lhv1.BackingImage]) { +} + +func (f *FakeBackingImageCache) Get(namespace string, name string) (*lhv1.BackingImage, error) { + panic("unimplemented") +} + +func (f *FakeBackingImageCache) GetByIndex(indexName string, key string) ([]*lhv1.BackingImage, error) { + return f.backingImages, nil +} + +func (f *FakeBackingImageCache) List(namespace string, selector labels.Selector) ([]*lhv1.BackingImage, error) { + panic("unimplemented") +} diff --git a/pkg/utils/fake/blockdevice.go b/pkg/utils/fake/blockdevice.go new file mode 100644 index 000000000..bcffdc098 --- /dev/null +++ b/pkg/utils/fake/blockdevice.go @@ -0,0 +1,52 @@ +package fake + +import ( + diskv1 "github.com/harvester/node-disk-manager/pkg/apis/harvesterhci.io/v1beta1" + ctldiskv1 "github.com/harvester/node-disk-manager/pkg/generated/controllers/harvesterhci.io/v1beta1" + "github.com/rancher/wrangler/v3/pkg/generic" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type FakeBlockDeviceCache struct { + devices []*diskv1.BlockDevice +} + +func NewBlockDeviceCache(devicesToServe []*diskv1.BlockDevice) ctldiskv1.BlockDeviceCache { + return &FakeBlockDeviceCache{ + devices: devicesToServe, + } +} + +func (c *FakeBlockDeviceCache) AddIndexer(indexName string, indexer generic.Indexer[*diskv1.BlockDevice]) { + panic("unimplemented") +} + +func (c *FakeBlockDeviceCache) Get(namespace, name 
string) (*diskv1.BlockDevice, error) { + for _, device := range c.devices { + if device.Namespace == namespace && device.Name == name { + return device.DeepCopy(), nil + } + } + return nil, errors.NewNotFound(schema.GroupResource{}, name) +} + +func (c *FakeBlockDeviceCache) GetByIndex(indexName, key string) ([]*diskv1.BlockDevice, error) { + panic("unimplemented") +} + +func (c *FakeBlockDeviceCache) List(namespace string, selector labels.Selector) ([]*diskv1.BlockDevice, error) { + var matching []*diskv1.BlockDevice + + for _, device := range c.devices { + if namespace != "" && device.Namespace != namespace { + continue + } + + if selector.Matches(labels.Set(device.GetLabels())) { + matching = append(matching, device.DeepCopy()) + } + } + return matching, nil +} diff --git a/pkg/utils/fake/longhornnode.go b/pkg/utils/fake/longhornnode.go new file mode 100644 index 000000000..690bbedaf --- /dev/null +++ b/pkg/utils/fake/longhornnode.go @@ -0,0 +1,33 @@ +package fake + +import ( + lhv1beta2 "github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2" + lhv1 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + "github.com/rancher/wrangler/v3/pkg/generic" + "k8s.io/apimachinery/pkg/labels" +) + +type FakeLonghornNodeCache struct { + nodes []*lhv1.Node +} + +func NewLonghornNodeCache(nodesToServe []*lhv1.Node) lhv1beta2.NodeCache { + return &FakeLonghornNodeCache{ + nodes: nodesToServe, + } +} + +func (f *FakeLonghornNodeCache) AddIndexer(indexName string, indexer generic.Indexer[*lhv1.Node]) { +} + +func (f *FakeLonghornNodeCache) Get(namespace string, name string) (*lhv1.Node, error) { + return f.nodes[0], nil +} + +func (f *FakeLonghornNodeCache) GetByIndex(indexName string, key string) ([]*lhv1.Node, error) { + return f.nodes, nil +} + +func (f *FakeLonghornNodeCache) List(namespace string, selector labels.Selector) ([]*lhv1.Node, error) { + panic("unimplemented") +} diff --git a/pkg/utils/fake/persistentvolumecache.go b/pkg/utils/fake/persistentvolumecache.go new file mode 100644 index 000000000..ad39ed794 --- /dev/null +++ b/pkg/utils/fake/persistentvolumecache.go @@ -0,0 +1,47 @@ +package fake + +import ( + ctlcorev1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" + "github.com/rancher/wrangler/v3/pkg/generic" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type FakePersistentVolumeCache struct { + pvs []*v1.PersistentVolume +} + +func NewPersistentVolumeCache(pvsToServe []*v1.PersistentVolume) ctlcorev1.PersistentVolumeCache { + return &FakePersistentVolumeCache{ + pvs: pvsToServe, + } +} + +func (f *FakePersistentVolumeCache) AddIndexer(indexName string, indexer generic.Indexer[*v1.PersistentVolume]) { + panic("unimplemented") +} + +func (f *FakePersistentVolumeCache) Get(name string) (*v1.PersistentVolume, error) { + for _, pv := range f.pvs { + if pv.Name == name { + return pv.DeepCopy(), nil + } + } + return nil, errors.NewNotFound(schema.GroupResource{}, name) +} + +func (f *FakePersistentVolumeCache) GetByIndex(indexName string, key string) ([]*v1.PersistentVolume, error) { + panic("unimplemented") +} + +func (f *FakePersistentVolumeCache) List(selector labels.Selector) ([]*v1.PersistentVolume, error) { + var matchingPVs []*v1.PersistentVolume + for _, pv := range f.pvs { + if selector.Matches(labels.Set(pv.Labels)) { + matchingPVs = append(matchingPVs, pv.DeepCopy()) + } + } + return matchingPVs, nil +} diff --git 
a/pkg/utils/fake/replicacache.go b/pkg/utils/fake/replicacache.go new file mode 100644 index 000000000..162a24bb5 --- /dev/null +++ b/pkg/utils/fake/replicacache.go @@ -0,0 +1,33 @@ +package fake + +import ( + lhv1beta2 "github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2" + lhv1 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + "github.com/rancher/wrangler/v3/pkg/generic" + "k8s.io/apimachinery/pkg/labels" +) + +type FakeReplicaCache struct { + replicas []*lhv1.Replica +} + +func NewReplicaCache(replicasToServe []*lhv1.Replica) lhv1beta2.ReplicaCache { + return &FakeReplicaCache{ + replicas: replicasToServe, + } +} + +func (f *FakeReplicaCache) AddIndexer(indexName string, indexer generic.Indexer[*lhv1.Replica]) { +} + +func (f *FakeReplicaCache) Get(namespace string, name string) (*lhv1.Replica, error) { + panic("unimplemented") +} + +func (f *FakeReplicaCache) GetByIndex(indexName string, key string) ([]*lhv1.Replica, error) { + return f.replicas, nil +} + +func (f *FakeReplicaCache) List(namespace string, selector labels.Selector) ([]*lhv1.Replica, error) { + panic("unimplemented") +} diff --git a/pkg/utils/fake/storageclass.go b/pkg/utils/fake/storageclass.go new file mode 100644 index 000000000..ef35d0d20 --- /dev/null +++ b/pkg/utils/fake/storageclass.go @@ -0,0 +1,48 @@ +package fake + +import ( + ctlstoragev1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/v1" + "github.com/rancher/wrangler/v3/pkg/generic" + storagev1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type FakeStorageClassCache struct { + scs []*storagev1.StorageClass +} + +func NewStorageClassCache(scsToServe []*storagev1.StorageClass) ctlstoragev1.StorageClassCache { + return &FakeStorageClassCache{ + scs: scsToServe, + } +} + +func (f *FakeStorageClassCache) AddIndexer(indexName string, indexer generic.Indexer[*storagev1.StorageClass]) { + panic("unimplemented") +} + +func (f *FakeStorageClassCache) Get(name string) (*storagev1.StorageClass, error) { + for _, storageClass := range f.scs { + if storageClass.Name == name { + return storageClass.DeepCopy(), nil + } + } + return nil, errors.NewNotFound(schema.GroupResource{}, name) +} + +func (f *FakeStorageClassCache) GetByIndex(indexName string, key string) ([]*storagev1.StorageClass, error) { + panic("unimplemented") +} + +func (f *FakeStorageClassCache) List(selector labels.Selector) ([]*storagev1.StorageClass, error) { + var matchingSCs []*storagev1.StorageClass + for _, storageClass := range f.scs { + // Check if the StorageClass's labels match the provided selector. 
+ if selector.Matches(labels.Set(storageClass.Labels)) { + matchingSCs = append(matchingSCs, storageClass.DeepCopy()) + } + } + return matchingSCs, nil +} diff --git a/pkg/utils/fake/volumecache.go b/pkg/utils/fake/volumecache.go new file mode 100644 index 000000000..894922c06 --- /dev/null +++ b/pkg/utils/fake/volumecache.go @@ -0,0 +1,50 @@ +package fake + +import ( + lhv1 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + "github.com/rancher/wrangler/v3/pkg/generic" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + + lhv1beta2 "github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2" +) + +type FakeVolumeCache struct { + volumes []*lhv1.Volume +} + +func NewVolumeCache(volsToServe []*lhv1.Volume) lhv1beta2.VolumeCache { + return &FakeVolumeCache{ + volumes: volsToServe, + } +} + +func (f *FakeVolumeCache) AddIndexer(indexName string, indexer generic.Indexer[*lhv1.Volume]) { + panic("unimplemented") +} + +func (f *FakeVolumeCache) Get(namespace string, name string) (*lhv1.Volume, error) { + for _, volume := range f.volumes { + if volume.Namespace == namespace && volume.Name == name { + return volume.DeepCopy(), nil + } + } + return nil, errors.NewNotFound(schema.GroupResource{}, name) +} + +func (f *FakeVolumeCache) GetByIndex(indexName string, key string) ([]*lhv1.Volume, error) { + panic("unimplemented") +} + +func (f *FakeVolumeCache) List(namespace string, selector labels.Selector) ([]*lhv1.Volume, error) { + var matchingVolumes []*lhv1.Volume + for _, volume := range f.volumes { + if namespace == "" || volume.Namespace == namespace { + if selector.Matches(labels.Set(volume.Labels)) { + matchingVolumes = append(matchingVolumes, volume.DeepCopy()) + } + } + } + return matchingVolumes, nil +} diff --git a/pkg/webhook/blockdevice/validator_test.go b/pkg/webhook/blockdevice/validator_test.go new file mode 100644 index 000000000..62a5b2e75 --- /dev/null +++ b/pkg/webhook/blockdevice/validator_test.go @@ -0,0 +1,263 @@ +package blockdevice + +import ( + "testing" + + lhv1beta2 "github.com/harvester/harvester/pkg/generated/controllers/longhorn.io/v1beta2" + diskv1 "github.com/harvester/node-disk-manager/pkg/apis/harvesterhci.io/v1beta1" + ctldiskv1 "github.com/harvester/node-disk-manager/pkg/generated/controllers/harvesterhci.io/v1beta1" + "github.com/harvester/node-disk-manager/pkg/utils" + "github.com/harvester/node-disk-manager/pkg/utils/fake" + lhv1 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2" + ctlcorev1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/core/v1" + ctlstoragev1 "github.com/rancher/wrangler/v3/pkg/generated/controllers/storage/v1" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestUpdate(t *testing.T) { + tests := []struct { + name string + blockDeviceToCache []*diskv1.BlockDevice + scsToCache []*storagev1.StorageClass + pvsToCache []*v1.PersistentVolume + volsToCache []*lhv1.Volume + nodesToCache []*v1.Node + biToCache []*lhv1.BackingImage + lhNodesToCache []*lhv1.Node + replicasToCache []*lhv1.Replica + oldBlockDevice *diskv1.BlockDevice + newBlockDeice *diskv1.BlockDevice + expectedErr bool + }{ + { + name: "disk removal with a volume and backingimage that has healthy replicas elsewhere", + replicasToCache: []*lhv1.Replica{ + newReplica("rep-1", "vol-1", "node-1", "disk-uuid-1", false), + newReplica("rep-2", "vol-1", 
"node-2", "disk-uuid-2", false), + }, + volsToCache: []*lhv1.Volume{ + newVolume("vol-1"), + }, + lhNodesToCache: []*lhv1.Node{ + newLHNode("node-1", map[string]string{"blockdevice-to-remove": "disk-uuid-1"}), + }, + biToCache: []*lhv1.BackingImage{ + newBackingImage("safe-image", map[string]lhv1.BackingImageState{ + "disk-uuid-1": lhv1.BackingImageStateReady, + "disk-uuid-2": lhv1.BackingImageStateReady, + }), + }, + oldBlockDevice: newBlockDevice("blockdevice-to-remove", "node-1", true), + newBlockDeice: newBlockDevice("blockdevice-to-remove", "node-1", false), + expectedErr: false, + }, + { + name: "disk removal rejected with a volume with single replica and backing image with multiple replicas", + replicasToCache: []*lhv1.Replica{ + newReplica("rep-1", "vol-1", "node-1", "disk-uuid-1", false), + }, + volsToCache: []*lhv1.Volume{ + newVolume("vol-1"), + }, + lhNodesToCache: []*lhv1.Node{ + newLHNode("node-1", map[string]string{"blockdevice-to-remove": "disk-uuid-1"}), + }, + biToCache: []*lhv1.BackingImage{ + newBackingImage("safe-image", map[string]lhv1.BackingImageState{ + "disk-uuid-1": lhv1.BackingImageStateReady, + "disk-uuid-2": lhv1.BackingImageStateReady, + }), + }, + oldBlockDevice: newBlockDevice("blockdevice-to-remove", "node-1", true), + newBlockDeice: newBlockDevice("blockdevice-to-remove", "node-1", false), + expectedErr: true, + }, + { + name: "disk removal rejected with replicated volume but single healthy backing image", + replicasToCache: []*lhv1.Replica{ + newReplica("rep-1", "vol-1", "node-1", "disk-uuid-1", false), + newReplica("rep-2", "vol-1", "node-2", "disk-uuid-2", false), + }, + volsToCache: []*lhv1.Volume{ + newVolume("vol-1"), + }, + lhNodesToCache: []*lhv1.Node{ + newLHNode("node-1", map[string]string{"blockdevice-to-remove": "disk-uuid-1"}), + }, + biToCache: []*lhv1.BackingImage{ + newBackingImage("safe-image", map[string]lhv1.BackingImageState{ + "disk-uuid-1": lhv1.BackingImageStateReady, + }), + }, + oldBlockDevice: newBlockDevice("blockdevice-to-remove", "node-1", true), + newBlockDeice: newBlockDevice("blockdevice-to-remove", "node-1", false), + expectedErr: true, + }, + { + name: "disk removal allowed when volumes contain all failed replicas and replicated backing image", + replicasToCache: []*lhv1.Replica{ + newReplica("rep-1", "vol-1", "node-1", "disk-uuid-1", true), + newReplica("rep-2", "vol-1", "node-2", "disk-uuid-2", true), + }, + volsToCache: []*lhv1.Volume{ + newVolume("vol-1"), + }, + lhNodesToCache: []*lhv1.Node{ + newLHNode("node-1", map[string]string{"blockdevice-to-remove": "disk-uuid-1"}), + }, + biToCache: []*lhv1.BackingImage{ + newBackingImage("safe-image", map[string]lhv1.BackingImageState{ + "disk-uuid-1": lhv1.BackingImageStateReady, + "disk-uuid-2": lhv1.BackingImageStateReady, + }), + }, + oldBlockDevice: newBlockDevice("blockdevice-to-remove", "node-1", true), + newBlockDeice: newBlockDevice("blockdevice-to-remove", "node-1", false), + expectedErr: false, + }, + { + name: "disk removal allowed when volumes are all failed and backing images are all in non ready state", + replicasToCache: []*lhv1.Replica{ + newReplica("rep-1", "vol-1", "node-1", "disk-uuid-1", true), + newReplica("rep-2", "vol-1", "node-2", "disk-uuid-2", true), + }, + volsToCache: []*lhv1.Volume{ + newVolume("vol-1"), + }, + lhNodesToCache: []*lhv1.Node{ + newLHNode("node-1", map[string]string{"blockdevice-to-remove": "disk-uuid-1"}), + }, + biToCache: []*lhv1.BackingImage{ + newBackingImage("safe-image", map[string]lhv1.BackingImageState{ + "disk-uuid-1": 
lhv1.BackingImageStateFailed, + "disk-uuid-2": lhv1.BackingImageStateInProgress, + }), + }, + oldBlockDevice: newBlockDevice("blockdevice-to-remove", "node-1", true), + newBlockDeice: newBlockDevice("blockdevice-to-remove", "node-1", false), + expectedErr: false, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var bdCache ctldiskv1.BlockDeviceCache + var scCache ctlstoragev1.StorageClassCache + var pvCache ctlcorev1.PersistentVolumeCache + var volCache lhv1beta2.VolumeCache + var lhNodeCache lhv1beta2.NodeCache + var backingImageCache lhv1beta2.BackingImageCache + var replicaCache lhv1beta2.ReplicaCache + if test.blockDeviceToCache != nil { + bdCache = fake.NewBlockDeviceCache(test.blockDeviceToCache) + } + if test.scsToCache != nil { + scCache = fake.NewStorageClassCache(test.scsToCache) + } + if test.pvsToCache != nil { + pvCache = fake.NewPersistentVolumeCache(test.pvsToCache) + } + if test.volsToCache != nil { + volCache = fake.NewVolumeCache(test.volsToCache) + } + if test.lhNodesToCache != nil { + lhNodeCache = fake.NewLonghornNodeCache(test.lhNodesToCache) + } + if test.biToCache != nil { + backingImageCache = fake.NewBackingImageCache(test.biToCache) + } + if test.replicasToCache != nil { + replicaCache = fake.NewReplicaCache(test.replicasToCache) + } + validator := NewBlockdeviceValidator(bdCache, scCache, pvCache, volCache, backingImageCache, lhNodeCache, replicaCache) + err := validator.Update(nil, test.oldBlockDevice, test.newBlockDeice) + if test.expectedErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func newBlockDevice(name, nodeName string, provision bool) *diskv1.BlockDevice { + return &diskv1.BlockDevice{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: diskv1.BlockDeviceSpec{ + Provisioner: &diskv1.ProvisionerInfo{ + Longhorn: &diskv1.LonghornProvisionerInfo{}, + }, + Provision: provision, + NodeName: nodeName, + }, + } +} + +func newLHNode(name string, disks map[string]string) *lhv1.Node { + diskStatus := make(map[string]*lhv1.DiskStatus) + for bdName, uuid := range disks { + diskStatus[bdName] = &lhv1.DiskStatus{DiskUUID: uuid} + } + + return &lhv1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: utils.LonghornSystemNamespaceName, + }, + Status: lhv1.NodeStatus{ + DiskStatus: diskStatus, + }, + } +} + +func newVolume(name string) *lhv1.Volume { + return &lhv1.Volume{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: utils.LonghornSystemNamespaceName, + }, + } +} + +func newReplica(name, volName, nodeID, diskID string, isFailed bool) *lhv1.Replica { + failedAt := "" + if isFailed { + failedAt = "2025-10-13T10:00:00Z" + } + return &lhv1.Replica{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: utils.LonghornSystemNamespaceName, + }, + Spec: lhv1.ReplicaSpec{ + InstanceSpec: lhv1.InstanceSpec{ + VolumeName: volName, + NodeID: nodeID, + }, + DiskID: diskID, + FailedAt: failedAt, + }, + } +} + +func newBackingImage(name string, diskStatuses map[string]lhv1.BackingImageState) *lhv1.BackingImage { + statusMap := make(map[string]*lhv1.BackingImageDiskFileStatus) + for uuid, state := range diskStatuses { + statusMap[uuid] = &lhv1.BackingImageDiskFileStatus{State: state} + } + + return &lhv1.BackingImage{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: utils.LonghornSystemNamespaceName, + }, + Status: lhv1.BackingImageStatus{ + DiskFileStatusMap: statusMap, + }, + } +} diff --git a/scripts/unit-test b/scripts/unit-test new file mode 100755 index 
000000000..da56b8dc4 --- /dev/null +++ b/scripts/unit-test @@ -0,0 +1,7 @@ +#!/bin/bash +set -e + +cd $(dirname $0)/.. + +echo Running unit tests +go test -v -cover ./pkg/... \ No newline at end of file From a521a4e0fde1791965b3c2a146b52f575ef907b0 Mon Sep 17 00:00:00 2001 From: Martin Dekov Date: Fri, 17 Oct 2025 15:32:06 +0300 Subject: [PATCH 3/7] Refactor code to reduce complexity Refactoring code so logic in: * validateLHDisk - is series of checks which return early in case of no matches to reduce the previous complex if statement * validateVolumes - extracted diskUUID retrival in stand alone function to be reused and for each general step extracted in stand alone methods * validateBackingImages - reused the diskUUID from the volumes and extracted the logic of determining whether it's safe or not to delete backingimage to stand alone mehod Signed-off-by: Martin Dekov --- pkg/webhook/blockdevice/validator.go | 165 ++++++++++++++++----------- 1 file changed, 98 insertions(+), 67 deletions(-) diff --git a/pkg/webhook/blockdevice/validator.go b/pkg/webhook/blockdevice/validator.go index 2ba541268..473065444 100644 --- a/pkg/webhook/blockdevice/validator.go +++ b/pkg/webhook/blockdevice/validator.go @@ -91,17 +91,22 @@ func (v *Validator) validateProvisioner(bd *diskv1.BlockDevice) error { } func (v *Validator) validateLHDisk(oldBd, newBd *diskv1.BlockDevice) error { - if oldBd.Spec.Provisioner != nil && newBd.Spec.Provisioner != nil && - oldBd.Spec.Provisioner.Longhorn != nil && newBd.Spec.Provisioner.Longhorn != nil && - oldBd.Spec.Provision && !newBd.Spec.Provision { - err := v.validateVolumes(oldBd) - if err != nil { - return err - } - err = v.validateBackingImages(oldBd) - if err != nil { - return err - } + if oldBd.Spec.Provisioner == nil || newBd.Spec.Provisioner == nil { + return nil + } + if oldBd.Spec.Provisioner.Longhorn == nil || newBd.Spec.Provisioner.Longhorn == nil { + return nil + } + if !oldBd.Spec.Provision || newBd.Spec.Provision { + return nil + } + err := v.validateVolumes(oldBd) + if err != nil { + return err + } + err = v.validateBackingImages(oldBd) + if err != nil { + return err } return nil } @@ -172,38 +177,102 @@ func (v *Validator) validateVGIsAlreadyUsed(bd *diskv1.BlockDevice) error { } func (v *Validator) validateVolumes(old *diskv1.BlockDevice) error { - lhNode, err := v.lhNodeCache.Get(utils.LonghornSystemNamespaceName, old.Spec.NodeName) + uuid, err := v.getDiskUUID(old) + if err != nil { + return err + } + if uuid == "" { + return nil + } + + volumesToCheck, err := v.getVolumesOnDisk(uuid) if err != nil { - errStr := fmt.Sprintf("Failed to get longhorn node %s: %s", old.Spec.NodeName, err.Error()) + return err + } + + unsafeVolumes, err := v.findUnsafeVolumes(volumesToCheck, uuid) + if err != nil { + return err + } + + if len(unsafeVolumes) > 0 { + errStr := fmt.Sprintf("Cannot remove disk %s because it hosts the only healthy replica for the following volumes: %s", + old.Spec.DevPath, strings.Join(unsafeVolumes, ", ")) return werror.NewBadRequest(errStr) } - diskStatus, ok := lhNode.Status.DiskStatus[old.Name] - if !ok || diskStatus == nil { + return nil +} + +func (v *Validator) validateBackingImages(old *diskv1.BlockDevice) error { + uuid, err := v.getDiskUUID(old) + if err != nil { + return err + } + if uuid == "" { return nil } - targetDiskUUID := diskStatus.DiskUUID - replicaObjs, err := v.replicaCache.GetByIndex(ReplicaByDiskUUID, targetDiskUUID) + backingImages, err := v.backingImageCache.GetByIndex(BackingImageByDiskUUID, uuid) if err != nil { - 
errStr := fmt.Sprintf("Failed to get replicas by disk UUID %s: %s", targetDiskUUID, err.Error()) + errStr := fmt.Sprintf("Error looking up backing images by disk UUID %s: %s", uuid, err.Error()) return werror.NewBadRequest(errStr) } - if len(replicaObjs) == 0 { + if len(backingImages) == 0 { return nil } - volumesToCheck := make(map[string]struct{}) + unsafeToRemoveBackingImages := v.findUnsafeUnsafeBackingImages(backingImages, uuid) + if len(unsafeToRemoveBackingImages) > 0 { + errStr := fmt.Sprintf("Cannot remove disk %s as it contains the only ready copy for the following backing images: %s", + old.Name, strings.Join(unsafeToRemoveBackingImages, ", ")) + return werror.NewBadRequest(errStr) + } + + return nil +} + +func (v *Validator) getDiskUUID(bd *diskv1.BlockDevice) (string, error) { + lhNodes, err := v.lhNodeCache.GetByIndex(NodeByBlockDeviceName, bd.Name) + if err != nil { + errStr := fmt.Sprintf("Error looking up node by blockdevice name %s: %s", bd.Name, err.Error()) + return "", werror.NewBadRequest(errStr) + } + if len(lhNodes) != 1 || lhNodes[0] == nil { + return "", nil + } + + lhNode := lhNodes[0] + diskStatus, ok := lhNode.Status.DiskStatus[bd.Name] + if !ok || diskStatus.DiskUUID == "" { + return "", nil + } + + return diskStatus.DiskUUID, nil +} + +func (v *Validator) getVolumesOnDisk(targetDiskUUID string) ([]string, error) { + replicaObjs, err := v.replicaCache.GetByIndex(ReplicaByDiskUUID, targetDiskUUID) + if err != nil { + errStr := fmt.Sprintf("Failed to get replicas by disk UUID %s: %s", targetDiskUUID, err.Error()) + return nil, werror.NewBadRequest(errStr) + } + + volumesToCheck := make([]string, 0, len(replicaObjs)) for _, replicaObj := range replicaObjs { - volumesToCheck[replicaObj.Spec.VolumeName] = struct{}{} + volumesToCheck = append(volumesToCheck, replicaObj.Spec.VolumeName) } - var unsafeVolumes []string - for volName := range volumesToCheck { + return volumesToCheck, nil +} + +func (v *Validator) findUnsafeVolumes(volumesToCheck []string, targetDiskUUID string) ([]string, error) { + unsafeVolumes := make([]string, 0, len(volumesToCheck)) + for _, volName := range volumesToCheck { replicaObjsForVolume, err := v.replicaCache.GetByIndex(ReplicaByVolume, volName) if err != nil { errStr := fmt.Sprintf("Failed to get replicas for volume %s from index: %s", volName, err.Error()) - return werror.NewBadRequest(errStr) + return nil, werror.NewBadRequest(errStr) } var healthyReplicaCount int @@ -222,42 +291,11 @@ func (v *Validator) validateVolumes(old *diskv1.BlockDevice) error { } } - if len(unsafeVolumes) > 0 { - errStr := fmt.Sprintf("Cannot remove disk %s because it hosts the only healthy replica for the following volumes: %s", - old.Spec.DevPath, strings.Join(unsafeVolumes, ", ")) - return werror.NewBadRequest(errStr) - } - - return nil + return unsafeVolumes, nil } -func (v *Validator) validateBackingImages(old *diskv1.BlockDevice) error { - lhNodes, err := v.lhNodeCache.GetByIndex(NodeByBlockDeviceName, old.Name) - if err != nil { - errStr := fmt.Sprintf("Error looking up node by blockdevice name %s: %s", old.Name, err.Error()) - return werror.NewBadRequest(errStr) - } - if len(lhNodes) != 1 || lhNodes[0] == nil { - return nil - } - - lhNode := lhNodes[0] - diskStatus, ok := lhNode.Status.DiskStatus[old.Name] - if !ok || diskStatus.DiskUUID == "" { - return nil - } - - uuid := diskStatus.DiskUUID - backingImages, err := v.backingImageCache.GetByIndex(BackingImageByDiskUUID, uuid) - if err != nil { - errStr := fmt.Sprintf("Error looking up backing images by 
disk UUID %s: %s", uuid, err.Error()) - return werror.NewBadRequest(errStr) - } - if len(backingImages) == 0 { - return nil - } - - var unsafeToRemoveBackingImages []string +func (v *Validator) findUnsafeUnsafeBackingImages(backingImages []*lhv1.BackingImage, targetDiskUUID string) []string { + unsafeToRemoveBackingImages := make([]string, 0, len(backingImages)) for _, backingImage := range backingImages { if backingImage == nil { continue @@ -273,18 +311,11 @@ func (v *Validator) validateBackingImages(old *diskv1.BlockDevice) error { readyDiskUUID = diskUUID } } - if readyCount == 1 && readyDiskUUID == uuid { + if readyCount == 1 && readyDiskUUID == targetDiskUUID { unsafeToRemoveBackingImages = append(unsafeToRemoveBackingImages, backingImage.Name) } } - - if len(unsafeToRemoveBackingImages) > 0 { - errStr := fmt.Sprintf("Cannot remove disk %s as it contains the only ready copy for the following backing images: %s", - old.Name, strings.Join(unsafeToRemoveBackingImages, ", ")) - return werror.NewBadRequest(errStr) - } - - return nil + return unsafeToRemoveBackingImages } func (v *Validator) Resource() admission.Resource { From a47773839a23a8688664e007bf71e01b173ee023 Mon Sep 17 00:00:00 2001 From: Martin Dekov Date: Tue, 21 Oct 2025 12:41:48 +0300 Subject: [PATCH 4/7] Include HealthyAt in replica health condition Including HealthyAt in addition to FailedAt field when deciding whether replica is failed or healthy. Updating tests as well when constructing replicas. The condiiton follows the established approach by the longhorn team which use this condition as well: https://github.com/longhorn/longhorn-manager/blob/master/datastore/longhorn.go#L2172-L2184 https://github.com/longhorn/longhorn-manager/blob/master/controller/volume_controller.go#L935-L951 Signed-off-by: Martin Dekov --- pkg/webhook/blockdevice/validator.go | 2 +- pkg/webhook/blockdevice/validator_test.go | 29 +++++++++++++---------- 2 files changed, 18 insertions(+), 13 deletions(-) diff --git a/pkg/webhook/blockdevice/validator.go b/pkg/webhook/blockdevice/validator.go index 473065444..2aac9a4fb 100644 --- a/pkg/webhook/blockdevice/validator.go +++ b/pkg/webhook/blockdevice/validator.go @@ -278,7 +278,7 @@ func (v *Validator) findUnsafeVolumes(volumesToCheck []string, targetDiskUUID st var healthyReplicaCount int var replicaOnTargetDiskIsHealthy bool for _, rep := range replicaObjsForVolume { - if rep.Spec.FailedAt == "" { + if rep.Spec.FailedAt == "" && rep.Spec.HealthyAt != "" { healthyReplicaCount++ if rep.Spec.DiskID == targetDiskUUID { replicaOnTargetDiskIsHealthy = true diff --git a/pkg/webhook/blockdevice/validator_test.go b/pkg/webhook/blockdevice/validator_test.go index 62a5b2e75..82bb26cc5 100644 --- a/pkg/webhook/blockdevice/validator_test.go +++ b/pkg/webhook/blockdevice/validator_test.go @@ -35,8 +35,8 @@ func TestUpdate(t *testing.T) { { name: "disk removal with a volume and backingimage that has healthy replicas elsewhere", replicasToCache: []*lhv1.Replica{ - newReplica("rep-1", "vol-1", "node-1", "disk-uuid-1", false), - newReplica("rep-2", "vol-1", "node-2", "disk-uuid-2", false), + newReplica("rep-1", "vol-1", "node-1", "disk-uuid-1", false, true), + newReplica("rep-2", "vol-1", "node-2", "disk-uuid-2", false, true), }, volsToCache: []*lhv1.Volume{ newVolume("vol-1"), @@ -57,7 +57,7 @@ func TestUpdate(t *testing.T) { { name: "disk removal rejected with a volume with single replica and backing image with multiple replicas", replicasToCache: []*lhv1.Replica{ - newReplica("rep-1", "vol-1", "node-1", 
"disk-uuid-1", false), + newReplica("rep-1", "vol-1", "node-1", "disk-uuid-1", false, true), }, volsToCache: []*lhv1.Volume{ newVolume("vol-1"), @@ -78,8 +78,8 @@ func TestUpdate(t *testing.T) { { name: "disk removal rejected with replicated volume but single healthy backing image", replicasToCache: []*lhv1.Replica{ - newReplica("rep-1", "vol-1", "node-1", "disk-uuid-1", false), - newReplica("rep-2", "vol-1", "node-2", "disk-uuid-2", false), + newReplica("rep-1", "vol-1", "node-1", "disk-uuid-1", false, true), + newReplica("rep-2", "vol-1", "node-2", "disk-uuid-2", false, true), }, volsToCache: []*lhv1.Volume{ newVolume("vol-1"), @@ -99,8 +99,8 @@ func TestUpdate(t *testing.T) { { name: "disk removal allowed when volumes contain all failed replicas and replicated backing image", replicasToCache: []*lhv1.Replica{ - newReplica("rep-1", "vol-1", "node-1", "disk-uuid-1", true), - newReplica("rep-2", "vol-1", "node-2", "disk-uuid-2", true), + newReplica("rep-1", "vol-1", "node-1", "disk-uuid-1", true, false), + newReplica("rep-2", "vol-1", "node-2", "disk-uuid-2", true, false), }, volsToCache: []*lhv1.Volume{ newVolume("vol-1"), @@ -121,8 +121,8 @@ func TestUpdate(t *testing.T) { { name: "disk removal allowed when volumes are all failed and backing images are all in non ready state", replicasToCache: []*lhv1.Replica{ - newReplica("rep-1", "vol-1", "node-1", "disk-uuid-1", true), - newReplica("rep-2", "vol-1", "node-2", "disk-uuid-2", true), + newReplica("rep-1", "vol-1", "node-1", "disk-uuid-1", true, false), + newReplica("rep-2", "vol-1", "node-2", "disk-uuid-2", true, false), }, volsToCache: []*lhv1.Volume{ newVolume("vol-1"), @@ -224,11 +224,15 @@ func newVolume(name string) *lhv1.Volume { } } -func newReplica(name, volName, nodeID, diskID string, isFailed bool) *lhv1.Replica { +func newReplica(name, volName, nodeID, diskID string, isFailed, isHealthy bool) *lhv1.Replica { failedAt := "" if isFailed { failedAt = "2025-10-13T10:00:00Z" } + healthyAt := "" + if isHealthy { + healthyAt = "2025-10-20T20:00:00Z" + } return &lhv1.Replica{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -239,8 +243,9 @@ func newReplica(name, volName, nodeID, diskID string, isFailed bool) *lhv1.Repli VolumeName: volName, NodeID: nodeID, }, - DiskID: diskID, - FailedAt: failedAt, + DiskID: diskID, + FailedAt: failedAt, + HealthyAt: healthyAt, }, } } From ab77c9e55d1709e4acc97ff006598ffb6300194a Mon Sep 17 00:00:00 2001 From: Martin Dekov Date: Wed, 22 Oct 2025 12:05:50 +0300 Subject: [PATCH 5/7] Address feedback Addressing feedback from Vicente including the following: * renamed getDiskUUID to validateDiskInNode and I call it outside directly in validateLHDisk to avoid duplication * renamed some functions and varibles to reduce lenght due to new functions on counting backing images and healthy replicas * Added trailing spaces which were previously removed * Prepend longhorn related object keys with lh as longhorn has a lot of common objects with k8s like volumes/replicas/nodes which can be confused Signed-off-by: Martin Dekov --- cmd/node-disk-manager-webhook/main.go | 34 ++--- .../templates/rbac.yaml | 4 +- pkg/webhook/blockdevice/validator.go | 139 +++++++++--------- 3 files changed, 88 insertions(+), 89 deletions(-) diff --git a/cmd/node-disk-manager-webhook/main.go b/cmd/node-disk-manager-webhook/main.go index bea1baea9..7ee9ed854 100644 --- a/cmd/node-disk-manager-webhook/main.go +++ b/cmd/node-disk-manager-webhook/main.go @@ -30,14 +30,14 @@ import ( const webhookName = "harvester-node-disk-manager-webhook" type 
resourceCaches struct { - bdCache ctldiskv1.BlockDeviceCache - lvmVGCache ctldiskv1.LVMVolumeGroupCache - storageClassCache ctlstoragev1.StorageClassCache - pvCache ctlcorev1.PersistentVolumeCache - volumeCache lhv1beta2.VolumeCache - backingImageCache lhv1beta2.BackingImageCache - lhNodeCache lhv1beta2.NodeCache - replicaCache lhv1beta2.ReplicaCache + bdCache ctldiskv1.BlockDeviceCache + lvmVGCache ctldiskv1.LVMVolumeGroupCache + storageClassCache ctlstoragev1.StorageClassCache + pvCache ctlcorev1.PersistentVolumeCache + lhVolumeCache lhv1beta2.VolumeCache + lhBackingImageCache lhv1beta2.BackingImageCache + lhNodeCache lhv1beta2.NodeCache + lhReplicaCache lhv1beta2.ReplicaCache } func main() { @@ -124,7 +124,7 @@ func runWebhookServer(ctx context.Context, cfg *rest.Config, options *config.Opt } bdValidator := blockdevice.NewBlockdeviceValidator(resourceCaches.bdCache, resourceCaches.storageClassCache, resourceCaches.pvCache, - resourceCaches.volumeCache, resourceCaches.backingImageCache, resourceCaches.lhNodeCache, resourceCaches.replicaCache) + resourceCaches.lhVolumeCache, resourceCaches.lhBackingImageCache, resourceCaches.lhNodeCache, resourceCaches.lhReplicaCache) scValidator := storageclass.NewStorageClassValidator(resourceCaches.lvmVGCache) var validators = []admission.Validator{ bdValidator, @@ -170,14 +170,14 @@ func newCaches(ctx context.Context, cfg *rest.Config, threadiness int) (*resourc starters = append(starters, disks, storageFactory, coreFactory, lhFactory) resourceCaches := &resourceCaches{ - bdCache: disks.Harvesterhci().V1beta1().BlockDevice().Cache(), - lvmVGCache: disks.Harvesterhci().V1beta1().LVMVolumeGroup().Cache(), - storageClassCache: storageFactory.Storage().V1().StorageClass().Cache(), - pvCache: coreFactory.Core().V1().PersistentVolume().Cache(), - volumeCache: lhFactory.Longhorn().V1beta2().Volume().Cache(), - backingImageCache: lhFactory.Longhorn().V1beta2().BackingImage().Cache(), - lhNodeCache: lhFactory.Longhorn().V1beta2().Node().Cache(), - replicaCache: lhFactory.Longhorn().V1beta2().Replica().Cache(), + bdCache: disks.Harvesterhci().V1beta1().BlockDevice().Cache(), + lvmVGCache: disks.Harvesterhci().V1beta1().LVMVolumeGroup().Cache(), + storageClassCache: storageFactory.Storage().V1().StorageClass().Cache(), + pvCache: coreFactory.Core().V1().PersistentVolume().Cache(), + lhVolumeCache: lhFactory.Longhorn().V1beta2().Volume().Cache(), + lhBackingImageCache: lhFactory.Longhorn().V1beta2().BackingImage().Cache(), + lhNodeCache: lhFactory.Longhorn().V1beta2().Node().Cache(), + lhReplicaCache: lhFactory.Longhorn().V1beta2().Replica().Cache(), } if err := start.All(ctx, threadiness, starters...); err != nil { diff --git a/deploy/charts/harvester-node-disk-manager/templates/rbac.yaml b/deploy/charts/harvester-node-disk-manager/templates/rbac.yaml index 69d067c9d..0507fe8b8 100644 --- a/deploy/charts/harvester-node-disk-manager/templates/rbac.yaml +++ b/deploy/charts/harvester-node-disk-manager/templates/rbac.yaml @@ -57,7 +57,7 @@ rules: resources: [ "storageclasses" ] verbs: [ "*" ] - apiGroups: [ "harvesterhci.io" ] - resources: [ "blockdevices", "lvmvolumegroups", "lvmvolumegroups/status"] + resources: [ "blockdevices", "lvmvolumegroups", "lvmvolumegroups/status" ] verbs: [ "*" ] - apiGroups: [ "apiregistration.k8s.io" ] resources: [ "apiservices" ] @@ -69,7 +69,7 @@ rules: resources: [ "validatingwebhookconfigurations", "mutatingwebhookconfigurations" ] verbs: [ "*" ] - apiGroups: ["longhorn.io"] - resources: ["volumes", "nodes", "backingimages", 
"replicas"] + resources: [ "volumes", "nodes", "backingimages", "replicas" ] verbs: [ "get", "watch", "list" ] --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/pkg/webhook/blockdevice/validator.go b/pkg/webhook/blockdevice/validator.go index 2aac9a4fb..b2c74269a 100644 --- a/pkg/webhook/blockdevice/validator.go +++ b/pkg/webhook/blockdevice/validator.go @@ -31,30 +31,30 @@ const ( type Validator struct { admission.DefaultValidator - BlockdeviceCache ctldiskv1.BlockDeviceCache - storageClassCache ctlstoragev1.StorageClassCache - pvCache ctlcorev1.PersistentVolumeCache - volumeCache lhv1beta2.VolumeCache - backingImageCache lhv1beta2.BackingImageCache - lhNodeCache lhv1beta2.NodeCache - replicaCache lhv1beta2.ReplicaCache + BlockdeviceCache ctldiskv1.BlockDeviceCache + storageClassCache ctlstoragev1.StorageClassCache + pvCache ctlcorev1.PersistentVolumeCache + lhVolumeCache lhv1beta2.VolumeCache + lhBackingImageCache lhv1beta2.BackingImageCache + lhNodeCache lhv1beta2.NodeCache + lhReplicaCache lhv1beta2.ReplicaCache } func NewBlockdeviceValidator(blockdeviceCache ctldiskv1.BlockDeviceCache, storageClassCache ctlstoragev1.StorageClassCache, - pvCache ctlcorev1.PersistentVolumeCache, volumeCache lhv1beta2.VolumeCache, backingImageCache lhv1beta2.BackingImageCache, - lhNodeCache lhv1beta2.NodeCache, replicaCache lhv1beta2.ReplicaCache) *Validator { - backingImageCache.AddIndexer(BackingImageByDiskUUID, backingImageByDiskUUIDIndexer) + pvCache ctlcorev1.PersistentVolumeCache, lhVolumeCache lhv1beta2.VolumeCache, lhBackingImageCache lhv1beta2.BackingImageCache, + lhNodeCache lhv1beta2.NodeCache, lhReplicaCache lhv1beta2.ReplicaCache) *Validator { + lhBackingImageCache.AddIndexer(BackingImageByDiskUUID, backingImageByDiskUUIDIndexer) lhNodeCache.AddIndexer(NodeByBlockDeviceName, nodeByBlockDeviceNameIndexer) - replicaCache.AddIndexer(ReplicaByDiskUUID, replicaByDiskUUIDIndexer) - replicaCache.AddIndexer(ReplicaByVolume, replicaByVolumeIndexer) + lhReplicaCache.AddIndexer(ReplicaByDiskUUID, replicaByDiskUUIDIndexer) + lhReplicaCache.AddIndexer(ReplicaByVolume, replicaByVolumeIndexer) return &Validator{ - BlockdeviceCache: blockdeviceCache, - storageClassCache: storageClassCache, - pvCache: pvCache, - volumeCache: volumeCache, - backingImageCache: backingImageCache, - lhNodeCache: lhNodeCache, - replicaCache: replicaCache, + BlockdeviceCache: blockdeviceCache, + storageClassCache: storageClassCache, + pvCache: pvCache, + lhVolumeCache: lhVolumeCache, + lhBackingImageCache: lhBackingImageCache, + lhNodeCache: lhNodeCache, + lhReplicaCache: lhReplicaCache, } } @@ -100,11 +100,18 @@ func (v *Validator) validateLHDisk(oldBd, newBd *diskv1.BlockDevice) error { if !oldBd.Spec.Provision || newBd.Spec.Provision { return nil } - err := v.validateVolumes(oldBd) + uuid, err := v.validateDiskInNode(oldBd) if err != nil { return err } - err = v.validateBackingImages(oldBd) + if uuid == "" { + return nil + } + err = v.validateVolumes(oldBd, uuid) + if err != nil { + return err + } + err = v.validateBackingImages(oldBd, uuid) if err != nil { return err } @@ -176,15 +183,7 @@ func (v *Validator) validateVGIsAlreadyUsed(bd *diskv1.BlockDevice) error { return nil } -func (v *Validator) validateVolumes(old *diskv1.BlockDevice) error { - uuid, err := v.getDiskUUID(old) - if err != nil { - return err - } - if uuid == "" { - return nil - } - +func (v *Validator) validateVolumes(old *diskv1.BlockDevice, uuid string) error { volumesToCheck, err := v.getVolumesOnDisk(uuid) if err != nil { return err @@ -204,16 
+203,8 @@ func (v *Validator) validateVolumes(old *diskv1.BlockDevice) error { return nil } -func (v *Validator) validateBackingImages(old *diskv1.BlockDevice) error { - uuid, err := v.getDiskUUID(old) - if err != nil { - return err - } - if uuid == "" { - return nil - } - - backingImages, err := v.backingImageCache.GetByIndex(BackingImageByDiskUUID, uuid) +func (v *Validator) validateBackingImages(old *diskv1.BlockDevice, uuid string) error { + backingImages, err := v.lhBackingImageCache.GetByIndex(BackingImageByDiskUUID, uuid) if err != nil { errStr := fmt.Sprintf("Error looking up backing images by disk UUID %s: %s", uuid, err.Error()) return werror.NewBadRequest(errStr) @@ -222,7 +213,7 @@ func (v *Validator) validateBackingImages(old *diskv1.BlockDevice) error { return nil } - unsafeToRemoveBackingImages := v.findUnsafeUnsafeBackingImages(backingImages, uuid) + unsafeToRemoveBackingImages := v.findUnsafeBackingImages(backingImages, uuid) if len(unsafeToRemoveBackingImages) > 0 { errStr := fmt.Sprintf("Cannot remove disk %s as it contains the only ready copy for the following backing images: %s", old.Name, strings.Join(unsafeToRemoveBackingImages, ", ")) @@ -232,7 +223,7 @@ func (v *Validator) validateBackingImages(old *diskv1.BlockDevice) error { return nil } -func (v *Validator) getDiskUUID(bd *diskv1.BlockDevice) (string, error) { +func (v *Validator) validateDiskInNode(bd *diskv1.BlockDevice) (string, error) { lhNodes, err := v.lhNodeCache.GetByIndex(NodeByBlockDeviceName, bd.Name) if err != nil { errStr := fmt.Sprintf("Error looking up node by blockdevice name %s: %s", bd.Name, err.Error()) @@ -252,7 +243,7 @@ func (v *Validator) getDiskUUID(bd *diskv1.BlockDevice) (string, error) { } func (v *Validator) getVolumesOnDisk(targetDiskUUID string) ([]string, error) { - replicaObjs, err := v.replicaCache.GetByIndex(ReplicaByDiskUUID, targetDiskUUID) + replicaObjs, err := v.lhReplicaCache.GetByIndex(ReplicaByDiskUUID, targetDiskUUID) if err != nil { errStr := fmt.Sprintf("Failed to get replicas by disk UUID %s: %s", targetDiskUUID, err.Error()) return nil, werror.NewBadRequest(errStr) @@ -266,27 +257,16 @@ func (v *Validator) getVolumesOnDisk(targetDiskUUID string) ([]string, error) { return volumesToCheck, nil } -func (v *Validator) findUnsafeVolumes(volumesToCheck []string, targetDiskUUID string) ([]string, error) { +func (v *Validator) findUnsafeVolumes(volumesToCheck []string, uuid string) ([]string, error) { unsafeVolumes := make([]string, 0, len(volumesToCheck)) for _, volName := range volumesToCheck { - replicaObjsForVolume, err := v.replicaCache.GetByIndex(ReplicaByVolume, volName) + replicaObjsForVolume, err := v.lhReplicaCache.GetByIndex(ReplicaByVolume, volName) if err != nil { errStr := fmt.Sprintf("Failed to get replicas for volume %s from index: %s", volName, err.Error()) return nil, werror.NewBadRequest(errStr) } - - var healthyReplicaCount int - var replicaOnTargetDiskIsHealthy bool - for _, rep := range replicaObjsForVolume { - if rep.Spec.FailedAt == "" && rep.Spec.HealthyAt != "" { - healthyReplicaCount++ - if rep.Spec.DiskID == targetDiskUUID { - replicaOnTargetDiskIsHealthy = true - } - } - } - - if healthyReplicaCount == 1 && replicaOnTargetDiskIsHealthy { + replicaCount, replicaIsHealthy := countHealthyReplicaOnDisk(replicaObjsForVolume, uuid) + if replicaCount == 1 && replicaIsHealthy { unsafeVolumes = append(unsafeVolumes, volName) } } @@ -294,23 +274,27 @@ func (v *Validator) findUnsafeVolumes(volumesToCheck []string, targetDiskUUID st return unsafeVolumes, nil 
} -func (v *Validator) findUnsafeUnsafeBackingImages(backingImages []*lhv1.BackingImage, targetDiskUUID string) []string { +func countHealthyReplicaOnDisk(replicas []*lhv1.Replica, uuid string) (int, bool) { + var healthyReplicaCount int + var replicaOnTargetDiskIsHealthy bool + for _, replica := range replicas { + if replica.Spec.FailedAt == "" && replica.Spec.HealthyAt != "" { + healthyReplicaCount++ + if replica.Spec.DiskID == uuid { + replicaOnTargetDiskIsHealthy = true + } + } + } + return healthyReplicaCount, replicaOnTargetDiskIsHealthy +} + +func (v *Validator) findUnsafeBackingImages(backingImages []*lhv1.BackingImage, targetDiskUUID string) []string { unsafeToRemoveBackingImages := make([]string, 0, len(backingImages)) for _, backingImage := range backingImages { if backingImage == nil { continue } - readyCount := 0 - var readyDiskUUID string - for diskUUID, fileStatus := range backingImage.Status.DiskFileStatusMap { - if fileStatus == nil { - continue - } - if fileStatus.State == lhv1.BackingImageStateReady { - readyCount++ - readyDiskUUID = diskUUID - } - } + readyCount, readyDiskUUID := countReadyBackingImages(backingImage) if readyCount == 1 && readyDiskUUID == targetDiskUUID { unsafeToRemoveBackingImages = append(unsafeToRemoveBackingImages, backingImage.Name) } @@ -318,6 +302,21 @@ func (v *Validator) findUnsafeUnsafeBackingImages(backingImages []*lhv1.BackingI return unsafeToRemoveBackingImages } +func countReadyBackingImages(backingImage *lhv1.BackingImage) (int, string) { + var readyCount int + var readyDiskUUID string + for diskUUID, fileStatus := range backingImage.Status.DiskFileStatusMap { + if fileStatus == nil { + continue + } + if fileStatus.State == lhv1.BackingImageStateReady { + readyCount++ + readyDiskUUID = diskUUID + } + } + return readyCount, readyDiskUUID +} + func (v *Validator) Resource() admission.Resource { return admission.Resource{ Names: []string{"blockdevices"}, From 45a9bab200098b23bd8af62b21f26df9c694821a Mon Sep 17 00:00:00 2001 From: Martin Dekov Date: Wed, 22 Oct 2025 16:08:49 +0300 Subject: [PATCH 6/7] Add comment above index constants Signed-off-by: Martin Dekov --- pkg/webhook/blockdevice/validator.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/webhook/blockdevice/validator.go b/pkg/webhook/blockdevice/validator.go index b2c74269a..8ed6a9d1e 100644 --- a/pkg/webhook/blockdevice/validator.go +++ b/pkg/webhook/blockdevice/validator.go @@ -21,6 +21,7 @@ import ( "github.com/harvester/node-disk-manager/pkg/utils" ) +// Constants representing the names of the indexes const ( BackingImageByDiskUUID = "longhorn.io/backingimage-by-diskuuid" NodeByBlockDeviceName = "longhorn.io/node-by-blockdevice-name" From af0dc0efb35effe51d96b5fc3abea252908e2f59 Mon Sep 17 00:00:00 2001 From: Martin Dekov Date: Wed, 22 Oct 2025 18:28:15 +0300 Subject: [PATCH 7/7] Rename pipeline integration test step Renaming pipeline integration image build step from: "Build the Image for the Integration Test" to: "Build Integration Test Image and run Unit Tests" To make it obvious when unit tests are being ran as we don't mention this anywhere explicitly. Due to this also removed the `unit-test` script and step. 
Signed-off-by: Martin Dekov --- .github/workflows/basic-ci.yaml | 7 ++----- scripts/unit-test | 7 ------- 2 files changed, 2 insertions(+), 12 deletions(-) delete mode 100755 scripts/unit-test diff --git a/.github/workflows/basic-ci.yaml b/.github/workflows/basic-ci.yaml index 9b2f57b50..fe88ba492 100644 --- a/.github/workflows/basic-ci.yaml +++ b/.github/workflows/basic-ci.yaml @@ -25,9 +25,6 @@ jobs: run: | make validate make validate-ci - - name: "Run unit-tests" - run: | - make unit-test job-new-installation: needs: validation runs-on: @@ -36,7 +33,7 @@ jobs: steps: - name: "Clone and check" uses: actions/checkout@v3 - - name: "Build the Image for the Integration Test" + - name: "Build Integration Test Image and run Unit Tests" run: | BUILD_FOR_CI=true make ./ci/scripts/patch-ttl-repo.sh @@ -117,7 +114,7 @@ jobs: steps: - name: "Clone and check" uses: actions/checkout@v3 - - name: "Build the Image for the Integration Test" + - name: "Build Integration Test Image and run Unit Tests" run: | BUILD_FOR_CI=true make ./ci/scripts/patch-ttl-repo.sh diff --git a/scripts/unit-test b/scripts/unit-test deleted file mode 100755 index da56b8dc4..000000000 --- a/scripts/unit-test +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -set -e - -cd $(dirname $0)/.. - -echo Running unit tests -go test -v -cover ./pkg/... \ No newline at end of file
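Note: the disk-removal rule that this series converges on after PATCH 3-5 can be summarized independently of the webhook plumbing. The sketch below is a condensed, illustrative restatement of the helpers added to pkg/webhook/blockdevice/validator.go (countHealthyReplicaOnDisk and countReadyBackingImages); only the predicates mirror the patched code, while the wrapper function and package names here are hypothetical.

package example

import lhv1 "github.com/longhorn/longhorn-manager/k8s/pkg/apis/longhorn/v1beta2"

// replicaIsHealthy mirrors the condition used by the validator after PATCH 4:
// a replica counts as healthy only if it never failed and has become healthy.
func replicaIsHealthy(r *lhv1.Replica) bool {
	return r.Spec.FailedAt == "" && r.Spec.HealthyAt != ""
}

// volumeUnsafeOnDisk reports whether the replicas of a single volume leave the
// disk with UUID diskUUID holding that volume's only healthy replica.
func volumeUnsafeOnDisk(replicas []*lhv1.Replica, diskUUID string) bool {
	healthy, onTargetDisk := 0, false
	for _, r := range replicas {
		if !replicaIsHealthy(r) {
			continue
		}
		healthy++
		if r.Spec.DiskID == diskUUID {
			onTargetDisk = true
		}
	}
	return healthy == 1 && onTargetDisk
}

// backingImageUnsafeOnDisk reports whether the disk with UUID diskUUID holds
// the only ready copy of the backing image.
func backingImageUnsafeOnDisk(bi *lhv1.BackingImage, diskUUID string) bool {
	readyCount, readyDiskUUID := 0, ""
	for uuid, fileStatus := range bi.Status.DiskFileStatusMap {
		if fileStatus != nil && fileStatus.State == lhv1.BackingImageStateReady {
			readyCount++
			readyDiskUUID = uuid
		}
	}
	return readyCount == 1 && readyDiskUUID == diskUUID
}

The webhook rejects the BlockDevice update (Provision flipping from true to false on a Longhorn-provisioned disk) when either predicate is true for any volume or backing image indexed to the disk's UUID.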
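The validator's lookups all go through wrangler cache indexes (BackingImageByDiskUUID, NodeByBlockDeviceName, ReplicaByDiskUUID, ReplicaByVolume) registered with AddIndexer in NewBlockdeviceValidator, but the indexer functions themselves are never shown in this series. Assuming the usual wrangler generic.Indexer[T] shape, func(obj T) ([]string, error), they would look roughly like the following; the bodies are an assumption for illustration, not the actual implementations.

// Hypothetical indexer bodies, keyed the way the validator queries them.
func replicaByDiskUUIDIndexer(r *lhv1.Replica) ([]string, error) {
	return []string{r.Spec.DiskID}, nil
}

func replicaByVolumeIndexer(r *lhv1.Replica) ([]string, error) {
	return []string{r.Spec.VolumeName}, nil
}

func backingImageByDiskUUIDIndexer(bi *lhv1.BackingImage) ([]string, error) {
	uuids := make([]string, 0, len(bi.Status.DiskFileStatusMap))
	for uuid := range bi.Status.DiskFileStatusMap {
		uuids = append(uuids, uuid)
	}
	return uuids, nil
}

func nodeByBlockDeviceNameIndexer(n *lhv1.Node) ([]string, error) {
	names := make([]string, 0, len(n.Status.DiskStatus))
	for bdName := range n.Status.DiskStatus {
		names = append(names, bdName)
	}
	return names, nil
}

This is also why the fake caches in pkg/utils/fake can get away with returning their entire backing slice from GetByIndex: each unit test only caches objects that would match the index key under test anyway.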