From 730eee0da1ccb44819d2cd1ba66a5f5d23e9fc35 Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Thu, 15 Jan 2026 16:56:49 +0200 Subject: [PATCH 01/22] Gather Kubernetes data about pod volumes and add K8s context to filesystem metrics --- api/v1/kube/kube_api.pb.go | 325 +++++++- api/v1/kube/kube_api.proto | 27 + api/v1/kube/kube_api_grpc.pb.go | 54 +- api/v1/runtime/common.pb.go | 2 +- api/v1/runtime/runtime_agent_api.pb.go | 691 ++++++------------ api/v1/runtime/runtime_agent_api_grpc.pb.go | 22 +- cmd/agent/daemon/app/app.go | 19 +- cmd/agent/daemon/pipeline/controller.go | 15 + cmd/agent/daemon/pipeline/controller_test.go | 13 +- .../daemon/pipeline/storage_info_provider.go | 155 +++- cmd/agent/daemon/pipeline/storage_pipeline.go | 34 +- cmd/agent/daemon/pipeline/volume_mapper.go | 30 + .../daemon/pipeline/volume_mapper_test.go | 84 +++ cmd/controller/kube/client.go | 35 + cmd/controller/kube/index.go | 44 ++ cmd/controller/kube/server.go | 83 +++ 16 files changed, 1112 insertions(+), 521 deletions(-) create mode 100644 cmd/agent/daemon/pipeline/volume_mapper.go create mode 100644 cmd/agent/daemon/pipeline/volume_mapper_test.go diff --git a/api/v1/kube/kube_api.pb.go b/api/v1/kube/kube_api.pb.go index 35c5666a..4485a5ef 100644 --- a/api/v1/kube/kube_api.pb.go +++ b/api/v1/kube/kube_api.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.10 +// protoc-gen-go v1.36.11 // protoc v6.33.0 // source: api/v1/kube/kube_api.proto @@ -1321,6 +1321,250 @@ func (x *RuntimeStats) GetContainerFs() *FsStats { return nil } +type GetPodVolumesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + NodeName string `protobuf:"bytes,1,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetPodVolumesRequest) Reset() { + *x = GetPodVolumesRequest{} + mi := &file_api_v1_kube_kube_api_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetPodVolumesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPodVolumesRequest) ProtoMessage() {} + +func (x *GetPodVolumesRequest) ProtoReflect() protoreflect.Message { + mi := &file_api_v1_kube_kube_api_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPodVolumesRequest.ProtoReflect.Descriptor instead. 
+func (*GetPodVolumesRequest) Descriptor() ([]byte, []int) { + return file_api_v1_kube_kube_api_proto_rawDescGZIP(), []int{21} +} + +func (x *GetPodVolumesRequest) GetNodeName() string { + if x != nil { + return x.NodeName + } + return "" +} + +type GetPodVolumesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Volumes []*PodVolumeInfo `protobuf:"bytes,1,rep,name=volumes,proto3" json:"volumes,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetPodVolumesResponse) Reset() { + *x = GetPodVolumesResponse{} + mi := &file_api_v1_kube_kube_api_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetPodVolumesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetPodVolumesResponse) ProtoMessage() {} + +func (x *GetPodVolumesResponse) ProtoReflect() protoreflect.Message { + mi := &file_api_v1_kube_kube_api_proto_msgTypes[22] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetPodVolumesResponse.ProtoReflect.Descriptor instead. 
+func (*GetPodVolumesResponse) Descriptor() ([]byte, []int) { + return file_api_v1_kube_kube_api_proto_rawDescGZIP(), []int{22} +} + +func (x *GetPodVolumesResponse) GetVolumes() []*PodVolumeInfo { + if x != nil { + return x.Volumes + } + return nil +} + +type PodVolumeInfo struct { + state protoimpl.MessageState `protogen:"open.v1"` + Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"` + PodName string `protobuf:"bytes,2,opt,name=pod_name,json=podName,proto3" json:"pod_name,omitempty"` + PodUid string `protobuf:"bytes,3,opt,name=pod_uid,json=podUid,proto3" json:"pod_uid,omitempty"` + ControllerKind string `protobuf:"bytes,4,opt,name=controller_kind,json=controllerKind,proto3" json:"controller_kind,omitempty"` + ControllerName string `protobuf:"bytes,5,opt,name=controller_name,json=controllerName,proto3" json:"controller_name,omitempty"` + ContainerName string `protobuf:"bytes,6,opt,name=container_name,json=containerName,proto3" json:"container_name,omitempty"` + VolumeName string `protobuf:"bytes,7,opt,name=volume_name,json=volumeName,proto3" json:"volume_name,omitempty"` + MountPath string `protobuf:"bytes,8,opt,name=mount_path,json=mountPath,proto3" json:"mount_path,omitempty"` + PvcName string `protobuf:"bytes,9,opt,name=pvc_name,json=pvcName,proto3" json:"pvc_name,omitempty"` + PvcUid string `protobuf:"bytes,10,opt,name=pvc_uid,json=pvcUid,proto3" json:"pvc_uid,omitempty"` + RequestedSizeBytes int64 `protobuf:"varint,11,opt,name=requested_size_bytes,json=requestedSizeBytes,proto3" json:"requested_size_bytes,omitempty"` + PvName string `protobuf:"bytes,12,opt,name=pv_name,json=pvName,proto3" json:"pv_name,omitempty"` + StorageClass string `protobuf:"bytes,13,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` + CsiDriver string `protobuf:"bytes,14,opt,name=csi_driver,json=csiDriver,proto3" json:"csi_driver,omitempty"` + CsiVolumeHandle string 
`protobuf:"bytes,15,opt,name=csi_volume_handle,json=csiVolumeHandle,proto3" json:"csi_volume_handle,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PodVolumeInfo) Reset() { + *x = PodVolumeInfo{} + mi := &file_api_v1_kube_kube_api_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PodVolumeInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PodVolumeInfo) ProtoMessage() {} + +func (x *PodVolumeInfo) ProtoReflect() protoreflect.Message { + mi := &file_api_v1_kube_kube_api_proto_msgTypes[23] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PodVolumeInfo.ProtoReflect.Descriptor instead. +func (*PodVolumeInfo) Descriptor() ([]byte, []int) { + return file_api_v1_kube_kube_api_proto_rawDescGZIP(), []int{23} +} + +func (x *PodVolumeInfo) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *PodVolumeInfo) GetPodName() string { + if x != nil { + return x.PodName + } + return "" +} + +func (x *PodVolumeInfo) GetPodUid() string { + if x != nil { + return x.PodUid + } + return "" +} + +func (x *PodVolumeInfo) GetControllerKind() string { + if x != nil { + return x.ControllerKind + } + return "" +} + +func (x *PodVolumeInfo) GetControllerName() string { + if x != nil { + return x.ControllerName + } + return "" +} + +func (x *PodVolumeInfo) GetContainerName() string { + if x != nil { + return x.ContainerName + } + return "" +} + +func (x *PodVolumeInfo) GetVolumeName() string { + if x != nil { + return x.VolumeName + } + return "" +} + +func (x *PodVolumeInfo) GetMountPath() string { + if x != nil { + return x.MountPath + } + return "" +} + +func (x *PodVolumeInfo) GetPvcName() string { + if x != nil { + return x.PvcName + } + return "" +} + +func 
(x *PodVolumeInfo) GetPvcUid() string { + if x != nil { + return x.PvcUid + } + return "" +} + +func (x *PodVolumeInfo) GetRequestedSizeBytes() int64 { + if x != nil { + return x.RequestedSizeBytes + } + return 0 +} + +func (x *PodVolumeInfo) GetPvName() string { + if x != nil { + return x.PvName + } + return "" +} + +func (x *PodVolumeInfo) GetStorageClass() string { + if x != nil { + return x.StorageClass + } + return "" +} + +func (x *PodVolumeInfo) GetCsiDriver() string { + if x != nil { + return x.CsiDriver + } + return "" +} + +func (x *PodVolumeInfo) GetCsiVolumeHandle() string { + if x != nil { + return x.CsiVolumeHandle + } + return "" +} + var File_api_v1_kube_kube_api_proto protoreflect.FileDescriptor const file_api_v1_kube_kube_api_proto_rawDesc = "" + @@ -1414,7 +1658,31 @@ const file_api_v1_kube_kube_api_proto_rawDesc = "" + "\fRuntimeStats\x12!\n" + "\ftime_seconds\x18\x01 \x01(\x03R\vtimeSeconds\x12+\n" + "\bimage_fs\x18\x02 \x01(\v2\x10.kube.v1.FsStatsR\aimageFs\x123\n" + - "\fcontainer_fs\x18\x03 \x01(\v2\x10.kube.v1.FsStatsR\vcontainerFs*\xed\x01\n" + + "\fcontainer_fs\x18\x03 \x01(\v2\x10.kube.v1.FsStatsR\vcontainerFs\"3\n" + + "\x14GetPodVolumesRequest\x12\x1b\n" + + "\tnode_name\x18\x01 \x01(\tR\bnodeName\"I\n" + + "\x15GetPodVolumesResponse\x120\n" + + "\avolumes\x18\x01 \x03(\v2\x16.kube.v1.PodVolumeInfoR\avolumes\"\x89\x04\n" + + "\rPodVolumeInfo\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12\x19\n" + + "\bpod_name\x18\x02 \x01(\tR\apodName\x12\x17\n" + + "\apod_uid\x18\x03 \x01(\tR\x06podUid\x12'\n" + + "\x0fcontroller_kind\x18\x04 \x01(\tR\x0econtrollerKind\x12'\n" + + "\x0fcontroller_name\x18\x05 \x01(\tR\x0econtrollerName\x12%\n" + + "\x0econtainer_name\x18\x06 \x01(\tR\rcontainerName\x12\x1f\n" + + "\vvolume_name\x18\a \x01(\tR\n" + + "volumeName\x12\x1d\n" + + "\n" + + "mount_path\x18\b \x01(\tR\tmountPath\x12\x19\n" + + "\bpvc_name\x18\t \x01(\tR\apvcName\x12\x17\n" + + "\apvc_uid\x18\n" + + " \x01(\tR\x06pvcUid\x120\n" 
+ + "\x14requested_size_bytes\x18\v \x01(\x03R\x12requestedSizeBytes\x12\x17\n" + + "\apv_name\x18\f \x01(\tR\x06pvName\x12#\n" + + "\rstorage_class\x18\r \x01(\tR\fstorageClass\x12\x1d\n" + + "\n" + + "csi_driver\x18\x0e \x01(\tR\tcsiDriver\x12*\n" + + "\x11csi_volume_handle\x18\x0f \x01(\tR\x0fcsiVolumeHandle*\xed\x01\n" + "\fWorkloadKind\x12\x19\n" + "\x15WORKLOAD_KIND_UNKNOWN\x10\x00\x12\x1c\n" + "\x18WORKLOAD_KIND_DEPLOYMENT\x10\x01\x12\x1d\n" + @@ -1423,7 +1691,7 @@ const file_api_v1_kube_kube_api_proto_rawDesc = "" + "\x18WORKLOAD_KIND_DAEMON_SET\x10\x04\x12\x15\n" + "\x11WORKLOAD_KIND_JOB\x10\x05\x12\x19\n" + "\x15WORKLOAD_KIND_CRONJOB\x10\x06\x12\x15\n" + - "\x11WORKLOAD_KIND_POD\x10\a2\xc2\x03\n" + + "\x11WORKLOAD_KIND_POD\x10\a2\x92\x04\n" + "\aKubeAPI\x12Q\n" + "\x0eGetClusterInfo\x12\x1e.kube.v1.GetClusterInfoRequest\x1a\x1f.kube.v1.GetClusterInfoResponse\x12B\n" + "\tGetIPInfo\x12\x19.kube.v1.GetIPInfoRequest\x1a\x1a.kube.v1.GetIPInfoResponse\x12E\n" + @@ -1431,7 +1699,8 @@ const file_api_v1_kube_kube_api_proto_rawDesc = "" + "GetIPsInfo\x12\x1a.kube.v1.GetIPsInfoRequest\x1a\x1b.kube.v1.GetIPsInfoResponse\x129\n" + "\x06GetPod\x12\x16.kube.v1.GetPodRequest\x1a\x17.kube.v1.GetPodResponse\x12<\n" + "\aGetNode\x12\x17.kube.v1.GetNodeRequest\x1a\x18.kube.v1.GetNodeResponse\x12`\n" + - "\x13GetNodeStatsSummary\x12#.kube.v1.GetNodeStatsSummaryRequest\x1a$.kube.v1.GetNodeStatsSummaryResponseB&Z$github.com/castai/kvisor/api/kube/v1b\x06proto3" + "\x13GetNodeStatsSummary\x12#.kube.v1.GetNodeStatsSummaryRequest\x1a$.kube.v1.GetNodeStatsSummaryResponse\x12N\n" + + "\rGetPodVolumes\x12\x1d.kube.v1.GetPodVolumesRequest\x1a\x1e.kube.v1.GetPodVolumesResponseB&Z$github.com/castai/kvisor/api/kube/v1b\x06proto3" var ( file_api_v1_kube_kube_api_proto_rawDescOnce sync.Once @@ -1446,7 +1715,7 @@ func file_api_v1_kube_kube_api_proto_rawDescGZIP() []byte { } var file_api_v1_kube_kube_api_proto_enumTypes = make([]protoimpl.EnumInfo, 1) -var 
file_api_v1_kube_kube_api_proto_msgTypes = make([]protoimpl.MessageInfo, 22) +var file_api_v1_kube_kube_api_proto_msgTypes = make([]protoimpl.MessageInfo, 25) var file_api_v1_kube_kube_api_proto_goTypes = []any{ (WorkloadKind)(0), // 0: kube.v1.WorkloadKind (*GetClusterInfoRequest)(nil), // 1: kube.v1.GetClusterInfoRequest @@ -1470,7 +1739,10 @@ var file_api_v1_kube_kube_api_proto_goTypes = []any{ (*NetworkStats)(nil), // 19: kube.v1.NetworkStats (*FsStats)(nil), // 20: kube.v1.FsStats (*RuntimeStats)(nil), // 21: kube.v1.RuntimeStats - nil, // 22: kube.v1.Node.LabelsEntry + (*GetPodVolumesRequest)(nil), // 22: kube.v1.GetPodVolumesRequest + (*GetPodVolumesResponse)(nil), // 23: kube.v1.GetPodVolumesResponse + (*PodVolumeInfo)(nil), // 24: kube.v1.PodVolumeInfo + nil, // 25: kube.v1.Node.LabelsEntry } var file_api_v1_kube_kube_api_proto_depIdxs = []int32{ 7, // 0: kube.v1.GetIPInfoResponse.info:type_name -> kube.v1.IPInfo @@ -1478,7 +1750,7 @@ var file_api_v1_kube_kube_api_proto_depIdxs = []int32{ 10, // 2: kube.v1.GetPodResponse.pod:type_name -> kube.v1.Pod 0, // 3: kube.v1.Pod.workload_kind:type_name -> kube.v1.WorkloadKind 13, // 4: kube.v1.GetNodeResponse.node:type_name -> kube.v1.Node - 22, // 5: kube.v1.Node.labels:type_name -> kube.v1.Node.LabelsEntry + 25, // 5: kube.v1.Node.labels:type_name -> kube.v1.Node.LabelsEntry 16, // 6: kube.v1.GetNodeStatsSummaryResponse.node:type_name -> kube.v1.NodeStats 17, // 7: kube.v1.NodeStats.cpu:type_name -> kube.v1.CPUStats 18, // 8: kube.v1.NodeStats.memory:type_name -> kube.v1.MemoryStats @@ -1487,23 +1759,26 @@ var file_api_v1_kube_kube_api_proto_depIdxs = []int32{ 21, // 11: kube.v1.NodeStats.runtime:type_name -> kube.v1.RuntimeStats 20, // 12: kube.v1.RuntimeStats.image_fs:type_name -> kube.v1.FsStats 20, // 13: kube.v1.RuntimeStats.container_fs:type_name -> kube.v1.FsStats - 1, // 14: kube.v1.KubeAPI.GetClusterInfo:input_type -> kube.v1.GetClusterInfoRequest - 3, // 15: kube.v1.KubeAPI.GetIPInfo:input_type -> 
kube.v1.GetIPInfoRequest - 5, // 16: kube.v1.KubeAPI.GetIPsInfo:input_type -> kube.v1.GetIPsInfoRequest - 8, // 17: kube.v1.KubeAPI.GetPod:input_type -> kube.v1.GetPodRequest - 11, // 18: kube.v1.KubeAPI.GetNode:input_type -> kube.v1.GetNodeRequest - 14, // 19: kube.v1.KubeAPI.GetNodeStatsSummary:input_type -> kube.v1.GetNodeStatsSummaryRequest - 2, // 20: kube.v1.KubeAPI.GetClusterInfo:output_type -> kube.v1.GetClusterInfoResponse - 4, // 21: kube.v1.KubeAPI.GetIPInfo:output_type -> kube.v1.GetIPInfoResponse - 6, // 22: kube.v1.KubeAPI.GetIPsInfo:output_type -> kube.v1.GetIPsInfoResponse - 9, // 23: kube.v1.KubeAPI.GetPod:output_type -> kube.v1.GetPodResponse - 12, // 24: kube.v1.KubeAPI.GetNode:output_type -> kube.v1.GetNodeResponse - 15, // 25: kube.v1.KubeAPI.GetNodeStatsSummary:output_type -> kube.v1.GetNodeStatsSummaryResponse - 20, // [20:26] is the sub-list for method output_type - 14, // [14:20] is the sub-list for method input_type - 14, // [14:14] is the sub-list for extension type_name - 14, // [14:14] is the sub-list for extension extendee - 0, // [0:14] is the sub-list for field type_name + 24, // 14: kube.v1.GetPodVolumesResponse.volumes:type_name -> kube.v1.PodVolumeInfo + 1, // 15: kube.v1.KubeAPI.GetClusterInfo:input_type -> kube.v1.GetClusterInfoRequest + 3, // 16: kube.v1.KubeAPI.GetIPInfo:input_type -> kube.v1.GetIPInfoRequest + 5, // 17: kube.v1.KubeAPI.GetIPsInfo:input_type -> kube.v1.GetIPsInfoRequest + 8, // 18: kube.v1.KubeAPI.GetPod:input_type -> kube.v1.GetPodRequest + 11, // 19: kube.v1.KubeAPI.GetNode:input_type -> kube.v1.GetNodeRequest + 14, // 20: kube.v1.KubeAPI.GetNodeStatsSummary:input_type -> kube.v1.GetNodeStatsSummaryRequest + 22, // 21: kube.v1.KubeAPI.GetPodVolumes:input_type -> kube.v1.GetPodVolumesRequest + 2, // 22: kube.v1.KubeAPI.GetClusterInfo:output_type -> kube.v1.GetClusterInfoResponse + 4, // 23: kube.v1.KubeAPI.GetIPInfo:output_type -> kube.v1.GetIPInfoResponse + 6, // 24: kube.v1.KubeAPI.GetIPsInfo:output_type -> 
kube.v1.GetIPsInfoResponse + 9, // 25: kube.v1.KubeAPI.GetPod:output_type -> kube.v1.GetPodResponse + 12, // 26: kube.v1.KubeAPI.GetNode:output_type -> kube.v1.GetNodeResponse + 15, // 27: kube.v1.KubeAPI.GetNodeStatsSummary:output_type -> kube.v1.GetNodeStatsSummaryResponse + 23, // 28: kube.v1.KubeAPI.GetPodVolumes:output_type -> kube.v1.GetPodVolumesResponse + 22, // [22:29] is the sub-list for method output_type + 15, // [15:22] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name } func init() { file_api_v1_kube_kube_api_proto_init() } @@ -1517,7 +1792,7 @@ func file_api_v1_kube_kube_api_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_api_v1_kube_kube_api_proto_rawDesc), len(file_api_v1_kube_kube_api_proto_rawDesc)), NumEnums: 1, - NumMessages: 22, + NumMessages: 25, NumExtensions: 0, NumServices: 1, }, diff --git a/api/v1/kube/kube_api.proto b/api/v1/kube/kube_api.proto index 8dea7f63..259126d6 100644 --- a/api/v1/kube/kube_api.proto +++ b/api/v1/kube/kube_api.proto @@ -12,6 +12,7 @@ service KubeAPI { rpc GetPod(GetPodRequest) returns (GetPodResponse); rpc GetNode(GetNodeRequest) returns (GetNodeResponse); rpc GetNodeStatsSummary(GetNodeStatsSummaryRequest) returns (GetNodeStatsSummaryResponse); + rpc GetPodVolumes(GetPodVolumesRequest) returns (GetPodVolumesResponse); } message GetClusterInfoRequest {} @@ -145,3 +146,29 @@ message RuntimeStats { FsStats image_fs = 2; FsStats container_fs = 3; } + +message GetPodVolumesRequest { + string node_name = 1; +} + +message GetPodVolumesResponse { + repeated PodVolumeInfo volumes = 1; +} + +message PodVolumeInfo { + string namespace = 1; + string pod_name = 2; + string pod_uid = 3; + string controller_kind = 4; + string controller_name = 5; + string container_name = 6; + string volume_name = 7; + string 
mount_path = 8; + string pvc_name = 9; + string pvc_uid = 10; + int64 requested_size_bytes = 11; + string pv_name = 12; + string storage_class = 13; + string csi_driver = 14; + string csi_volume_handle = 15; +} diff --git a/api/v1/kube/kube_api_grpc.pb.go b/api/v1/kube/kube_api_grpc.pb.go index 119fcbb1..a85380b5 100644 --- a/api/v1/kube/kube_api_grpc.pb.go +++ b/api/v1/kube/kube_api_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.5.1 +// - protoc-gen-go-grpc v1.6.0 // - protoc v6.33.0 // source: api/v1/kube/kube_api.proto @@ -25,6 +25,7 @@ const ( KubeAPI_GetPod_FullMethodName = "/kube.v1.KubeAPI/GetPod" KubeAPI_GetNode_FullMethodName = "/kube.v1.KubeAPI/GetNode" KubeAPI_GetNodeStatsSummary_FullMethodName = "/kube.v1.KubeAPI/GetNodeStatsSummary" + KubeAPI_GetPodVolumes_FullMethodName = "/kube.v1.KubeAPI/GetPodVolumes" ) // KubeAPIClient is the client API for KubeAPI service. @@ -38,6 +39,7 @@ type KubeAPIClient interface { GetPod(ctx context.Context, in *GetPodRequest, opts ...grpc.CallOption) (*GetPodResponse, error) GetNode(ctx context.Context, in *GetNodeRequest, opts ...grpc.CallOption) (*GetNodeResponse, error) GetNodeStatsSummary(ctx context.Context, in *GetNodeStatsSummaryRequest, opts ...grpc.CallOption) (*GetNodeStatsSummaryResponse, error) + GetPodVolumes(ctx context.Context, in *GetPodVolumesRequest, opts ...grpc.CallOption) (*GetPodVolumesResponse, error) } type kubeAPIClient struct { @@ -108,6 +110,16 @@ func (c *kubeAPIClient) GetNodeStatsSummary(ctx context.Context, in *GetNodeStat return out, nil } +func (c *kubeAPIClient) GetPodVolumes(ctx context.Context, in *GetPodVolumesRequest, opts ...grpc.CallOption) (*GetPodVolumesResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetPodVolumesResponse) + err := c.cc.Invoke(ctx, KubeAPI_GetPodVolumes_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + // KubeAPIServer is the server API for KubeAPI service. // All implementations should embed UnimplementedKubeAPIServer // for forward compatibility. @@ -119,6 +131,7 @@ type KubeAPIServer interface { GetPod(context.Context, *GetPodRequest) (*GetPodResponse, error) GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error) GetNodeStatsSummary(context.Context, *GetNodeStatsSummaryRequest) (*GetNodeStatsSummaryResponse, error) + GetPodVolumes(context.Context, *GetPodVolumesRequest) (*GetPodVolumesResponse, error) } // UnimplementedKubeAPIServer should be embedded to have @@ -129,22 +142,25 @@ type KubeAPIServer interface { type UnimplementedKubeAPIServer struct{} func (UnimplementedKubeAPIServer) GetClusterInfo(context.Context, *GetClusterInfoRequest) (*GetClusterInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetClusterInfo not implemented") + return nil, status.Error(codes.Unimplemented, "method GetClusterInfo not implemented") } func (UnimplementedKubeAPIServer) GetIPInfo(context.Context, *GetIPInfoRequest) (*GetIPInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetIPInfo not implemented") + return nil, status.Error(codes.Unimplemented, "method GetIPInfo not implemented") } func (UnimplementedKubeAPIServer) GetIPsInfo(context.Context, *GetIPsInfoRequest) (*GetIPsInfoResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetIPsInfo not implemented") + return nil, status.Error(codes.Unimplemented, "method GetIPsInfo not implemented") } func (UnimplementedKubeAPIServer) GetPod(context.Context, *GetPodRequest) (*GetPodResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetPod not implemented") + return nil, status.Error(codes.Unimplemented, "method GetPod not implemented") } func (UnimplementedKubeAPIServer) GetNode(context.Context, *GetNodeRequest) (*GetNodeResponse, error) { - return nil, 
status.Errorf(codes.Unimplemented, "method GetNode not implemented") + return nil, status.Error(codes.Unimplemented, "method GetNode not implemented") } func (UnimplementedKubeAPIServer) GetNodeStatsSummary(context.Context, *GetNodeStatsSummaryRequest) (*GetNodeStatsSummaryResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetNodeStatsSummary not implemented") + return nil, status.Error(codes.Unimplemented, "method GetNodeStatsSummary not implemented") +} +func (UnimplementedKubeAPIServer) GetPodVolumes(context.Context, *GetPodVolumesRequest) (*GetPodVolumesResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetPodVolumes not implemented") } func (UnimplementedKubeAPIServer) testEmbeddedByValue() {} @@ -156,7 +172,7 @@ type UnsafeKubeAPIServer interface { } func RegisterKubeAPIServer(s grpc.ServiceRegistrar, srv KubeAPIServer) { - // If the following call pancis, it indicates UnimplementedKubeAPIServer was + // If the following call panics, it indicates UnimplementedKubeAPIServer was // embedded by pointer and is nil. This will cause panics if an // unimplemented method is ever invoked, so we test this at initialization // time to prevent it from happening at runtime later due to I/O. 
@@ -274,6 +290,24 @@ func _KubeAPI_GetNodeStatsSummary_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } +func _KubeAPI_GetPodVolumes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetPodVolumesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KubeAPIServer).GetPodVolumes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: KubeAPI_GetPodVolumes_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KubeAPIServer).GetPodVolumes(ctx, req.(*GetPodVolumesRequest)) + } + return interceptor(ctx, in, info, handler) +} + // KubeAPI_ServiceDesc is the grpc.ServiceDesc for KubeAPI service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -305,6 +339,10 @@ var KubeAPI_ServiceDesc = grpc.ServiceDesc{ MethodName: "GetNodeStatsSummary", Handler: _KubeAPI_GetNodeStatsSummary_Handler, }, + { + MethodName: "GetPodVolumes", + Handler: _KubeAPI_GetPodVolumes_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "api/v1/kube/kube_api.proto", diff --git a/api/v1/runtime/common.pb.go b/api/v1/runtime/common.pb.go index 07e18317..2fc4d5d5 100644 --- a/api/v1/runtime/common.pb.go +++ b/api/v1/runtime/common.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.10 +// protoc-gen-go v1.36.11 // protoc v6.33.0 // source: api/v1/runtime/common.proto diff --git a/api/v1/runtime/runtime_agent_api.pb.go b/api/v1/runtime/runtime_agent_api.pb.go index b062fc9e..3d6b1145 100644 --- a/api/v1/runtime/runtime_agent_api.pb.go +++ b/api/v1/runtime/runtime_agent_api.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.4 -// protoc v5.29.3 +// protoc-gen-go v1.36.11 +// protoc v6.33.0 // source: api/v1/runtime/runtime_agent_api.proto package v1 @@ -2618,465 +2618,234 @@ func (x *ProcessTreeEvent) GetEvents() []*ProcessEvent { var File_api_v1_runtime_runtime_agent_api_proto protoreflect.FileDescriptor -var file_api_v1_runtime_runtime_agent_api_proto_rawDesc = string([]byte{ - 0x0a, 0x26, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, - 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x61, - 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, - 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x23, 0x61, 0x70, 0x69, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x61, 0x70, 0x69, 0x2f, 0x76, - 0x31, 0x2f, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x48, 0x0a, 0x15, 0x57, 0x72, 0x69, 0x74, 0x65, 0x44, - 0x61, 0x74, 0x61, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, - 0x2f, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x61, 0x74, 0x61, - 0x42, 0x61, 0x74, 0x63, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, - 0x22, 0x18, 0x0a, 0x16, 0x57, 0x72, 0x69, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x42, 0x61, 0x74, - 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xd4, 0x02, 0x0a, 0x0d, 0x44, - 0x61, 0x74, 0x61, 0x42, 0x61, 0x74, 0x63, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x45, 0x0a, 0x0f, - 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, - 0x0a, 0x20, 0x01, 0x28, 0x0b, 
0x32, 0x1a, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, - 0x73, 0x48, 0x00, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x53, 0x74, - 0x61, 0x74, 0x73, 0x12, 0x36, 0x0a, 0x0a, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, - 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, - 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x48, 0x00, - 0x52, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x48, 0x0a, 0x10, 0x63, - 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, - 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x73, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x6e, 0x65, 0x74, 0x66, 0x6c, 0x6f, 0x77, - 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x4e, 0x65, 0x74, 0x66, 0x6c, 0x6f, 0x77, 0x48, 0x00, 0x52, 0x07, 0x6e, - 0x65, 0x74, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x41, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, - 0x73, 0x5f, 0x74, 0x72, 0x65, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x72, - 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, - 0x73, 0x54, 0x72, 0x65, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x72, - 0x6f, 0x63, 0x65, 0x73, 0x73, 0x54, 0x72, 0x65, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, - 0x61, 0x22, 0x71, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x12, 0x20, 0x0a, 0x0a, - 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, - 0x48, 0x00, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x6c, 0x65, 0x72, 0x12, 0x16, - 0x0a, 0x05, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, - 0x05, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x42, 0x10, 0x0a, 0x0e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, - 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, - 0x08, 0x02, 0x10, 0x03, 0x22, 0x4d, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x31, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x19, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x22, 0x0f, 0x0a, 0x0d, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x42, 0x0a, 0x0f, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x49, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x81, 0x06, 0x0a, 0x0f, 0x43, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x1b, 0x0a, 0x09, - 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, - 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, - 0x6d, 
0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x6c, - 0x6f, 0x61, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, - 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x69, 0x64, 0x12, - 0x3d, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6b, 0x69, 0x6e, 0x64, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x18, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x4b, 0x69, 0x6e, 0x64, - 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x19, - 0x0a, 0x08, 0x70, 0x6f, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x70, 0x6f, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x6f, 0x64, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6f, 0x64, 0x55, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, - 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x0e, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0b, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, - 0x52, 0x0a, 0x0d, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 
0x2d, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x73, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0c, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x12, 0x61, 0x0a, 0x12, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x32, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x11, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6e, 0x6e, 0x6f, 0x74, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, - 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x63, 0x67, 0x72, 0x6f, 0x75, - 0x70, 0x49, 0x64, 0x12, 0x30, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0d, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, - 0x69, 0x74, 0x65, 0x6d, 0x73, 0x1a, 0x3f, 0x0a, 0x11, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, - 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x44, 0x0a, 0x16, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 
0x74, 0x72, 0x79, - 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xae, 0x06, 0x0a, - 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, - 0x34, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x12, 0x2c, 0x0a, 0x12, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x10, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, - 0x65, 0x12, 0x19, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x70, 0x69, 0x64, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x0d, 0x52, 0x07, 0x68, 0x6f, 0x73, 0x74, 0x50, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, - 0x70, 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, 0x12, - 0x0a, 0x04, 0x70, 0x70, 0x69, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x70, - 0x69, 0x64, 0x12, 0x39, 0x0a, 0x19, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, - 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x16, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x50, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x21, 0x0a, - 0x0c, 0x70, 0x72, 
0x6f, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x26, 0x0a, 0x04, 0x65, 0x78, 0x65, 0x63, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, - 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, - 0x48, 0x00, 0x52, 0x04, 0x65, 0x78, 0x65, 0x63, 0x12, 0x23, 0x0a, 0x03, 0x64, 0x6e, 0x73, 0x18, - 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x44, 0x4e, 0x53, 0x48, 0x00, 0x52, 0x03, 0x64, 0x6e, 0x73, 0x12, 0x26, 0x0a, - 0x04, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x72, 0x75, - 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x48, 0x00, 0x52, - 0x04, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x29, 0x0a, 0x05, 0x74, 0x75, 0x70, 0x6c, 0x65, 0x18, 0x18, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x54, 0x75, 0x70, 0x6c, 0x65, 0x48, 0x00, 0x52, 0x05, 0x74, 0x75, 0x70, 0x6c, 0x65, - 0x12, 0x3a, 0x0a, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x18, 0x19, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x53, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x48, - 0x00, 0x52, 0x09, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x12, 0x23, 0x0a, 0x03, - 0x61, 0x6e, 0x79, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x72, 0x75, 0x6e, 0x74, - 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x03, 0x61, 0x6e, - 0x79, 0x12, 0x4d, 0x0a, 0x10, 0x73, 0x74, 0x64, 0x69, 0x6f, 0x5f, 0x76, 0x69, 0x61, 0x5f, 0x73, - 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x72, 0x75, - 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 
0x53, 0x74, 0x64, 0x69, 0x6f, 0x56, 0x69, - 0x61, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, 0x46, 0x69, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x48, 0x00, - 0x52, 0x0e, 0x73, 0x74, 0x64, 0x69, 0x6f, 0x56, 0x69, 0x61, 0x53, 0x6f, 0x63, 0x6b, 0x65, 0x74, - 0x12, 0x27, 0x0a, 0x03, 0x73, 0x73, 0x68, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, - 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x53, 0x48, 0x44, 0x61, - 0x74, 0x61, 0x48, 0x00, 0x52, 0x03, 0x73, 0x73, 0x68, 0x12, 0x3c, 0x0a, 0x0c, 0x70, 0x72, 0x6f, - 0x63, 0x65, 0x73, 0x73, 0x5f, 0x66, 0x6f, 0x72, 0x6b, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x17, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, - 0x63, 0x65, 0x73, 0x73, 0x46, 0x6f, 0x72, 0x6b, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x63, - 0x65, 0x73, 0x73, 0x46, 0x6f, 0x72, 0x6b, 0x12, 0x3c, 0x0a, 0x0c, 0x70, 0x72, 0x6f, 0x63, 0x65, - 0x73, 0x73, 0x5f, 0x65, 0x78, 0x69, 0x74, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, - 0x73, 0x73, 0x45, 0x78, 0x69, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, - 0x73, 0x45, 0x78, 0x69, 0x74, 0x42, 0x06, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x15, 0x0a, - 0x13, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x12, 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x4c, 0x6f, 0x67, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x97, 0x05, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x6e, - 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x6f, 0x64, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x6f, 
0x64, - 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, - 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, - 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x4e, 0x61, 0x6d, 0x65, - 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x6b, 0x69, 0x6e, - 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, - 0x64, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x70, 0x6f, 0x64, 0x5f, 0x75, 0x69, 0x64, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6f, 0x64, 0x55, 0x69, 0x64, 0x12, 0x21, - 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, - 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x08, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, - 0x0a, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x09, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x55, 0x69, - 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, - 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x44, 0x69, - 0x67, 0x65, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, - 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x63, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, - 0x64, 0x12, 0x31, 0x0a, 0x09, 0x63, 0x70, 0x75, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x15, - 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x14, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x43, 0x70, 0x75, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x08, 0x63, 0x70, 0x75, 0x53, - 0x74, 0x61, 0x74, 0x73, 0x12, 0x3a, 0x0a, 0x0c, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x73, - 0x74, 0x61, 0x74, 0x73, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x75, 0x6e, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x74, - 0x61, 0x74, 0x73, 0x52, 0x0b, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, - 0x12, 0x34, 0x0a, 0x0a, 0x70, 0x69, 0x64, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x17, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x50, 0x69, 0x64, 0x73, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x09, 0x70, 0x69, 0x64, - 0x73, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x2e, 0x0a, 0x08, 0x69, 0x6f, 0x5f, 0x73, 0x74, 0x61, - 0x74, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, - 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x4f, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x07, 0x69, - 0x6f, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x4a, 0x0a, 0x12, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x5f, - 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x19, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x46, 0x69, 0x6c, 0x65, 0x73, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x53, 0x74, 0x61, 0x74, 0x73, - 0x52, 0x10, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x53, 0x74, 0x61, - 0x74, 0x73, 0x22, 0x3e, 0x0a, 0x10, 0x46, 0x69, 0x6c, 0x65, 0x73, 0x41, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x12, 0x14, 0x0a, 0x05, - 0x72, 0x65, 0x61, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0d, 
0x52, 0x05, 0x72, 0x65, 0x61, - 0x64, 0x73, 0x22, 0xc7, 0x01, 0x0a, 0x09, 0x4e, 0x6f, 0x64, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, - 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x31, 0x0a, - 0x09, 0x63, 0x70, 0x75, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x70, - 0x75, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x08, 0x63, 0x70, 0x75, 0x53, 0x74, 0x61, 0x74, 0x73, - 0x12, 0x3a, 0x0a, 0x0c, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, - 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, - 0x0b, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x2e, 0x0a, 0x08, - 0x69, 0x6f, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x4f, 0x53, 0x74, - 0x61, 0x74, 0x73, 0x52, 0x07, 0x69, 0x6f, 0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0x99, 0x03, 0x0a, - 0x0d, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1d, - 0x0a, 0x0a, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x19, 0x0a, - 0x08, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x69, 0x6d, 0x61, 0x67, - 0x65, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x69, 0x6d, 0x61, 0x67, 0x65, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x69, - 0x6e, 
0x64, 0x65, 0x78, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x44, 0x69, 0x67, 0x65, 0x73, 0x74, 0x12, 0x22, - 0x0a, 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, - 0x72, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x6f, 0x73, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x73, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x63, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x08, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, - 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6d, 0x61, 0x6e, 0x69, 0x66, 0x65, 0x73, - 0x74, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x1d, 0x0a, 0x1b, 0x49, 0x6d, 0x61, 0x67, - 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 
0x22, 0x32, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x53, 0x79, - 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1b, - 0x0a, 0x09, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x08, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x49, 0x64, 0x73, 0x22, 0x4b, 0x0a, 0x14, 0x47, - 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x06, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x22, 0x6e, 0x0a, 0x0f, 0x49, 0x6d, 0x61, 0x67, - 0x65, 0x73, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x66, - 0x75, 0x6c, 0x6c, 0x5f, 0x72, 0x65, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, - 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x12, 0x66, 0x75, 0x6c, 0x6c, 0x52, - 0x65, 0x73, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x29, 0x0a, - 0x06, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, - 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x61, 0x67, 0x65, - 0x52, 0x06, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x73, 0x22, 0xcf, 0x01, 0x0a, 0x05, 0x49, 0x6d, 0x61, - 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, - 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x72, - 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x12, 0x21, 0x0a, 
0x0c, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x73, 0x12, 0x3c, 0x0a, - 0x0b, 0x73, 0x63, 0x61, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x49, 0x6d, 0x61, 0x67, 0x65, 0x53, 0x63, 0x61, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x0a, 0x73, 0x63, 0x61, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, - 0x63, 0x61, 0x6e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x73, 0x63, 0x61, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x68, 0x0a, 0x16, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x75, 0x6c, 0x6c, 0x5f, 0x73, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x66, 0x75, 0x6c, - 0x6c, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x29, 0x0a, 0x06, 0x69, 0x6d, 0x61, - 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x72, 0x75, 0x6e, 0x74, - 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x52, 0x06, 0x69, 0x6d, - 0x61, 0x67, 0x65, 0x73, 0x22, 0x19, 0x0a, 0x17, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x79, - 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x7b, 0x0a, 0x0f, 0x4b, 0x75, 0x62, 0x65, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x52, 0x65, 0x70, 0x6f, - 0x72, 0x74, 0x12, 0x39, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x4b, 0x75, 0x62, 0x65, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x73, 
0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x73, 0x12, 0x2d, 0x0a, - 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x72, 0x75, - 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x75, 0x62, 0x65, 0x42, 0x65, 0x6e, - 0x63, 0x68, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x22, 0x1f, 0x0a, 0x1d, - 0x4b, 0x75, 0x62, 0x65, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x49, - 0x6e, 0x67, 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4f, 0x0a, - 0x0d, 0x4b, 0x75, 0x62, 0x65, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1b, - 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x69, 0x64, 0x22, 0x61, - 0x0a, 0x11, 0x4b, 0x75, 0x62, 0x65, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x43, 0x6f, 0x6e, 0x74, 0x72, - 0x6f, 0x6c, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, - 0x06, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x75, 0x62, 0x65, 0x42, - 0x65, 0x6e, 0x63, 0x68, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x06, 0x67, 0x72, 0x6f, 0x75, 0x70, - 0x73, 0x22, 0x44, 0x0a, 0x0e, 0x4b, 0x75, 0x62, 0x65, 0x42, 0x65, 0x6e, 0x63, 0x68, 0x47, 0x72, - 0x6f, 0x75, 0x70, 0x12, 0x32, 0x0a, 0x06, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, - 0x2e, 0x4b, 0x75, 0x62, 0x65, 0x42, 0x65, 0x6e, 0x63, 
0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x52, - 0x06, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x22, 0x67, 0x0a, 0x0e, 0x4b, 0x75, 0x62, 0x65, 0x42, - 0x65, 0x6e, 0x63, 0x68, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x78, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x65, 0x78, 0x74, 0x12, 0x1b, 0x0a, - 0x09, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x08, 0x74, 0x65, 0x73, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, - 0x22, 0x47, 0x0a, 0x10, 0x4b, 0x75, 0x62, 0x65, 0x4c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x65, - 0x70, 0x6f, 0x72, 0x74, 0x12, 0x33, 0x0a, 0x06, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x4b, 0x75, 0x62, 0x65, 0x4c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x52, 0x06, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x73, 0x22, 0x20, 0x0a, 0x1e, 0x4b, 0x75, 0x62, - 0x65, 0x4c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x67, - 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x64, 0x0a, 0x0f, 0x4b, - 0x75, 0x62, 0x65, 0x4c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x21, - 0x0a, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x75, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x69, - 0x64, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x61, 0x73, 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x06, 0x70, 0x61, 0x73, 0x73, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x61, 0x69, - 0x6c, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x66, 0x61, 0x69, 0x6c, 
0x65, - 0x64, 0x22, 0xc7, 0x01, 0x0a, 0x07, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x10, 0x0a, - 0x03, 0x70, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x03, 0x70, 0x69, 0x64, 0x12, - 0x12, 0x0a, 0x04, 0x70, 0x70, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, - 0x70, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x12, - 0x0a, 0x04, 0x61, 0x72, 0x67, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x61, 0x72, - 0x67, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1b, - 0x0a, 0x09, 0x65, 0x78, 0x69, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x08, 0x65, 0x78, 0x69, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x22, 0xb1, 0x01, 0x0a, 0x0c, - 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, - 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x2d, 0x0a, - 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, - 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x63, - 0x65, 0x73, 0x73, 0x52, 0x07, 
0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x31, 0x0a, 0x06, - 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x19, 0x2e, 0x72, - 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, - 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x5e, 0x0a, 0x10, 0x50, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x54, 0x72, 0x65, 0x65, 0x45, 0x76, - 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x12, 0x30, 0x0a, - 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, - 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x65, - 0x73, 0x73, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2a, - 0xf0, 0x02, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, - 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x56, - 0x45, 0x4e, 0x54, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x45, 0x56, - 0x45, 0x4e, 0x54, 0x5f, 0x44, 0x4e, 0x53, 0x10, 0x02, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x56, 0x45, - 0x4e, 0x54, 0x5f, 0x54, 0x43, 0x50, 0x5f, 0x43, 0x4f, 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x10, 0x03, - 0x12, 0x1b, 0x0a, 0x17, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x43, 0x50, 0x5f, 0x43, 0x4f, - 0x4e, 0x4e, 0x45, 0x43, 0x54, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x04, 0x12, 0x14, 0x0a, - 0x10, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x43, 0x50, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x45, - 0x4e, 0x10, 0x05, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x46, 0x49, 0x4c, - 0x45, 0x5f, 0x43, 0x48, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x56, - 0x45, 0x4e, 0x54, 0x5f, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 
0x5f, 0x4f, 0x4f, 0x4d, 0x10, - 0x07, 0x12, 0x15, 0x0a, 0x11, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x4d, 0x41, 0x47, 0x49, 0x43, - 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, 0x10, 0x08, 0x12, 0x13, 0x0a, 0x0f, 0x45, 0x56, 0x45, 0x4e, - 0x54, 0x5f, 0x53, 0x49, 0x47, 0x4e, 0x41, 0x54, 0x55, 0x52, 0x45, 0x10, 0x09, 0x12, 0x13, 0x0a, - 0x0f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x54, 0x54, 0x59, 0x5f, 0x57, 0x52, 0x49, 0x54, 0x45, - 0x10, 0x0a, 0x12, 0x1a, 0x0a, 0x16, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x54, 0x44, 0x49, - 0x4f, 0x5f, 0x56, 0x49, 0x41, 0x5f, 0x53, 0x4f, 0x43, 0x4b, 0x45, 0x54, 0x10, 0x0b, 0x12, 0x0d, - 0x0a, 0x09, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x53, 0x48, 0x10, 0x0c, 0x12, 0x16, 0x0a, - 0x12, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x46, - 0x4f, 0x52, 0x4b, 0x10, 0x0d, 0x12, 0x16, 0x0a, 0x12, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x50, - 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x45, 0x58, 0x49, 0x54, 0x10, 0x0e, 0x12, 0x13, 0x0a, - 0x0f, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x46, 0x49, 0x4c, 0x45, 0x5f, 0x4f, 0x50, 0x45, 0x4e, - 0x10, 0x0f, 0x12, 0x0e, 0x0a, 0x09, 0x45, 0x56, 0x45, 0x4e, 0x54, 0x5f, 0x41, 0x4e, 0x59, 0x10, - 0xe7, 0x07, 0x2a, 0xcf, 0x01, 0x0a, 0x0c, 0x57, 0x6f, 0x72, 0x6b, 0x6c, 0x6f, 0x61, 0x64, 0x4b, - 0x69, 0x6e, 0x64, 0x12, 0x19, 0x0a, 0x15, 0x57, 0x4f, 0x52, 0x4b, 0x4c, 0x4f, 0x41, 0x44, 0x5f, - 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1c, - 0x0a, 0x18, 0x57, 0x4f, 0x52, 0x4b, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, - 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59, 0x4d, 0x45, 0x4e, 0x54, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, - 0x57, 0x4f, 0x52, 0x4b, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x52, 0x45, - 0x50, 0x4c, 0x49, 0x43, 0x41, 0x5f, 0x53, 0x45, 0x54, 0x10, 0x02, 0x12, 0x1e, 0x0a, 0x1a, 0x57, - 0x4f, 0x52, 0x4b, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x53, 0x54, 0x41, - 0x54, 
0x45, 0x46, 0x55, 0x4c, 0x5f, 0x53, 0x45, 0x54, 0x10, 0x03, 0x12, 0x15, 0x0a, 0x11, 0x57, - 0x4f, 0x52, 0x4b, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x4a, 0x4f, 0x42, - 0x10, 0x04, 0x12, 0x19, 0x0a, 0x15, 0x57, 0x4f, 0x52, 0x4b, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x4b, - 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x52, 0x4f, 0x4e, 0x4a, 0x4f, 0x42, 0x10, 0x05, 0x12, 0x15, 0x0a, - 0x11, 0x57, 0x4f, 0x52, 0x4b, 0x4c, 0x4f, 0x41, 0x44, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x50, - 0x4f, 0x44, 0x10, 0x06, 0x2a, 0x90, 0x01, 0x0a, 0x0f, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x53, 0x63, - 0x61, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1d, 0x0a, 0x19, 0x49, 0x4d, 0x41, 0x47, - 0x45, 0x5f, 0x53, 0x43, 0x41, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x55, 0x4e, - 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x49, 0x4d, 0x41, 0x47, 0x45, - 0x5f, 0x53, 0x43, 0x41, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x53, 0x43, 0x41, - 0x4e, 0x4e, 0x45, 0x44, 0x10, 0x01, 0x12, 0x1d, 0x0a, 0x19, 0x49, 0x4d, 0x41, 0x47, 0x45, 0x5f, - 0x53, 0x43, 0x41, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, - 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x20, 0x0a, 0x1c, 0x49, 0x4d, 0x41, 0x47, 0x45, 0x5f, 0x53, - 0x43, 0x41, 0x4e, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x53, 0x43, 0x41, 0x4e, 0x5f, - 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x2a, 0x76, 0x0a, 0x0d, 0x50, 0x72, 0x6f, 0x63, 0x65, - 0x73, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x16, 0x50, 0x52, 0x4f, 0x43, - 0x45, 0x53, 0x53, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, - 0x57, 0x4e, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x5f, - 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x45, 0x58, 0x45, 0x43, 0x10, 0x01, 0x12, 0x17, 0x0a, - 0x13, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, 0x53, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, - 0x46, 0x4f, 0x52, 0x4b, 0x10, 0x02, 0x12, 
0x17, 0x0a, 0x13, 0x50, 0x52, 0x4f, 0x43, 0x45, 0x53, - 0x53, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x45, 0x58, 0x49, 0x54, 0x10, 0x03, 0x32, - 0xfc, 0x05, 0x0a, 0x17, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, - 0x69, 0x74, 0x79, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x50, 0x49, 0x12, 0x5f, 0x0a, 0x10, 0x47, - 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x23, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, - 0x31, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0f, - 0x4c, 0x6f, 0x67, 0x73, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x12, - 0x14, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x6f, 0x67, - 0x45, 0x76, 0x65, 0x6e, 0x74, 0x1a, 0x1f, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x12, 0x59, 0x0a, 0x0e, 0x57, 0x72, - 0x69, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x42, 0x61, 0x74, 0x63, 0x68, 0x12, 0x21, 0x2e, 0x72, - 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x44, - 0x61, 0x74, 0x61, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x22, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x69, - 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x42, 0x61, 0x74, 0x63, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x53, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x53, 
0x79, 0x6e, 0x63, - 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, - 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, - 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5c, 0x0a, 0x0f, 0x55, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x22, 0x2e, - 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x23, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5b, 0x0a, 0x13, 0x49, 0x6d, 0x61, 0x67, - 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x12, - 0x19, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x61, - 0x67, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x27, 0x2e, 0x72, 0x75, 0x6e, - 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x61, 0x0a, 0x15, 0x4b, 0x75, 0x62, 0x65, 0x42, 0x65, 0x6e, - 0x63, 0x68, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x12, 0x1b, - 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x75, 0x62, 0x65, - 0x42, 0x65, 0x6e, 0x63, 0x68, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x1a, 0x29, 0x2e, 0x72, 0x75, - 0x6e, 0x74, 0x69, 
0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x75, 0x62, 0x65, 0x42, 0x65, 0x6e, - 0x63, 0x68, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x67, 0x65, 0x73, 0x74, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x64, 0x0a, 0x16, 0x4b, 0x75, 0x62, 0x65, - 0x4c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, 0x67, 0x65, - 0x73, 0x74, 0x12, 0x1c, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, - 0x4b, 0x75, 0x62, 0x65, 0x4c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, - 0x1a, 0x2a, 0x2e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x75, - 0x62, 0x65, 0x4c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x49, 0x6e, - 0x67, 0x65, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x2a, - 0x5a, 0x28, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x61, 0x73, - 0x74, 0x61, 0x69, 0x2f, 0x6b, 0x76, 0x69, 0x73, 0x6f, 0x72, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x2f, - 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, -}) +const file_api_v1_runtime_runtime_agent_api_proto_rawDesc = "" + + "\n" + + "&api/v1/runtime/runtime_agent_api.proto\x12\n" + + "runtime.v1\x1a#api/google/protobuf/timestamp.proto\x1a\x1bapi/v1/runtime/common.proto\"H\n" + + "\x15WriteDataBatchRequest\x12/\n" + + "\x05items\x18\n" + + " \x03(\v2\x19.runtime.v1.DataBatchItemR\x05items\"\x18\n" + + "\x16WriteDataBatchResponse\"\xd4\x02\n" + + "\rDataBatchItem\x12E\n" + + "\x0fcontainer_stats\x18\n" + + " \x01(\v2\x1a.runtime.v1.ContainerStatsH\x00R\x0econtainerStats\x126\n" + + "\n" + + "node_stats\x18\v \x01(\v2\x15.runtime.v1.NodeStatsH\x00R\tnodeStats\x12H\n" + + "\x10container_events\x18\f \x01(\v2\x1b.runtime.v1.ContainerEventsH\x00R\x0fcontainerEvents\x12/\n" + + "\anetflow\x18\r \x01(\v2\x13.runtime.v1.NetflowH\x00R\anetflow\x12A\n" + + 
"\fprocess_tree\x18\x0e \x01(\v2\x1c.runtime.v1.ProcessTreeEventH\x00R\vprocessTreeB\x06\n" + + "\x04data\"q\n" + + "\x17GetConfigurationRequest\x12 \n" + + "\n" + + "controller\x18\x03 \x01(\fH\x00R\n" + + "controller\x12\x16\n" + + "\x05agent\x18\x04 \x01(\fH\x00R\x05agentB\x10\n" + + "\x0ecurrent_configJ\x04\b\x01\x10\x02J\x04\b\x02\x10\x03\"M\n" + + "\x18GetConfigurationResponse\x121\n" + + "\x06config\x18\x01 \x01(\v2\x19.runtime.v1.ConfigurationR\x06config\"\x0f\n" + + "\rConfiguration\"B\n" + + "\x0fProcessIdentity\x12\x10\n" + + "\x03pid\x18\x01 \x01(\rR\x03pid\x12\x1d\n" + + "\n" + + "start_time\x18\x02 \x01(\x04R\tstartTime\"\x81\x06\n" + + "\x0fContainerEvents\x12\x1b\n" + + "\tnode_name\x18\x01 \x01(\tR\bnodeName\x12\x1c\n" + + "\tnamespace\x18\x02 \x01(\tR\tnamespace\x12#\n" + + "\rworkload_name\x18\x03 \x01(\tR\fworkloadName\x12!\n" + + "\fworkload_uid\x18\x04 \x01(\tR\vworkloadUid\x12=\n" + + "\rworkload_kind\x18\x05 \x01(\x0e2\x18.runtime.v1.WorkloadKindR\fworkloadKind\x12\x19\n" + + "\bpod_name\x18\x06 \x01(\tR\apodName\x12%\n" + + "\x0econtainer_name\x18\a \x01(\tR\rcontainerName\x12!\n" + + "\fcontainer_id\x18\b \x01(\tR\vcontainerId\x12\x17\n" + + "\apod_uid\x18\t \x01(\tR\x06podUid\x12!\n" + + "\fimage_digest\x18\x0e \x01(\tR\vimageDigest\x12R\n" + + "\robject_labels\x18\n" + + " \x03(\v2-.runtime.v1.ContainerEvents.ObjectLabelsEntryR\fobjectLabels\x12a\n" + + "\x12object_annotations\x18\v \x03(\v22.runtime.v1.ContainerEvents.ObjectAnnotationsEntryR\x11objectAnnotations\x12\x1b\n" + + "\tcgroup_id\x18\f \x01(\x04R\bcgroupId\x120\n" + + "\x05items\x18\r \x03(\v2\x1a.runtime.v1.ContainerEventR\x05items\x1a?\n" + + "\x11ObjectLabelsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\x1aD\n" + + "\x16ObjectAnnotationsEntry\x12\x10\n" + + "\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xae\x06\n" + + "\x0eContainerEvent\x124\n" + + "\n" + 
+ "event_type\x18\x01 \x01(\x0e2\x15.runtime.v1.EventTypeR\teventType\x12\x1c\n" + + "\ttimestamp\x18\x02 \x01(\x04R\ttimestamp\x12,\n" + + "\x12process_start_time\x18\x04 \x01(\x04R\x10processStartTime\x12\x19\n" + + "\bhost_pid\x18\x05 \x01(\rR\ahostPid\x12\x10\n" + + "\x03pid\x18\x06 \x01(\rR\x03pid\x12\x12\n" + + "\x04ppid\x18\a \x01(\rR\x04ppid\x129\n" + + "\x19process_parent_start_time\x18\b \x01(\x04R\x16processParentStartTime\x12!\n" + + "\fprocess_name\x18\t \x01(\tR\vprocessName\x12&\n" + + "\x04exec\x18\x15 \x01(\v2\x10.runtime.v1.ExecH\x00R\x04exec\x12#\n" + + "\x03dns\x18\x16 \x01(\v2\x0f.runtime.v1.DNSH\x00R\x03dns\x12&\n" + + "\x04file\x18\x17 \x01(\v2\x10.runtime.v1.FileH\x00R\x04file\x12)\n" + + "\x05tuple\x18\x18 \x01(\v2\x11.runtime.v1.TupleH\x00R\x05tuple\x12:\n" + + "\tsignature\x18\x19 \x01(\v2\x1a.runtime.v1.SignatureEventH\x00R\tsignature\x12#\n" + + "\x03any\x18\x1a \x01(\v2\x0f.runtime.v1.AnyH\x00R\x03any\x12M\n" + + "\x10stdio_via_socket\x18\x1b \x01(\v2!.runtime.v1.StdioViaSocketFindingH\x00R\x0estdioViaSocket\x12'\n" + + "\x03ssh\x18\x1c \x01(\v2\x13.runtime.v1.SSHDataH\x00R\x03ssh\x12<\n" + + "\fprocess_fork\x18\x1d \x01(\v2\x17.runtime.v1.ProcessForkH\x00R\vprocessFork\x12<\n" + + "\fprocess_exit\x18\x1e \x01(\v2\x17.runtime.v1.ProcessExitH\x00R\vprocessExitB\x06\n" + + "\x04data\"\x15\n" + + "\x13WriteStreamResponse\"\x12\n" + + "\x10SendLogsResponse\"\x97\x05\n" + + "\x0eContainerStats\x12\x1c\n" + + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12\x19\n" + + "\bpod_name\x18\x02 \x01(\tR\apodName\x12%\n" + + "\x0econtainer_name\x18\x03 \x01(\tR\rcontainerName\x12#\n" + + "\rworkload_name\x18\x04 \x01(\tR\fworkloadName\x12#\n" + + "\rworkload_kind\x18\a \x01(\tR\fworkloadKind\x12\x17\n" + + "\apod_uid\x18\x05 \x01(\tR\x06podUid\x12!\n" + + "\fcontainer_id\x18\x06 \x01(\tR\vcontainerId\x12\x1b\n" + + "\tnode_name\x18\b \x01(\tR\bnodeName\x12!\n" + + "\fworkload_uid\x18\t \x01(\tR\vworkloadUid\x12!\n" + + "\fimage_digest\x18\v 
\x01(\tR\vimageDigest\x12\x1b\n" + + "\tcgroup_id\x18\n" + + " \x01(\x04R\bcgroupId\x121\n" + + "\tcpu_stats\x18\x15 \x01(\v2\x14.runtime.v1.CpuStatsR\bcpuStats\x12:\n" + + "\fmemory_stats\x18\x16 \x01(\v2\x17.runtime.v1.MemoryStatsR\vmemoryStats\x124\n" + + "\n" + + "pids_stats\x18\x17 \x01(\v2\x15.runtime.v1.PidsStatsR\tpidsStats\x12.\n" + + "\bio_stats\x18\x18 \x01(\v2\x13.runtime.v1.IOStatsR\aioStats\x12J\n" + + "\x12files_access_stats\x18\x19 \x01(\v2\x1c.runtime.v1.FilesAccessStatsR\x10filesAccessStats\">\n" + + "\x10FilesAccessStats\x12\x14\n" + + "\x05paths\x18\x01 \x03(\tR\x05paths\x12\x14\n" + + "\x05reads\x18\x02 \x03(\rR\x05reads\"\xc7\x01\n" + + "\tNodeStats\x12\x1b\n" + + "\tnode_name\x18\x01 \x01(\tR\bnodeName\x121\n" + + "\tcpu_stats\x18\x15 \x01(\v2\x14.runtime.v1.CpuStatsR\bcpuStats\x12:\n" + + "\fmemory_stats\x18\x16 \x01(\v2\x17.runtime.v1.MemoryStatsR\vmemoryStats\x12.\n" + + "\bio_stats\x18\x17 \x01(\v2\x13.runtime.v1.IOStatsR\aioStats\"\x99\x03\n" + + "\rImageMetadata\x12\x1d\n" + + "\n" + + "image_name\x18\x01 \x01(\tR\timageName\x12\x19\n" + + "\bimage_id\x18\x02 \x01(\tR\aimageId\x12!\n" + + "\fimage_digest\x18\x03 \x01(\tR\vimageDigest\x12!\n" + + "\findex_digest\x18\r \x01(\tR\vindexDigest\x12\"\n" + + "\farchitecture\x18\x05 \x01(\tR\farchitecture\x12\x17\n" + + "\aos_name\x18\x06 \x01(\tR\x06osName\x129\n" + + "\n" + + "created_at\x18\a \x01(\v2\x1a.google.protobuf.TimestampR\tcreatedAt\x12!\n" + + "\fresource_ids\x18\b \x03(\tR\vresourceIds\x12\x1a\n" + + "\bpackages\x18\t \x01(\fR\bpackages\x12\x1a\n" + + "\bmanifest\x18\n" + + " \x01(\fR\bmanifest\x12\x14\n" + + "\x05index\x18\v \x01(\fR\x05index\x12\x1f\n" + + "\vconfig_file\x18\f \x01(\fR\n" + + "configFile\"\x1d\n" + + "\x1bImageMetadataIngestResponse\"2\n" + + "\x13GetSyncStateRequest\x12\x1b\n" + + "\timage_ids\x18\x01 \x03(\tR\bimageIds\"K\n" + + "\x14GetSyncStateResponse\x123\n" + + "\x06images\x18\x01 \x01(\v2\x1b.runtime.v1.ImagesSyncStateR\x06images\"n\n" + + 
"\x0fImagesSyncState\x120\n" + + "\x14full_resync_required\x18\x01 \x01(\bR\x12fullResyncRequired\x12)\n" + + "\x06images\x18\x02 \x03(\v2\x11.runtime.v1.ImageR\x06images\"\xcf\x01\n" + + "\x05Image\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + + "\x04name\x18\x02 \x01(\tR\x04name\x12\"\n" + + "\farchitecture\x18\x03 \x01(\tR\farchitecture\x12!\n" + + "\fresource_ids\x18\x04 \x03(\tR\vresourceIds\x12<\n" + + "\vscan_status\x18\x05 \x01(\x0e2\x1b.runtime.v1.ImageScanStatusR\n" + + "scanStatus\x12\x1d\n" + + "\n" + + "scan_error\x18\x06 \x01(\tR\tscanError\"h\n" + + "\x16UpdateSyncStateRequest\x12#\n" + + "\rfull_snapshot\x18\x01 \x01(\bR\ffullSnapshot\x12)\n" + + "\x06images\x18\x02 \x03(\v2\x11.runtime.v1.ImageR\x06images\"\x19\n" + + "\x17UpdateSyncStateResponse\"{\n" + + "\x0fKubeBenchReport\x129\n" + + "\bcontrols\x18\x01 \x03(\v2\x1d.runtime.v1.KubeBenchControlsR\bcontrols\x12-\n" + + "\x04node\x18\x02 \x01(\v2\x19.runtime.v1.KubeBenchNodeR\x04node\"\x1f\n" + + "\x1dKubeBenchReportIngestResponse\"O\n" + + "\rKubeBenchNode\x12\x1b\n" + + "\tnode_name\x18\x01 \x01(\tR\bnodeName\x12!\n" + + "\fresource_uid\x18\x02 \x01(\tR\vresourceUid\"a\n" + + "\x11KubeBenchControls\x12\x18\n" + + "\aversion\x18\x01 \x01(\tR\aversion\x122\n" + + "\x06groups\x18\x02 \x03(\v2\x1a.runtime.v1.KubeBenchGroupR\x06groups\"D\n" + + "\x0eKubeBenchGroup\x122\n" + + "\x06checks\x18\x01 \x03(\v2\x1a.runtime.v1.KubeBenchCheckR\x06checks\"g\n" + + "\x0eKubeBenchCheck\x12\x0e\n" + + "\x02id\x18\x01 \x01(\tR\x02id\x12\x12\n" + + "\x04text\x18\x02 \x01(\tR\x04text\x12\x1b\n" + + "\ttest_info\x18\x03 \x03(\tR\btestInfo\x12\x14\n" + + "\x05state\x18\x04 \x01(\tR\x05state\"G\n" + + "\x10KubeLinterReport\x123\n" + + "\x06checks\x18\x01 \x03(\v2\x1b.runtime.v1.KubeLinterCheckR\x06checks\" \n" + + "\x1eKubeLinterReportIngestResponse\"d\n" + + "\x0fKubeLinterCheck\x12!\n" + + "\fresource_uid\x18\x01 \x01(\tR\vresourceUid\x12\x16\n" + + "\x06passed\x18\x02 \x01(\x04R\x06passed\x12\x16\n" + 
+ "\x06failed\x18\x03 \x01(\x04R\x06failed\"\xc7\x01\n" + + "\aProcess\x12\x10\n" + + "\x03pid\x18\x01 \x01(\rR\x03pid\x12\x12\n" + + "\x04ppid\x18\x02 \x01(\rR\x04ppid\x12\x1d\n" + + "\n" + + "start_time\x18\x03 \x01(\x04R\tstartTime\x12*\n" + + "\x11parent_start_time\x18\x04 \x01(\x04R\x0fparentStartTime\x12\x12\n" + + "\x04args\x18\x05 \x03(\tR\x04args\x12\x1a\n" + + "\bfilepath\x18\x06 \x01(\tR\bfilepath\x12\x1b\n" + + "\texit_time\x18\a \x01(\x04R\bexitTime\"\xb1\x01\n" + + "\fProcessEvent\x12\x1c\n" + + "\ttimestamp\x18\x01 \x01(\x04R\ttimestamp\x12!\n" + + "\fcontainer_id\x18\x02 \x01(\tR\vcontainerId\x12-\n" + + "\aprocess\x18\x03 \x01(\v2\x13.runtime.v1.ProcessR\aprocess\x121\n" + + "\x06action\x18\x04 \x01(\x0e2\x19.runtime.v1.ProcessActionR\x06action\"^\n" + + "\x10ProcessTreeEvent\x12\x18\n" + + "\ainitial\x18\x01 \x01(\bR\ainitial\x120\n" + + "\x06events\x18\x02 \x03(\v2\x18.runtime.v1.ProcessEventR\x06events*\xf0\x02\n" + + "\tEventType\x12\v\n" + + "\aUNKNOWN\x10\x00\x12\x0e\n" + + "\n" + + "EVENT_EXEC\x10\x01\x12\r\n" + + "\tEVENT_DNS\x10\x02\x12\x15\n" + + "\x11EVENT_TCP_CONNECT\x10\x03\x12\x1b\n" + + "\x17EVENT_TCP_CONNECT_ERROR\x10\x04\x12\x14\n" + + "\x10EVENT_TCP_LISTEN\x10\x05\x12\x15\n" + + "\x11EVENT_FILE_CHANGE\x10\x06\x12\x15\n" + + "\x11EVENT_PROCESS_OOM\x10\a\x12\x15\n" + + "\x11EVENT_MAGIC_WRITE\x10\b\x12\x13\n" + + "\x0fEVENT_SIGNATURE\x10\t\x12\x13\n" + + "\x0fEVENT_TTY_WRITE\x10\n" + + "\x12\x1a\n" + + "\x16EVENT_STDIO_VIA_SOCKET\x10\v\x12\r\n" + + "\tEVENT_SSH\x10\f\x12\x16\n" + + "\x12EVENT_PROCESS_FORK\x10\r\x12\x16\n" + + "\x12EVENT_PROCESS_EXIT\x10\x0e\x12\x13\n" + + "\x0fEVENT_FILE_OPEN\x10\x0f\x12\x0e\n" + + "\tEVENT_ANY\x10\xe7\a*\xcf\x01\n" + + "\fWorkloadKind\x12\x19\n" + + "\x15WORKLOAD_KIND_UNKNOWN\x10\x00\x12\x1c\n" + + "\x18WORKLOAD_KIND_DEPLOYMENT\x10\x01\x12\x1d\n" + + "\x19WORKLOAD_KIND_REPLICA_SET\x10\x02\x12\x1e\n" + + "\x1aWORKLOAD_KIND_STATEFUL_SET\x10\x03\x12\x15\n" + + "\x11WORKLOAD_KIND_JOB\x10\x04\x12\x19\n" 
+ + "\x15WORKLOAD_KIND_CRONJOB\x10\x05\x12\x15\n" + + "\x11WORKLOAD_KIND_POD\x10\x06*\x90\x01\n" + + "\x0fImageScanStatus\x12\x1d\n" + + "\x19IMAGE_SCAN_STATUS_UNKNOWN\x10\x00\x12\x1d\n" + + "\x19IMAGE_SCAN_STATUS_SCANNED\x10\x01\x12\x1d\n" + + "\x19IMAGE_SCAN_STATUS_PENDING\x10\x02\x12 \n" + + "\x1cIMAGE_SCAN_STATUS_SCAN_ERROR\x10\x03*v\n" + + "\rProcessAction\x12\x1a\n" + + "\x16PROCESS_ACTION_UNKNOWN\x10\x00\x12\x17\n" + + "\x13PROCESS_ACTION_EXEC\x10\x01\x12\x17\n" + + "\x13PROCESS_ACTION_FORK\x10\x02\x12\x17\n" + + "\x13PROCESS_ACTION_EXIT\x10\x032\xfc\x05\n" + + "\x17RuntimeSecurityAgentAPI\x12_\n" + + "\x10GetConfiguration\x12#.runtime.v1.GetConfigurationRequest\x1a$.runtime.v1.GetConfigurationResponse\"\x00\x12L\n" + + "\x0fLogsWriteStream\x12\x14.runtime.v1.LogEvent\x1a\x1f.runtime.v1.WriteStreamResponse\"\x00(\x01\x12Y\n" + + "\x0eWriteDataBatch\x12!.runtime.v1.WriteDataBatchRequest\x1a\".runtime.v1.WriteDataBatchResponse\"\x00\x12S\n" + + "\fGetSyncState\x12\x1f.runtime.v1.GetSyncStateRequest\x1a .runtime.v1.GetSyncStateResponse\"\x00\x12\\\n" + + "\x0fUpdateSyncState\x12\".runtime.v1.UpdateSyncStateRequest\x1a#.runtime.v1.UpdateSyncStateResponse\"\x00\x12[\n" + + "\x13ImageMetadataIngest\x12\x19.runtime.v1.ImageMetadata\x1a'.runtime.v1.ImageMetadataIngestResponse\"\x00\x12a\n" + + "\x15KubeBenchReportIngest\x12\x1b.runtime.v1.KubeBenchReport\x1a).runtime.v1.KubeBenchReportIngestResponse\"\x00\x12d\n" + + "\x16KubeLinterReportIngest\x12\x1c.runtime.v1.KubeLinterReport\x1a*.runtime.v1.KubeLinterReportIngestResponse\"\x00B*Z(github.com/castai/kvisord/api/runtime/v1b\x06proto3" var ( file_api_v1_runtime_runtime_agent_api_proto_rawDescOnce sync.Once diff --git a/api/v1/runtime/runtime_agent_api_grpc.pb.go b/api/v1/runtime/runtime_agent_api_grpc.pb.go index 425c5cb6..b54758de 100644 --- a/api/v1/runtime/runtime_agent_api_grpc.pb.go +++ b/api/v1/runtime/runtime_agent_api_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v5.29.3 +// - protoc-gen-go-grpc v1.6.0 +// - protoc v6.33.0 // source: api/v1/runtime/runtime_agent_api.proto package v1 @@ -160,28 +160,28 @@ type RuntimeSecurityAgentAPIServer interface { type UnimplementedRuntimeSecurityAgentAPIServer struct{} func (UnimplementedRuntimeSecurityAgentAPIServer) GetConfiguration(context.Context, *GetConfigurationRequest) (*GetConfigurationResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetConfiguration not implemented") + return nil, status.Error(codes.Unimplemented, "method GetConfiguration not implemented") } func (UnimplementedRuntimeSecurityAgentAPIServer) LogsWriteStream(grpc.ClientStreamingServer[LogEvent, WriteStreamResponse]) error { - return status.Errorf(codes.Unimplemented, "method LogsWriteStream not implemented") + return status.Error(codes.Unimplemented, "method LogsWriteStream not implemented") } func (UnimplementedRuntimeSecurityAgentAPIServer) WriteDataBatch(context.Context, *WriteDataBatchRequest) (*WriteDataBatchResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method WriteDataBatch not implemented") + return nil, status.Error(codes.Unimplemented, "method WriteDataBatch not implemented") } func (UnimplementedRuntimeSecurityAgentAPIServer) GetSyncState(context.Context, *GetSyncStateRequest) (*GetSyncStateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetSyncState not implemented") + return nil, status.Error(codes.Unimplemented, "method GetSyncState not implemented") } func (UnimplementedRuntimeSecurityAgentAPIServer) UpdateSyncState(context.Context, *UpdateSyncStateRequest) (*UpdateSyncStateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateSyncState not implemented") + return nil, status.Error(codes.Unimplemented, "method UpdateSyncState not implemented") } func (UnimplementedRuntimeSecurityAgentAPIServer) ImageMetadataIngest(context.Context, 
*ImageMetadata) (*ImageMetadataIngestResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ImageMetadataIngest not implemented") + return nil, status.Error(codes.Unimplemented, "method ImageMetadataIngest not implemented") } func (UnimplementedRuntimeSecurityAgentAPIServer) KubeBenchReportIngest(context.Context, *KubeBenchReport) (*KubeBenchReportIngestResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method KubeBenchReportIngest not implemented") + return nil, status.Error(codes.Unimplemented, "method KubeBenchReportIngest not implemented") } func (UnimplementedRuntimeSecurityAgentAPIServer) KubeLinterReportIngest(context.Context, *KubeLinterReport) (*KubeLinterReportIngestResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method KubeLinterReportIngest not implemented") + return nil, status.Error(codes.Unimplemented, "method KubeLinterReportIngest not implemented") } func (UnimplementedRuntimeSecurityAgentAPIServer) testEmbeddedByValue() {} @@ -193,7 +193,7 @@ type UnsafeRuntimeSecurityAgentAPIServer interface { } func RegisterRuntimeSecurityAgentAPIServer(s grpc.ServiceRegistrar, srv RuntimeSecurityAgentAPIServer) { - // If the following call pancis, it indicates UnimplementedRuntimeSecurityAgentAPIServer was + // If the following call panics, it indicates UnimplementedRuntimeSecurityAgentAPIServer was // embedded by pointer and is nil. This will cause panics if an // unimplemented method is ever invoked, so we test this at initialization // time to prevent it from happening at runtime later due to I/O. 
diff --git a/cmd/agent/daemon/app/app.go b/cmd/agent/daemon/app/app.go index 6b8e24df..a6fc39da 100644 --- a/cmd/agent/daemon/app/app.go +++ b/cmd/agent/daemon/app/app.go @@ -255,6 +255,7 @@ func (a *App) Run(ctx context.Context) error { var blockDeviceMetricsWriter pipeline.BlockDeviceMetricsWriter var filesystemMetricsWriter pipeline.FilesystemMetricsWriter var nodeStatsSummaryWriter pipeline.NodeStatsSummaryWriter + var podVolumeMetricsWriter pipeline.K8sPodVolumeMetricsWriter var storageInfoProvider pipeline.StorageInfoProvider if cfg.Stats.StorageEnabled { metricsClient, err := createMetricsClient(cfg) @@ -268,7 +269,7 @@ func (a *App) Run(ctx context.Context) error { } }() - blockDeviceMetricsWriter, filesystemMetricsWriter, nodeStatsSummaryWriter, err = setupStorageMetrics(metricsClient) + blockDeviceMetricsWriter, filesystemMetricsWriter, nodeStatsSummaryWriter, podVolumeMetricsWriter, err = setupStorageMetrics(metricsClient) if err != nil { return fmt.Errorf("failed to setup storage metrics: %w", err) } @@ -301,6 +302,7 @@ func (a *App) Run(ctx context.Context) error { filesystemMetricsWriter, storageInfoProvider, nodeStatsSummaryWriter, + podVolumeMetricsWriter, ) for _, namespace := range cfg.MutedNamespaces { @@ -569,23 +571,28 @@ func waitWithTimeout(errg *errgroup.Group, timeout time.Duration) error { } } -func setupStorageMetrics(metricsClient custommetrics.MetricClient) (pipeline.BlockDeviceMetricsWriter, pipeline.FilesystemMetricsWriter, pipeline.NodeStatsSummaryWriter, error) { +func setupStorageMetrics(metricsClient custommetrics.MetricClient) (pipeline.BlockDeviceMetricsWriter, pipeline.FilesystemMetricsWriter, pipeline.NodeStatsSummaryWriter, pipeline.K8sPodVolumeMetricsWriter, error) { blockDeviceMetrics, err := pipeline.NewBlockDeviceMetricsWriter(metricsClient) if err != nil { - return nil, nil, nil, fmt.Errorf("failed to create block device metrics writer: %w", err) + return nil, nil, nil, nil, fmt.Errorf("failed to create block device 
metrics writer: %w", err) } filesystemMetrics, err := pipeline.NewFilesystemMetricsWriter(metricsClient) if err != nil { - return nil, nil, nil, fmt.Errorf("failed to create filesystem metrics writer: %w", err) + return nil, nil, nil, nil, fmt.Errorf("failed to create filesystem metrics writer: %w", err) } nodeStatsSummaryWriter, err := pipeline.NewNodeStatsSummaryWriter(metricsClient) if err != nil { - return nil, nil, nil, fmt.Errorf("failed to create node storage stats summary writer: %w", err) + return nil, nil, nil, nil, fmt.Errorf("failed to create node storage stats summary writer: %w", err) } - return blockDeviceMetrics, filesystemMetrics, nodeStatsSummaryWriter, nil + podVolumeMetricsWriter, err := pipeline.NewK8sPodVolumeMetricsWriter(metricsClient) + if err != nil { + return nil, nil, nil, nil, fmt.Errorf("failed to create pod volume metrics writer: %w", err) + } + + return blockDeviceMetrics, filesystemMetrics, nodeStatsSummaryWriter, podVolumeMetricsWriter, nil } // resolveMetricsAddr transforms kvisor.* addresses to telemetry.* addresses diff --git a/cmd/agent/daemon/pipeline/controller.go b/cmd/agent/daemon/pipeline/controller.go index 857edcc3..b50b124a 100644 --- a/cmd/agent/daemon/pipeline/controller.go +++ b/cmd/agent/daemon/pipeline/controller.go @@ -96,6 +96,10 @@ type NodeStatsSummaryWriter interface { Write(metrics ...NodeStatsSummaryMetric) error } +type K8sPodVolumeMetricsWriter interface { + Write(metrics ...K8sPodVolumeMetric) error +} + func NewBlockDeviceMetricsWriter(metricsClient custommetrics.MetricClient) (BlockDeviceMetricsWriter, error) { return custommetrics.NewMetric[BlockDeviceMetric]( metricsClient, @@ -120,6 +124,14 @@ func NewNodeStatsSummaryWriter(metricsClient custommetrics.MetricClient) (NodeSt ) } +func NewK8sPodVolumeMetricsWriter(metricsClient custommetrics.MetricClient) (K8sPodVolumeMetricsWriter, error) { + return custommetrics.NewMetric[K8sPodVolumeMetric]( + metricsClient, + 
custommetrics.WithCollectionName[K8sPodVolumeMetric]("k8s_pod_volume"), + custommetrics.WithSkipTimestamp[K8sPodVolumeMetric](), + ) +} + func NewController( log *logging.Logger, cfg Config, @@ -136,6 +148,7 @@ func NewController( filesystemMetricsWriter FilesystemMetricsWriter, storageInfoProvider StorageInfoProvider, nodeStatsSummaryWriter NodeStatsSummaryWriter, + podVolumeMetricsWriter K8sPodVolumeMetricsWriter, ) *Controller { podCache, err := freelru.NewSynced[string, *kubepb.Pod](256, func(k string) uint32 { return uint32(xxhash.Sum64String(k)) // nolint:gosec @@ -167,6 +180,7 @@ func NewController( filesystemMetricsWriter: filesystemMetricsWriter, storageInfoProvider: storageInfoProvider, nodeStatsSummaryWriter: nodeStatsSummaryWriter, + podVolumeMetricsWriter: podVolumeMetricsWriter, } } @@ -200,6 +214,7 @@ type Controller struct { filesystemMetricsWriter FilesystemMetricsWriter storageInfoProvider StorageInfoProvider nodeStatsSummaryWriter NodeStatsSummaryWriter + podVolumeMetricsWriter K8sPodVolumeMetricsWriter } func (c *Controller) Run(ctx context.Context) error { diff --git a/cmd/agent/daemon/pipeline/controller_test.go b/cmd/agent/daemon/pipeline/controller_test.go index 5c5f11df..02f11c71 100644 --- a/cmd/agent/daemon/pipeline/controller_test.go +++ b/cmd/agent/daemon/pipeline/controller_test.go @@ -808,7 +808,7 @@ func TestController(t *testing.T) { blockWriter := ctrl.blockDeviceMetricsWriter.(*mockBlockDeviceMetricsWriter) fsWriter := ctrl.filesystemMetricsWriter.(*mockFilesystemMetricsWriter) - ctrl.collectStorageMetrics() + ctrl.collectStorageMetrics(context.Background()) r.Len(blockWriter.metrics, 1) r.Len(fsWriter.metrics, 1) @@ -929,6 +929,7 @@ func newTestController(opts ...any) *Controller { filesystemMetrics, &mockStorageInfoProvider{}, nodeStatsSummaryWriter, + nil, // podVolumeMetricsWriter ) return ctrl } @@ -1237,6 +1238,12 @@ func (m *mockKubeClient) GetNodeStatsSummary(ctx context.Context, req *kubepb.Ge }, nil } +func (m 
*mockKubeClient) GetPodVolumes(ctx context.Context, req *kubepb.GetPodVolumesRequest, opts ...grpc.CallOption) (*kubepb.GetPodVolumesResponse, error) { + return &kubepb.GetPodVolumesResponse{ + Volumes: []*kubepb.PodVolumeInfo{}, + }, nil +} + type mockProcessTreeController struct { } @@ -1360,3 +1367,7 @@ func (m *mockStorageInfoProvider) CollectNodeStatsSummary(ctx context.Context) ( Timestamp: time.Now(), }, nil } + +func (m *mockStorageInfoProvider) CollectPodVolumeMetrics(ctx context.Context) ([]K8sPodVolumeMetric, error) { + return []K8sPodVolumeMetric{}, nil +} diff --git a/cmd/agent/daemon/pipeline/storage_info_provider.go b/cmd/agent/daemon/pipeline/storage_info_provider.go index 8c3d1b93..4296b9e6 100644 --- a/cmd/agent/daemon/pipeline/storage_info_provider.go +++ b/cmd/agent/daemon/pipeline/storage_info_provider.go @@ -76,6 +76,14 @@ type FilesystemMetric struct { TotalInodes *int64 `avro:"total_inodes"` UsedInodes *int64 `avro:"used_inodes"` Timestamp time.Time `avro:"ts"` + + // Pod/PVC metadata (nil for node-level filesystems) + Namespace *string `avro:"namespace"` + PodName *string `avro:"pod_name"` + PodUID *string `avro:"pod_uid"` + PVCName *string `avro:"pvc_name"` + PVName *string `avro:"pv_name"` + StorageClass *string `avro:"storage_class"` } // NodeStatsSummaryMetric represents node-level filesystem statistics from kubelet @@ -89,6 +97,26 @@ type NodeStatsSummaryMetric struct { Timestamp time.Time `avro:"ts"` } +// K8sPodVolumeMetric represents pod volume information from Kubernetes +type K8sPodVolumeMetric struct { + NodeName string `avro:"node_name"` + NodeTemplate *string `avro:"node_template"` + Namespace string `avro:"namespace"` + PodName string `avro:"pod_name"` + PodUID string `avro:"pod_uid"` + ControllerKind string `avro:"controller_kind"` + ControllerName string `avro:"controller_name"` + ContainerName string `avro:"container_name"` + VolumeName string `avro:"volume_name"` + MountPath string `avro:"mount_path"` + PVCName *string 
`avro:"pvc_name"` + RequestedSizeBytes *int64 `avro:"requested_size_bytes"` + PVName *string `avro:"pv_name"` + StorageClass *string `avro:"storage_class"` + CSIDriver *string `avro:"csi_driver"` + Timestamp time.Time `avro:"ts"` +} + type storageMetricsState struct { blockDevices map[string]*BlockDeviceMetric filesystems map[string]*FilesystemMetric @@ -98,6 +126,7 @@ type StorageInfoProvider interface { BuildFilesystemMetrics(timestamp time.Time) ([]FilesystemMetric, error) BuildBlockDeviceMetrics(timestamp time.Time) ([]BlockDeviceMetric, error) CollectNodeStatsSummary(ctx context.Context) (*NodeStatsSummaryMetric, error) + CollectPodVolumeMetrics(ctx context.Context) ([]K8sPodVolumeMetric, error) } type SysfsStorageInfoProvider struct { @@ -286,6 +315,65 @@ func (s *SysfsStorageInfoProvider) CollectNodeStatsSummary(ctx context.Context) return metric, nil } +// CollectPodVolumeMetrics retrieves pod volume information from the controller +func (s *SysfsStorageInfoProvider) CollectPodVolumeMetrics(ctx context.Context) ([]K8sPodVolumeMetric, error) { + if s.kubeClient == nil { + return nil, fmt.Errorf("kube client is not initialized") + } + + resp, err := s.kubeClient.GetPodVolumes(ctx, &kubepb.GetPodVolumesRequest{ + NodeName: s.nodeName, + }, grpc.UseCompressor(gzip.Name)) + if err != nil { + return nil, fmt.Errorf("failed to get pod volumes for %s: %w", s.nodeName, err) + } + + nodeTemplate, err := s.getNodeTemplate() + if err != nil { + s.log.Warnf("failed to get node template: %v", err) + nodeTemplate = nil + } + + timestamp := time.Now() + metrics := make([]K8sPodVolumeMetric, 0, len(resp.Volumes)) + + for _, v := range resp.Volumes { + metric := K8sPodVolumeMetric{ + NodeName: s.nodeName, + NodeTemplate: nodeTemplate, + Namespace: v.Namespace, + PodName: v.PodName, + PodUID: v.PodUid, + ControllerKind: v.ControllerKind, + ControllerName: v.ControllerName, + ContainerName: v.ContainerName, + VolumeName: v.VolumeName, + MountPath: v.MountPath, + Timestamp: 
timestamp, + } + + if v.PvcName != "" { + metric.PVCName = &v.PvcName + } + if v.RequestedSizeBytes > 0 { + metric.RequestedSizeBytes = &v.RequestedSizeBytes + } + if v.PvName != "" { + metric.PVName = &v.PvName + } + if v.StorageClass != "" { + metric.StorageClass = &v.StorageClass + } + if v.CsiDriver != "" { + metric.CSIDriver = &v.CsiDriver + } + + metrics = append(metrics, metric) + } + + return metrics, nil +} + func (s *SysfsStorageInfoProvider) BuildFilesystemMetrics(timestamp time.Time) ([]FilesystemMetric, error) { // Read mount information from /proc/1/mountinfo mounts, err := readMountInfo("/proc/1/mountinfo") @@ -293,9 +381,12 @@ func (s *SysfsStorageInfoProvider) BuildFilesystemMetrics(timestamp time.Time) ( return nil, fmt.Errorf("failed to read mountinfo: %w", err) } + // Build pod volume lookup map for enrichment + podVolumeMap := s.buildPodVolumeLookupMap() + filesystemMetrics := make([]FilesystemMetric, 0, len(mounts)) for _, mount := range mounts { - metric, err := s.buildFilesystemMetric(mount, timestamp) + metric, err := s.buildFilesystemMetric(mount, timestamp, podVolumeMap) if err != nil { s.log.Warnf("skipping filesystem metric for %s: %v", mount.MountPoint, err) continue @@ -306,7 +397,38 @@ func (s *SysfsStorageInfoProvider) BuildFilesystemMetrics(timestamp time.Time) ( return filesystemMetrics, nil } -func (s *SysfsStorageInfoProvider) buildFilesystemMetric(mount mountInfo, timestamp time.Time) (FilesystemMetric, error) { +// podVolumeKey generates a lookup key from pod UID and volume name +func podVolumeKey(podUID, volumeName string) string { + return podUID + "/" + volumeName +} + +// buildPodVolumeLookupMap fetches pod volumes from controller and builds a lookup map +func (s *SysfsStorageInfoProvider) buildPodVolumeLookupMap() map[string]*kubepb.PodVolumeInfo { + if s.kubeClient == nil { + return nil + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + resp, err := 
s.kubeClient.GetPodVolumes(ctx, &kubepb.GetPodVolumesRequest{ + NodeName: s.nodeName, + }, grpc.UseCompressor(gzip.Name)) + if err != nil { + s.log.Warnf("failed to get pod volumes for enrichment: %v", err) + return nil + } + + volumeMap := make(map[string]*kubepb.PodVolumeInfo, len(resp.Volumes)) + for _, v := range resp.Volumes { + key := podVolumeKey(v.PodUid, v.VolumeName) + volumeMap[key] = v + } + + return volumeMap +} + +func (s *SysfsStorageInfoProvider) buildFilesystemMetric(mount mountInfo, timestamp time.Time, podVolumeMap map[string]*kubepb.PodVolumeInfo) (FilesystemMetric, error) { // Construct the path from host's root to access the filesystem fileSystemPath := filepath.Join(s.hostRootPath, mount.MountPoint) @@ -324,7 +446,7 @@ func (s *SysfsStorageInfoProvider) buildFilesystemMetric(mount mountInfo, timest // Check whether the filesystem is holding kubelet and/or castai-storage directories labels := buildFilesystemLabels(devID, s.wellKnownPathDeviceID) - return FilesystemMetric{ + metric := FilesystemMetric{ Devices: s.getBackingDevices(mount.Device), NodeName: s.nodeName, NodeTemplate: nodeTemplate, @@ -337,7 +459,28 @@ func (s *SysfsStorageInfoProvider) buildFilesystemMetric(mount mountInfo, timest UsedInodes: &usedInodes, Labels: labels, Timestamp: timestamp, - }, nil + } + + // Check if this is a pod volume mount and enrich with pod metadata + if volInfo := ParseVolumeMountPath(mount.MountPoint); volInfo != nil && podVolumeMap != nil { + key := podVolumeKey(volInfo.PodUID, volInfo.VolumeName) + if pv, ok := podVolumeMap[key]; ok { + metric.Namespace = &pv.Namespace + metric.PodName = &pv.PodName + metric.PodUID = &pv.PodUid + if pv.PvcName != "" { + metric.PVCName = &pv.PvcName + } + if pv.PvName != "" { + metric.PVName = &pv.PvName + } + if pv.StorageClass != "" { + metric.StorageClass = &pv.StorageClass + } + } + } + + return metric, nil } // getBackingDevices resolves a device to its backing device. 
@@ -799,7 +942,7 @@ func getFilesystemStats(mountPoint string) (sizeBytes, usedBytes int64, totalIno var mountPointStat unix.Stat_t err = unix.Stat(mountPoint, &mountPointStat) if err == nil { - devID = mountPointStat.Dev + devID = uint64(mountPointStat.Dev) } // statfs.Bsize is uint32 on Darwin, int64 on Linux - convert safely to uint64 @@ -837,7 +980,7 @@ func getDeviceIDForPath(path string) (uint64, error) { return 0, err } - return stat.Dev, nil + return uint64(stat.Dev), nil } func buildFilesystemLabels(fsMountPointDeviceID uint64, wellKnownPathsDeviceID map[string]uint64) map[string]string { diff --git a/cmd/agent/daemon/pipeline/storage_pipeline.go b/cmd/agent/daemon/pipeline/storage_pipeline.go index 1ea9b972..e018f9d3 100644 --- a/cmd/agent/daemon/pipeline/storage_pipeline.go +++ b/cmd/agent/daemon/pipeline/storage_pipeline.go @@ -19,14 +19,14 @@ func (c *Controller) runStoragePipeline(ctx context.Context) error { return ctx.Err() case <-ticker.C: start := time.Now() - c.collectStorageMetrics() + c.collectStorageMetrics(ctx) c.collectNodeStatsSummary(ctx) c.log.Debugf("storage stats exported, duration=%v", time.Since(start)) } } } -func (c *Controller) collectStorageMetrics() { +func (c *Controller) collectStorageMetrics(ctx context.Context) { start := time.Now() c.log.Debug("starting storage stats collection") @@ -39,6 +39,10 @@ func (c *Controller) collectStorageMetrics() { c.log.Errorf("failed to collect filesystem metrics: %v", err) } + if err := c.processPodVolumeMetrics(ctx); err != nil { + c.log.Errorf("failed to collect pod volume metrics: %v", err) + } + c.log.Debugf("storage stats collection completed in %v", time.Since(start)) } @@ -80,6 +84,32 @@ func (c *Controller) processFilesystemMetrics(timestamp time.Time) error { return nil } +func (c *Controller) processPodVolumeMetrics(ctx context.Context) error { + if c.podVolumeMetricsWriter == nil { + return nil // Pod volume metrics writer not configured, skip + } + + ctx, cancel := 
context.WithTimeout(ctx, 30*time.Second)
+	defer cancel()
+
+	metrics, err := c.storageInfoProvider.CollectPodVolumeMetrics(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to collect pod volume metrics: %w", err)
+	}
+
+	if len(metrics) == 0 {
+		return nil
+	}
+
+	c.log.Infof("collected %d pod volume metrics", len(metrics))
+
+	if err := c.podVolumeMetricsWriter.Write(metrics...); err != nil {
+		return fmt.Errorf("failed to write pod volume metrics: %w", err)
+	}
+
+	return nil
+}
+
 func (c *Controller) collectNodeStatsSummary(ctx context.Context) {
 	if c.nodeStatsSummaryWriter == nil || c.storageInfoProvider == nil {
 		return
diff --git a/cmd/agent/daemon/pipeline/volume_mapper.go b/cmd/agent/daemon/pipeline/volume_mapper.go
new file mode 100644
index 00000000..0285a106
--- /dev/null
+++ b/cmd/agent/daemon/pipeline/volume_mapper.go
@@ -0,0 +1,30 @@
+package pipeline
+
+import "regexp"
+
+// podVolumeMountRegex matches kubelet pod volume mount paths.
+// Format: /var/lib/kubelet/pods/<pod-uid>/volumes/<volume-plugin>/<volume-name>
+var podVolumeMountRegex = regexp.MustCompile(
+	`/var/lib/kubelet/pods/([a-f0-9-]+)/volumes/([^/]+)/([^/]+)`,
+)
+
+// VolumePathInfo contains extracted information from a volume mount path.
+type VolumePathInfo struct {
+	PodUID       string
+	VolumePlugin string
+	VolumeName   string
+}
+
+// ParseVolumeMountPath extracts pod and volume info from a kubelet mount path.
+// Returns nil if the path is not a pod volume mount.
+func ParseVolumeMountPath(mountPath string) *VolumePathInfo { + matches := podVolumeMountRegex.FindStringSubmatch(mountPath) + if len(matches) != 4 { + return nil + } + return &VolumePathInfo{ + PodUID: matches[1], + VolumePlugin: matches[2], + VolumeName: matches[3], + } +} diff --git a/cmd/agent/daemon/pipeline/volume_mapper_test.go b/cmd/agent/daemon/pipeline/volume_mapper_test.go new file mode 100644 index 00000000..e06ad868 --- /dev/null +++ b/cmd/agent/daemon/pipeline/volume_mapper_test.go @@ -0,0 +1,84 @@ +package pipeline + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseVolumeMountPath(t *testing.T) { + tests := []struct { + name string + path string + expected *VolumePathInfo + }{ + { + name: "CSI volume mount", + path: "/var/lib/kubelet/pods/3e61c214-bc3e-d9ff-81e2-1474dd6cba17/volumes/kubernetes.io~csi/pvc-abc123", + expected: &VolumePathInfo{ + PodUID: "3e61c214-bc3e-d9ff-81e2-1474dd6cba17", + VolumePlugin: "kubernetes.io~csi", + VolumeName: "pvc-abc123", + }, + }, + { + name: "EmptyDir volume mount", + path: "/var/lib/kubelet/pods/abc-def-123/volumes/kubernetes.io~empty-dir/cache", + expected: &VolumePathInfo{ + PodUID: "abc-def-123", + VolumePlugin: "kubernetes.io~empty-dir", + VolumeName: "cache", + }, + }, + { + name: "HostPath volume mount", + path: "/var/lib/kubelet/pods/12345678-1234-1234-1234-123456789012/volumes/kubernetes.io~host-path/host-data", + expected: &VolumePathInfo{ + PodUID: "12345678-1234-1234-1234-123456789012", + VolumePlugin: "kubernetes.io~host-path", + VolumeName: "host-data", + }, + }, + { + name: "ConfigMap volume mount", + path: "/var/lib/kubelet/pods/aabbccdd-1122-3344-5566-778899aabbcc/volumes/kubernetes.io~configmap/config", + expected: &VolumePathInfo{ + PodUID: "aabbccdd-1122-3344-5566-778899aabbcc", + VolumePlugin: "kubernetes.io~configmap", + VolumeName: "config", + }, + }, + { + name: "Root filesystem - not a pod volume", + path: "/", + expected: nil, + }, + { + name: "Kubelet 
directory - not a volume mount", + path: "/var/lib/kubelet", + expected: nil, + }, + { + name: "Containerd directory - not a pod volume", + path: "/var/lib/containerd", + expected: nil, + }, + { + name: "Regular mount point - not a pod volume", + path: "/mnt/data", + expected: nil, + }, + { + name: "Pod directory without volumes - not a volume mount", + path: "/var/lib/kubelet/pods/abc-123", + expected: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := ParseVolumeMountPath(tt.path) + assert.Equal(t, tt.expected, result) + }) + } +} diff --git a/cmd/controller/kube/client.go b/cmd/controller/kube/client.go index 8a408a9b..4a183b5b 100644 --- a/cmd/controller/kube/client.go +++ b/cmd/controller/kube/client.go @@ -99,6 +99,8 @@ func (c *Client) RegisterHandlers(factory informers.SharedInformerFactory) { factory.Core().V1().Services().Informer(), factory.Core().V1().Endpoints().Informer(), factory.Core().V1().Namespaces().Informer(), + factory.Core().V1().PersistentVolumeClaims().Informer(), + factory.Core().V1().PersistentVolumes().Informer(), factory.Apps().V1().Deployments().Informer(), factory.Apps().V1().StatefulSets().Informer(), factory.Apps().V1().DaemonSets().Informer(), @@ -177,6 +179,10 @@ func (c *Client) eventHandler() cache.ResourceEventHandler { c.index.addFromEndpoints(t) case *corev1.Node: c.index.addFromNode(t) + case *corev1.PersistentVolumeClaim: + c.index.addFromPVC(t) + case *corev1.PersistentVolume: + c.index.addFromPV(t) case *batchv1.Job: c.index.jobs[t.UID] = t.ObjectMeta case *appsv1.ReplicaSet: @@ -202,6 +208,10 @@ func (c *Client) eventHandler() cache.ResourceEventHandler { c.index.addFromEndpoints(t) case *corev1.Node: c.index.addFromNode(t) + case *corev1.PersistentVolumeClaim: + c.index.addFromPVC(t) + case *corev1.PersistentVolume: + c.index.addFromPV(t) case *batchv1.Job: c.index.jobs[t.UID] = t.ObjectMeta case *appsv1.ReplicaSet: @@ -227,6 +237,10 @@ func (c *Client) eventHandler() 
cache.ResourceEventHandler { c.index.deleteFromEndpoints(t) case *corev1.Node: c.index.deleteByNode(t) + case *corev1.PersistentVolumeClaim: + c.index.deleteFromPVC(t) + case *corev1.PersistentVolume: + c.index.deleteFromPV(t) case *batchv1.Job: delete(c.index.jobs, t.UID) case *appsv1.ReplicaSet: @@ -286,6 +300,27 @@ func (c *Client) GetNodeInfo(name string) (*corev1.Node, bool) { return node, true } +func (c *Client) GetPVCByName(namespace, name string) (*corev1.PersistentVolumeClaim, bool) { + c.mu.RLock() + defer c.mu.RUnlock() + + return c.index.GetPVCByName(namespace, name) +} + +func (c *Client) GetPVByName(name string) (*corev1.PersistentVolume, bool) { + c.mu.RLock() + defer c.mu.RUnlock() + + return c.index.GetPVByName(name) +} + +func (c *Client) GetPodsOnNode(nodeName string) []*PodInfo { + c.mu.RLock() + defer c.mu.RUnlock() + + return c.index.GetPodsOnNode(nodeName) +} + func (c *Client) GetOwnerUID(obj Object) string { c.mu.RLock() defer c.mu.RUnlock() diff --git a/cmd/controller/kube/index.go b/cmd/controller/kube/index.go index 5a67752d..d252b281 100644 --- a/cmd/controller/kube/index.go +++ b/cmd/controller/kube/index.go @@ -19,6 +19,8 @@ func NewIndex() *Index { deployments: make(map[types.UID]*appsv1.Deployment), pods: make(map[types.UID]*PodInfo), nodesByName: make(map[string]*corev1.Node), + pvcs: make(map[string]*corev1.PersistentVolumeClaim), + pvs: make(map[string]*corev1.PersistentVolume), } } @@ -29,6 +31,8 @@ type Index struct { deployments map[types.UID]*appsv1.Deployment pods map[types.UID]*PodInfo nodesByName map[string]*corev1.Node + pvcs map[string]*corev1.PersistentVolumeClaim // key: namespace/name + pvs map[string]*corev1.PersistentVolume // key: PV name } func (i *Index) addFromPod(pod *corev1.Pod) { @@ -297,3 +301,43 @@ type PodInfo struct { Owner metav1.OwnerReference Zone string } + +func pvcKey(namespace, name string) string { + return namespace + "/" + name +} + +func (i *Index) addFromPVC(pvc *corev1.PersistentVolumeClaim) 
{ + i.pvcs[pvcKey(pvc.Namespace, pvc.Name)] = pvc +} + +func (i *Index) deleteFromPVC(pvc *corev1.PersistentVolumeClaim) { + delete(i.pvcs, pvcKey(pvc.Namespace, pvc.Name)) +} + +func (i *Index) GetPVCByName(namespace, name string) (*corev1.PersistentVolumeClaim, bool) { + pvc, found := i.pvcs[pvcKey(namespace, name)] + return pvc, found +} + +func (i *Index) addFromPV(pv *corev1.PersistentVolume) { + i.pvs[pv.Name] = pv +} + +func (i *Index) deleteFromPV(pv *corev1.PersistentVolume) { + delete(i.pvs, pv.Name) +} + +func (i *Index) GetPVByName(name string) (*corev1.PersistentVolume, bool) { + pv, found := i.pvs[name] + return pv, found +} + +func (i *Index) GetPodsOnNode(nodeName string) []*PodInfo { + var pods []*PodInfo + for _, podInfo := range i.pods { + if podInfo.Pod.Spec.NodeName == nodeName { + pods = append(pods, podInfo) + } + } + return pods +} diff --git a/cmd/controller/kube/server.go b/cmd/controller/kube/server.go index 496a7b79..61eb60e9 100644 --- a/cmd/controller/kube/server.go +++ b/cmd/controller/kube/server.go @@ -6,6 +6,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" + corev1 "k8s.io/api/core/v1" _ "google.golang.org/grpc/encoding/gzip" @@ -158,6 +159,88 @@ func (s *Server) GetNodeStatsSummary(ctx context.Context, req *kubepb.GetNodeSta return resp, nil } +func (s *Server) GetPodVolumes(ctx context.Context, req *kubepb.GetPodVolumesRequest) (*kubepb.GetPodVolumesResponse, error) { + if req.NodeName == "" { + return nil, status.Errorf(codes.InvalidArgument, "node_name is required") + } + + pods := s.client.GetPodsOnNode(req.NodeName) + var volumes []*kubepb.PodVolumeInfo + + for _, podInfo := range pods { + pod := podInfo.Pod + if pod == nil { + continue + } + + // Build a map of volume name -> volume for quick lookup + volumeMap := make(map[string]corev1.Volume) + for _, vol := range pod.Spec.Volumes { + volumeMap[vol.Name] = vol + } + + // Iterate through containers and their volume mounts + for _, container := 
range pod.Spec.Containers { + for _, mount := range container.VolumeMounts { + vol, exists := volumeMap[mount.Name] + if !exists { + continue + } + + volInfo := &kubepb.PodVolumeInfo{ + Namespace: pod.Namespace, + PodName: pod.Name, + PodUid: string(pod.UID), + ControllerKind: podInfo.Owner.Kind, + ControllerName: podInfo.Owner.Name, + ContainerName: container.Name, + VolumeName: vol.Name, + MountPath: mount.MountPath, + } + + // If this is a PVC-backed volume, enrich with PVC/PV details + if vol.PersistentVolumeClaim != nil { + pvcName := vol.PersistentVolumeClaim.ClaimName + volInfo.PvcName = pvcName + + if pvc, found := s.client.GetPVCByName(pod.Namespace, pvcName); found { + volInfo.PvcUid = string(pvc.UID) + + // Get requested storage size + if req, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; ok { + volInfo.RequestedSizeBytes = req.Value() + } + + // Get storage class + if pvc.Spec.StorageClassName != nil { + volInfo.StorageClass = *pvc.Spec.StorageClassName + } + + // Get PV details if bound + if pvc.Spec.VolumeName != "" { + volInfo.PvName = pvc.Spec.VolumeName + + if pv, found := s.client.GetPVByName(pvc.Spec.VolumeName); found { + // Get CSI details if available + if pv.Spec.CSI != nil { + volInfo.CsiDriver = pv.Spec.CSI.Driver + volInfo.CsiVolumeHandle = pv.Spec.CSI.VolumeHandle + } + } + } + } + } + + volumes = append(volumes, volInfo) + } + } + } + + return &kubepb.GetPodVolumesResponse{ + Volumes: volumes, + }, nil +} + func toProtoWorkloadKind(kind string) kubepb.WorkloadKind { switch kind { case "Deployment": From 9e0b52ee750ba6208ffdf8fa54ce4f399b7edd60 Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Fri, 16 Jan 2026 23:16:25 +0200 Subject: [PATCH 02/22] Take into account block volumes as well --- api/v1/kube/kube_api.pb.go | 24 ++++- api/v1/kube/kube_api.proto | 2 + .../daemon/pipeline/storage_info_provider.go | 6 ++ cmd/controller/kube/server.go | 95 +++++++++++++------ 4 files changed, 95 insertions(+), 32 deletions(-) diff 
--git a/api/v1/kube/kube_api.pb.go b/api/v1/kube/kube_api.pb.go index 4485a5ef..52963626 100644 --- a/api/v1/kube/kube_api.pb.go +++ b/api/v1/kube/kube_api.pb.go @@ -1426,6 +1426,8 @@ type PodVolumeInfo struct { StorageClass string `protobuf:"bytes,13,opt,name=storage_class,json=storageClass,proto3" json:"storage_class,omitempty"` CsiDriver string `protobuf:"bytes,14,opt,name=csi_driver,json=csiDriver,proto3" json:"csi_driver,omitempty"` CsiVolumeHandle string `protobuf:"bytes,15,opt,name=csi_volume_handle,json=csiVolumeHandle,proto3" json:"csi_volume_handle,omitempty"` + VolumeMode string `protobuf:"bytes,16,opt,name=volume_mode,json=volumeMode,proto3" json:"volume_mode,omitempty"` // "Filesystem" or "Block" + DevicePath string `protobuf:"bytes,17,opt,name=device_path,json=devicePath,proto3" json:"device_path,omitempty"` // For block volumes: container's volumeDevices[].devicePath unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1565,6 +1567,20 @@ func (x *PodVolumeInfo) GetCsiVolumeHandle() string { return "" } +func (x *PodVolumeInfo) GetVolumeMode() string { + if x != nil { + return x.VolumeMode + } + return "" +} + +func (x *PodVolumeInfo) GetDevicePath() string { + if x != nil { + return x.DevicePath + } + return "" +} + var File_api_v1_kube_kube_api_proto protoreflect.FileDescriptor const file_api_v1_kube_kube_api_proto_rawDesc = "" + @@ -1662,7 +1678,7 @@ const file_api_v1_kube_kube_api_proto_rawDesc = "" + "\x14GetPodVolumesRequest\x12\x1b\n" + "\tnode_name\x18\x01 \x01(\tR\bnodeName\"I\n" + "\x15GetPodVolumesResponse\x120\n" + - "\avolumes\x18\x01 \x03(\v2\x16.kube.v1.PodVolumeInfoR\avolumes\"\x89\x04\n" + + "\avolumes\x18\x01 \x03(\v2\x16.kube.v1.PodVolumeInfoR\avolumes\"\xcb\x04\n" + "\rPodVolumeInfo\x12\x1c\n" + "\tnamespace\x18\x01 \x01(\tR\tnamespace\x12\x19\n" + "\bpod_name\x18\x02 \x01(\tR\apodName\x12\x17\n" + @@ -1682,7 +1698,11 @@ const file_api_v1_kube_kube_api_proto_rawDesc = "" + "\rstorage_class\x18\r 
\x01(\tR\fstorageClass\x12\x1d\n" + "\n" + "csi_driver\x18\x0e \x01(\tR\tcsiDriver\x12*\n" + - "\x11csi_volume_handle\x18\x0f \x01(\tR\x0fcsiVolumeHandle*\xed\x01\n" + + "\x11csi_volume_handle\x18\x0f \x01(\tR\x0fcsiVolumeHandle\x12\x1f\n" + + "\vvolume_mode\x18\x10 \x01(\tR\n" + + "volumeMode\x12\x1f\n" + + "\vdevice_path\x18\x11 \x01(\tR\n" + + "devicePath*\xed\x01\n" + "\fWorkloadKind\x12\x19\n" + "\x15WORKLOAD_KIND_UNKNOWN\x10\x00\x12\x1c\n" + "\x18WORKLOAD_KIND_DEPLOYMENT\x10\x01\x12\x1d\n" + diff --git a/api/v1/kube/kube_api.proto b/api/v1/kube/kube_api.proto index 259126d6..c257113b 100644 --- a/api/v1/kube/kube_api.proto +++ b/api/v1/kube/kube_api.proto @@ -171,4 +171,6 @@ message PodVolumeInfo { string storage_class = 13; string csi_driver = 14; string csi_volume_handle = 15; + string volume_mode = 16; // "Filesystem" or "Block" + string device_path = 17; // For block volumes: container's volumeDevices[].devicePath } diff --git a/cmd/agent/daemon/pipeline/storage_info_provider.go b/cmd/agent/daemon/pipeline/storage_info_provider.go index 4296b9e6..505636ea 100644 --- a/cmd/agent/daemon/pipeline/storage_info_provider.go +++ b/cmd/agent/daemon/pipeline/storage_info_provider.go @@ -114,6 +114,8 @@ type K8sPodVolumeMetric struct { PVName *string `avro:"pv_name"` StorageClass *string `avro:"storage_class"` CSIDriver *string `avro:"csi_driver"` + VolumeMode string `avro:"volume_mode"` // "Filesystem" or "Block" + DevicePath *string `avro:"device_path"` // For block volumes: container's volumeDevices[].devicePath Timestamp time.Time `avro:"ts"` } @@ -349,6 +351,7 @@ func (s *SysfsStorageInfoProvider) CollectPodVolumeMetrics(ctx context.Context) ContainerName: v.ContainerName, VolumeName: v.VolumeName, MountPath: v.MountPath, + VolumeMode: v.VolumeMode, Timestamp: timestamp, } @@ -367,6 +370,9 @@ func (s *SysfsStorageInfoProvider) CollectPodVolumeMetrics(ctx context.Context) if v.CsiDriver != "" { metric.CSIDriver = &v.CsiDriver } + if v.DevicePath != "" { + 
metric.DevicePath = &v.DevicePath + } metrics = append(metrics, metric) } diff --git a/cmd/controller/kube/server.go b/cmd/controller/kube/server.go index 61eb60e9..4876a8dd 100644 --- a/cmd/controller/kube/server.go +++ b/cmd/controller/kube/server.go @@ -179,7 +179,7 @@ func (s *Server) GetPodVolumes(ctx context.Context, req *kubepb.GetPodVolumesReq volumeMap[vol.Name] = vol } - // Iterate through containers and their volume mounts + // Iterate through containers and their volume mounts (filesystem volumes) for _, container := range pod.Spec.Containers { for _, mount := range container.VolumeMounts { vol, exists := volumeMap[mount.Name] @@ -196,39 +196,39 @@ func (s *Server) GetPodVolumes(ctx context.Context, req *kubepb.GetPodVolumesReq ContainerName: container.Name, VolumeName: vol.Name, MountPath: mount.MountPath, + VolumeMode: "Filesystem", } // If this is a PVC-backed volume, enrich with PVC/PV details if vol.PersistentVolumeClaim != nil { - pvcName := vol.PersistentVolumeClaim.ClaimName - volInfo.PvcName = pvcName - - if pvc, found := s.client.GetPVCByName(pod.Namespace, pvcName); found { - volInfo.PvcUid = string(pvc.UID) - - // Get requested storage size - if req, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; ok { - volInfo.RequestedSizeBytes = req.Value() - } - - // Get storage class - if pvc.Spec.StorageClassName != nil { - volInfo.StorageClass = *pvc.Spec.StorageClassName - } - - // Get PV details if bound - if pvc.Spec.VolumeName != "" { - volInfo.PvName = pvc.Spec.VolumeName - - if pv, found := s.client.GetPVByName(pvc.Spec.VolumeName); found { - // Get CSI details if available - if pv.Spec.CSI != nil { - volInfo.CsiDriver = pv.Spec.CSI.Driver - volInfo.CsiVolumeHandle = pv.Spec.CSI.VolumeHandle - } - } - } - } + s.enrichPVCDetails(volInfo, vol, pod.Namespace) + } + + volumes = append(volumes, volInfo) + } + + // Handle block volumes (VolumeDevices) + for _, device := range container.VolumeDevices { + vol, exists := 
volumeMap[device.Name] + if !exists { + continue + } + + volInfo := &kubepb.PodVolumeInfo{ + Namespace: pod.Namespace, + PodName: pod.Name, + PodUid: string(pod.UID), + ControllerKind: podInfo.Owner.Kind, + ControllerName: podInfo.Owner.Name, + ContainerName: container.Name, + VolumeName: vol.Name, + DevicePath: device.DevicePath, + VolumeMode: "Block", + } + + // If this is a PVC-backed volume, enrich with PVC/PV details + if vol.PersistentVolumeClaim != nil { + s.enrichPVCDetails(volInfo, vol, pod.Namespace) } volumes = append(volumes, volInfo) @@ -241,6 +241,41 @@ func (s *Server) GetPodVolumes(ctx context.Context, req *kubepb.GetPodVolumesReq }, nil } +func (s *Server) enrichPVCDetails(volInfo *kubepb.PodVolumeInfo, vol corev1.Volume, namespace string) { + pvcName := vol.PersistentVolumeClaim.ClaimName + volInfo.PvcName = pvcName + + pvc, found := s.client.GetPVCByName(namespace, pvcName) + if !found { + return + } + + volInfo.PvcUid = string(pvc.UID) + + // Get requested storage size + if req, ok := pvc.Spec.Resources.Requests[corev1.ResourceStorage]; ok { + volInfo.RequestedSizeBytes = req.Value() + } + + // Get storage class + if pvc.Spec.StorageClassName != nil { + volInfo.StorageClass = *pvc.Spec.StorageClassName + } + + // Get PV details if bound + if pvc.Spec.VolumeName != "" { + volInfo.PvName = pvc.Spec.VolumeName + + if pv, found := s.client.GetPVByName(pvc.Spec.VolumeName); found { + // Get CSI details if available + if pv.Spec.CSI != nil { + volInfo.CsiDriver = pv.Spec.CSI.Driver + volInfo.CsiVolumeHandle = pv.Spec.CSI.VolumeHandle + } + } + } +} + func toProtoWorkloadKind(kind string) kubepb.WorkloadKind { switch kind { case "Deployment": From ec21bd338312d5d15254e9373bda6a8ed31a3f6b Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Fri, 16 Jan 2026 23:25:24 +0200 Subject: [PATCH 03/22] Add logging only mode --- cmd/agent/daemon/app/app.go | 38 +++++++++----- cmd/agent/daemon/pipeline/controller.go | 66 +++++++++++++++++++++++++ 2 files 
changed, 92 insertions(+), 12 deletions(-) diff --git a/cmd/agent/daemon/app/app.go b/cmd/agent/daemon/app/app.go index a6fc39da..a0d609a7 100644 --- a/cmd/agent/daemon/app/app.go +++ b/cmd/agent/daemon/app/app.go @@ -258,20 +258,26 @@ func (a *App) Run(ctx context.Context) error { var podVolumeMetricsWriter pipeline.K8sPodVolumeMetricsWriter var storageInfoProvider pipeline.StorageInfoProvider if cfg.Stats.StorageEnabled { - metricsClient, err := createMetricsClient(cfg) - if err != nil { - return fmt.Errorf("failed to create metrics client: %w", err) - } - - go func() { - if err = metricsClient.Start(ctx); err != nil { - log.Warnf("metric client failed with:%v", err) + // Check if logging-only mode is enabled (for testing without ClickHouse) + if os.Getenv("STORAGE_METRICS_LOG_ONLY") == "true" { + log.Info("using logging writers for storage metrics (STORAGE_METRICS_LOG_ONLY=true)") + blockDeviceMetricsWriter, filesystemMetricsWriter, nodeStatsSummaryWriter, podVolumeMetricsWriter = setupLoggingStorageMetrics(log) + } else { + metricsClient, err := createMetricsClient(cfg) + if err != nil { + return fmt.Errorf("failed to create metrics client: %w", err) } - }() - blockDeviceMetricsWriter, filesystemMetricsWriter, nodeStatsSummaryWriter, podVolumeMetricsWriter, err = setupStorageMetrics(metricsClient) - if err != nil { - return fmt.Errorf("failed to setup storage metrics: %w", err) + go func() { + if err = metricsClient.Start(ctx); err != nil { + log.Warnf("metric client failed with:%v", err) + } + }() + + blockDeviceMetricsWriter, filesystemMetricsWriter, nodeStatsSummaryWriter, podVolumeMetricsWriter, err = setupStorageMetrics(metricsClient) + if err != nil { + return fmt.Errorf("failed to setup storage metrics: %w", err) + } } storageInfoProvider, err = pipeline.NewStorageInfoProvider(log, kubeAPIServerClient, cfg.Castai.ClusterID) @@ -595,6 +601,14 @@ func setupStorageMetrics(metricsClient custommetrics.MetricClient) (pipeline.Blo return blockDeviceMetrics, 
filesystemMetrics, nodeStatsSummaryWriter, podVolumeMetricsWriter, nil } +// setupLoggingStorageMetrics creates logging-based writers for testing without ClickHouse +func setupLoggingStorageMetrics(log *logging.Logger) (pipeline.BlockDeviceMetricsWriter, pipeline.FilesystemMetricsWriter, pipeline.NodeStatsSummaryWriter, pipeline.K8sPodVolumeMetricsWriter) { + return pipeline.NewLoggingBlockDeviceMetricsWriter(log), + pipeline.NewLoggingFilesystemMetricsWriter(log), + pipeline.NewLoggingNodeStatsSummaryWriter(log), + pipeline.NewLoggingK8sPodVolumeMetricsWriter(log) +} + // resolveMetricsAddr transforms kvisor.* addresses to telemetry.* addresses func resolveMetricsAddr(addr string) string { const ( diff --git a/cmd/agent/daemon/pipeline/controller.go b/cmd/agent/daemon/pipeline/controller.go index b50b124a..3932c86f 100644 --- a/cmd/agent/daemon/pipeline/controller.go +++ b/cmd/agent/daemon/pipeline/controller.go @@ -132,6 +132,72 @@ func NewK8sPodVolumeMetricsWriter(metricsClient custommetrics.MetricClient) (K8s ) } +// Logging writers for testing without ClickHouse + +type LoggingBlockDeviceMetricsWriter struct { + log *logging.Logger +} + +func NewLoggingBlockDeviceMetricsWriter(log *logging.Logger) BlockDeviceMetricsWriter { + return &LoggingBlockDeviceMetricsWriter{log: log.WithField("writer", "block_device")} +} + +func (w *LoggingBlockDeviceMetricsWriter) Write(metrics ...BlockDeviceMetric) error { + for _, m := range metrics { + w.log.Infof("[BlockDevice] name=%s path=%s size=%v type=%s partition_of=%s read_iops=%.2f write_iops=%.2f", + m.Name, m.Path, m.SizeBytes, m.DiskType, m.PartitionOf, m.ReadIOPS, m.WriteIOPS) + } + return nil +} + +type LoggingFilesystemMetricsWriter struct { + log *logging.Logger +} + +func NewLoggingFilesystemMetricsWriter(log *logging.Logger) FilesystemMetricsWriter { + return &LoggingFilesystemMetricsWriter{log: log.WithField("writer", "filesystem")} +} + +func (w *LoggingFilesystemMetricsWriter) Write(metrics 
...FilesystemMetric) error { + for _, m := range metrics { + w.log.Infof("[Filesystem] mount=%s type=%s total=%v used=%v namespace=%v pod=%v pvc=%v", + m.MountPoint, m.Type, m.TotalBytes, m.UsedBytes, m.Namespace, m.PodName, m.PVCName) + } + return nil +} + +type LoggingNodeStatsSummaryWriter struct { + log *logging.Logger +} + +func NewLoggingNodeStatsSummaryWriter(log *logging.Logger) NodeStatsSummaryWriter { + return &LoggingNodeStatsSummaryWriter{log: log.WithField("writer", "node_stats")} +} + +func (w *LoggingNodeStatsSummaryWriter) Write(metrics ...NodeStatsSummaryMetric) error { + for _, m := range metrics { + w.log.Infof("[NodeStats] node=%s image_fs_size=%v image_fs_used=%v container_fs_size=%v container_fs_used=%v", + m.NodeName, m.ImageFsSizeBytes, m.ImageFsUsedBytes, m.ContainerFsSizeBytes, m.ContainerFsUsedBytes) + } + return nil +} + +type LoggingK8sPodVolumeMetricsWriter struct { + log *logging.Logger +} + +func NewLoggingK8sPodVolumeMetricsWriter(log *logging.Logger) K8sPodVolumeMetricsWriter { + return &LoggingK8sPodVolumeMetricsWriter{log: log.WithField("writer", "pod_volume")} +} + +func (w *LoggingK8sPodVolumeMetricsWriter) Write(metrics ...K8sPodVolumeMetric) error { + for _, m := range metrics { + w.log.Infof("[PodVolume] ns=%s pod=%s volume=%s mount=%s mode=%s pvc=%v pv=%v storage_class=%v device_path=%v", + m.Namespace, m.PodName, m.VolumeName, m.MountPath, m.VolumeMode, m.PVCName, m.PVName, m.StorageClass, m.DevicePath) + } + return nil +} + func NewController( log *logging.Logger, cfg Config, From 2b8a47a1ffc181ed5fcd9b41301d0d7537be6e58 Mon Sep 17 00:00:00 2001 From: Roman Melnyk Date: Fri, 16 Jan 2026 13:52:31 +0100 Subject: [PATCH 04/22] Integrate VPC metadata enrichment for netflow IP addresses (#611) - Added VPCIndex in-memory index for IP metadata lookups across VPCs, subnets, peered networks, and service ranges - Periodic refresh from cloud provider with configurable interval - gRPC endpoint GetIPsInfo enriches IPs with VPC 
metadata (region, zone, cloud service detection via domain) - Improved zone/region detection with kube nodes info --- api/v1/kube/kube_api.pb.go | 61 ++- api/v1/kube/kube_api.proto | 4 + api/v1/kube/kube_api_grpc.pb.go | 5 +- api/v1/runtime/common.pb.go | 35 +- api/v1/runtime/common.proto | 2 + api/v1/runtime/runtime_agent_api.pb.go | 11 +- api/v1/runtime/runtime_agent_api_grpc.pb.go | 5 +- cmd/agent/daemon/pipeline/controller.go | 40 +- cmd/agent/daemon/pipeline/netflow_pipeline.go | 73 +++- cmd/controller/app/app.go | 8 + cmd/controller/config/config.go | 1 + .../controllers/vpc_metadata_controller.go | 143 +++++++ .../vpc_metadata_controller_test.go | 87 ++++ cmd/controller/kube/client.go | 19 +- cmd/controller/kube/index.go | 29 +- cmd/controller/kube/index_ips_details.go | 25 ++ cmd/controller/kube/index_vpc.go | 273 ++++++++++++ cmd/controller/kube/index_vpc_test.go | 396 ++++++++++++++++++ cmd/controller/kube/server.go | 87 +++- cmd/controller/main.go | 13 +- go.mod | 1 + go.sum | 2 + pkg/net/iputil/iputilt.go | 8 + 23 files changed, 1252 insertions(+), 76 deletions(-) create mode 100644 cmd/controller/controllers/vpc_metadata_controller.go create mode 100644 cmd/controller/controllers/vpc_metadata_controller_test.go create mode 100644 cmd/controller/kube/index_vpc.go create mode 100644 cmd/controller/kube/index_vpc_test.go diff --git a/api/v1/kube/kube_api.pb.go b/api/v1/kube/kube_api.pb.go index 52963626..c50cd6e3 100644 --- a/api/v1/kube/kube_api.pb.go +++ b/api/v1/kube/kube_api.pb.go @@ -1,17 +1,18 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.11 -// protoc v6.33.0 +// protoc-gen-go v1.36.9 +// protoc v6.32.0 // source: api/v1/kube/kube_api.proto package v1 import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) const ( @@ -125,6 +126,7 @@ type GetClusterInfoResponse struct { state protoimpl.MessageState `protogen:"open.v1"` PodsCidr []string `protobuf:"bytes,1,rep,name=pods_cidr,json=podsCidr,proto3" json:"pods_cidr,omitempty"` ServiceCidr []string `protobuf:"bytes,2,rep,name=service_cidr,json=serviceCidr,proto3" json:"service_cidr,omitempty"` + OtherCidr []string `protobuf:"bytes,3,rep,name=other_cidr,json=otherCidr,proto3" json:"other_cidr,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -173,6 +175,13 @@ func (x *GetClusterInfoResponse) GetServiceCidr() []string { return nil } +func (x *GetClusterInfoResponse) GetOtherCidr() []string { + if x != nil { + return x.OtherCidr + } + return nil +} + type GetIPInfoRequest struct { state protoimpl.MessageState `protogen:"open.v1"` Ip []byte `protobuf:"bytes,1,opt,name=ip,proto3" json:"ip,omitempty"` @@ -358,7 +367,9 @@ type IPInfo struct { WorkloadKind string `protobuf:"bytes,5,opt,name=workload_kind,json=workloadKind,proto3" json:"workload_kind,omitempty"` WorkloadUid string `protobuf:"bytes,6,opt,name=workload_uid,json=workloadUid,proto3" json:"workload_uid,omitempty"` Zone string `protobuf:"bytes,7,opt,name=zone,proto3" json:"zone,omitempty"` + Region string `protobuf:"bytes,10,opt,name=region,proto3" json:"region,omitempty"` NodeName string `protobuf:"bytes,8,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` + CloudDomain string `protobuf:"bytes,11,opt,name=cloud_domain,json=cloudDomain,proto3" 
json:"cloud_domain,omitempty"` Ip []byte `protobuf:"bytes,9,opt,name=ip,proto3" json:"ip,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -443,6 +454,13 @@ func (x *IPInfo) GetZone() string { return "" } +func (x *IPInfo) GetRegion() string { + if x != nil { + return x.Region + } + return "" +} + func (x *IPInfo) GetNodeName() string { if x != nil { return x.NodeName @@ -450,6 +468,13 @@ func (x *IPInfo) GetNodeName() string { return "" } +func (x *IPInfo) GetCloudDomain() string { + if x != nil { + return x.CloudDomain + } + return "" +} + func (x *IPInfo) GetIp() []byte { if x != nil { return x.Ip @@ -551,6 +576,7 @@ type Pod struct { WorkloadName string `protobuf:"bytes,2,opt,name=workload_name,json=workloadName,proto3" json:"workload_name,omitempty"` WorkloadKind WorkloadKind `protobuf:"varint,3,opt,name=workload_kind,json=workloadKind,proto3,enum=kube.v1.WorkloadKind" json:"workload_kind,omitempty"` Zone string `protobuf:"bytes,4,opt,name=zone,proto3" json:"zone,omitempty"` + Region string `protobuf:"bytes,6,opt,name=region,proto3" json:"region,omitempty"` NodeName string `protobuf:"bytes,5,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache @@ -614,6 +640,13 @@ func (x *Pod) GetZone() string { return "" } +func (x *Pod) GetRegion() string { + if x != nil { + return x.Region + } + return "" +} + func (x *Pod) GetNodeName() string { if x != nil { return x.NodeName @@ -1586,10 +1619,12 @@ var File_api_v1_kube_kube_api_proto protoreflect.FileDescriptor const file_api_v1_kube_kube_api_proto_rawDesc = "" + "\n" + "\x1aapi/v1/kube/kube_api.proto\x12\akube.v1\"\x17\n" + - "\x15GetClusterInfoRequest\"X\n" + + "\x15GetClusterInfoRequest\"w\n" + "\x16GetClusterInfoResponse\x12\x1b\n" + "\tpods_cidr\x18\x01 \x03(\tR\bpodsCidr\x12!\n" + - "\fservice_cidr\x18\x02 \x03(\tR\vserviceCidr\"\"\n" + + "\fservice_cidr\x18\x02 \x03(\tR\vserviceCidr\x12\x1d\n" + + 
"\n" + + "other_cidr\x18\x03 \x03(\tR\totherCidr\"\"\n" + "\x10GetIPInfoRequest\x12\x0e\n" + "\x02ip\x18\x01 \x01(\fR\x02ip\"8\n" + "\x11GetIPInfoResponse\x12#\n" + @@ -1597,7 +1632,7 @@ const file_api_v1_kube_kube_api_proto_rawDesc = "" + "\x11GetIPsInfoRequest\x12\x10\n" + "\x03ips\x18\x01 \x03(\fR\x03ips\"9\n" + "\x12GetIPsInfoResponse\x12#\n" + - "\x04list\x18\x01 \x03(\v2\x0f.kube.v1.IPInfoR\x04list\"\x88\x02\n" + + "\x04list\x18\x01 \x03(\v2\x0f.kube.v1.IPInfoR\x04list\"\xc3\x02\n" + "\x06IPInfo\x12\x17\n" + "\apod_uid\x18\x01 \x01(\tR\x06podUid\x12\x19\n" + "\bpod_name\x18\x03 \x01(\tR\apodName\x12\x1c\n" + @@ -1605,18 +1640,22 @@ const file_api_v1_kube_kube_api_proto_rawDesc = "" + "\rworkload_name\x18\x04 \x01(\tR\fworkloadName\x12#\n" + "\rworkload_kind\x18\x05 \x01(\tR\fworkloadKind\x12!\n" + "\fworkload_uid\x18\x06 \x01(\tR\vworkloadUid\x12\x12\n" + - "\x04zone\x18\a \x01(\tR\x04zone\x12\x1b\n" + - "\tnode_name\x18\b \x01(\tR\bnodeName\x12\x0e\n" + + "\x04zone\x18\a \x01(\tR\x04zone\x12\x16\n" + + "\x06region\x18\n" + + " \x01(\tR\x06region\x12\x1b\n" + + "\tnode_name\x18\b \x01(\tR\bnodeName\x12!\n" + + "\fcloud_domain\x18\v \x01(\tR\vcloudDomain\x12\x0e\n" + "\x02ip\x18\t \x01(\fR\x02ip\"!\n" + "\rGetPodRequest\x12\x10\n" + "\x03uid\x18\x02 \x01(\tR\x03uid\"0\n" + "\x0eGetPodResponse\x12\x1e\n" + - "\x03pod\x18\x01 \x01(\v2\f.kube.v1.PodR\x03pod\"\xba\x01\n" + + "\x03pod\x18\x01 \x01(\v2\f.kube.v1.PodR\x03pod\"\xd2\x01\n" + "\x03Pod\x12!\n" + "\fworkload_uid\x18\x01 \x01(\tR\vworkloadUid\x12#\n" + "\rworkload_name\x18\x02 \x01(\tR\fworkloadName\x12:\n" + "\rworkload_kind\x18\x03 \x01(\x0e2\x15.kube.v1.WorkloadKindR\fworkloadKind\x12\x12\n" + - "\x04zone\x18\x04 \x01(\tR\x04zone\x12\x1b\n" + + "\x04zone\x18\x04 \x01(\tR\x04zone\x12\x16\n" + + "\x06region\x18\x06 \x01(\tR\x06region\x12\x1b\n" + "\tnode_name\x18\x05 \x01(\tR\bnodeName\"$\n" + "\x0eGetNodeRequest\x12\x12\n" + "\x04name\x18\x01 \x01(\tR\x04name\"4\n" + diff --git 
a/api/v1/kube/kube_api.proto b/api/v1/kube/kube_api.proto index c257113b..073900e4 100644 --- a/api/v1/kube/kube_api.proto +++ b/api/v1/kube/kube_api.proto @@ -19,6 +19,7 @@ message GetClusterInfoRequest {} message GetClusterInfoResponse { repeated string pods_cidr = 1; repeated string service_cidr = 2; + repeated string other_cidr = 3; } message GetIPInfoRequest { @@ -45,7 +46,9 @@ message IPInfo { string workload_kind = 5; string workload_uid = 6; string zone = 7; + string region = 10; string node_name = 8; + string cloud_domain = 11; bytes ip = 9; } @@ -73,6 +76,7 @@ message Pod { string workload_name = 2; WorkloadKind workload_kind = 3; string zone = 4; + string region = 6; string node_name = 5; } diff --git a/api/v1/kube/kube_api_grpc.pb.go b/api/v1/kube/kube_api_grpc.pb.go index a85380b5..cd325a48 100644 --- a/api/v1/kube/kube_api_grpc.pb.go +++ b/api/v1/kube/kube_api_grpc.pb.go @@ -1,13 +1,14 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.6.0 -// - protoc v6.33.0 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v6.32.0 // source: api/v1/kube/kube_api.proto package v1 import ( context "context" + grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" diff --git a/api/v1/runtime/common.pb.go b/api/v1/runtime/common.pb.go index 2fc4d5d5..7f6a3df3 100644 --- a/api/v1/runtime/common.pb.go +++ b/api/v1/runtime/common.pb.go @@ -1,17 +1,18 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.11 -// protoc v6.33.0 +// protoc-gen-go v1.36.9 +// protoc v6.32.0 // source: api/v1/runtime/common.proto package v1 import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) const ( @@ -1486,6 +1487,7 @@ type Netflow struct { WorkloadName string `protobuf:"bytes,6,opt,name=workload_name,json=workloadName,proto3" json:"workload_name,omitempty"` WorkloadKind string `protobuf:"bytes,7,opt,name=workload_kind,json=workloadKind,proto3" json:"workload_kind,omitempty"` Zone string `protobuf:"bytes,8,opt,name=zone,proto3" json:"zone,omitempty"` + Region string `protobuf:"bytes,16,opt,name=region,proto3" json:"region,omitempty"` NodeName string `protobuf:"bytes,13,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` Addr []byte `protobuf:"bytes,9,opt,name=addr,proto3" json:"addr,omitempty"` Port uint32 `protobuf:"varint,10,opt,name=port,proto3" json:"port,omitempty"` @@ -1583,6 +1585,13 @@ func (x *Netflow) GetZone() string { return "" } +func (x *Netflow) GetRegion() string { + if x != nil { + return x.Region + } + return "" +} + func (x *Netflow) GetNodeName() string { if x != nil { return x.NodeName @@ -1639,6 +1648,7 @@ type NetflowDestination struct { WorkloadName string `protobuf:"bytes,3,opt,name=workload_name,json=workloadName,proto3" json:"workload_name,omitempty"` WorkloadKind string `protobuf:"bytes,4,opt,name=workload_kind,json=workloadKind,proto3" json:"workload_kind,omitempty"` Zone string `protobuf:"bytes,5,opt,name=zone,proto3" json:"zone,omitempty"` + Region string `protobuf:"bytes,15,opt,name=region,proto3" json:"region,omitempty"` DnsQuestion string `protobuf:"bytes,6,opt,name=dns_question,json=dnsQuestion,proto3" json:"dns_question,omitempty"` NodeName 
string `protobuf:"bytes,14,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` Addr []byte `protobuf:"bytes,8,opt,name=addr,proto3" json:"addr,omitempty"` @@ -1716,6 +1726,13 @@ func (x *NetflowDestination) GetZone() string { return "" } +func (x *NetflowDestination) GetRegion() string { + if x != nil { + return x.Region + } + return "" +} + func (x *NetflowDestination) GetDnsQuestion() string { if x != nil { return x.DnsQuestion @@ -2260,7 +2277,7 @@ const file_api_v1_runtime_common_proto_rawDesc = "" + "\tfull_repo\x18\x02 \x01(\tR\bfullRepo\x12\x16\n" + "\x06server\x18\x03 \x01(\tR\x06server\x12\x1b\n" + "\trepo_path\x18\x04 \x01(\tR\brepoPath\"(\n" + - "&IngressNightmareExploitDetectedFinding\"\x8a\x04\n" + + "&IngressNightmareExploitDetectedFinding\"\xa2\x04\n" + "\aNetflow\x12\x1c\n" + "\ttimestamp\x18\x01 \x01(\x04R\ttimestamp\x12!\n" + "\fprocess_name\x18\x02 \x01(\tR\vprocessName\x12\x1c\n" + @@ -2269,7 +2286,8 @@ const file_api_v1_runtime_common_proto_rawDesc = "" + "\x0econtainer_name\x18\x05 \x01(\tR\rcontainerName\x12#\n" + "\rworkload_name\x18\x06 \x01(\tR\fworkloadName\x12#\n" + "\rworkload_kind\x18\a \x01(\tR\fworkloadKind\x12\x12\n" + - "\x04zone\x18\b \x01(\tR\x04zone\x12\x1b\n" + + "\x04zone\x18\b \x01(\tR\x04zone\x12\x16\n" + + "\x06region\x18\x10 \x01(\tR\x06region\x12\x1b\n" + "\tnode_name\x18\r \x01(\tR\bnodeName\x12\x12\n" + "\x04addr\x18\t \x01(\fR\x04addr\x12\x12\n" + "\x04port\x18\n" + @@ -2277,13 +2295,14 @@ const file_api_v1_runtime_common_proto_rawDesc = "" + "\x03pid\x18\x0e \x01(\rR\x03pid\x12,\n" + "\x12process_start_time\x18\x0f \x01(\x04R\x10processStartTime\x127\n" + "\bprotocol\x18\v \x01(\x0e2\x1b.runtime.v1.NetflowProtocolR\bprotocol\x12B\n" + - "\fdestinations\x18\f \x03(\v2\x1e.runtime.v1.NetflowDestinationR\fdestinations\"\x87\x03\n" + + "\fdestinations\x18\f \x03(\v2\x1e.runtime.v1.NetflowDestinationR\fdestinations\"\x9f\x03\n" + "\x12NetflowDestination\x12\x1c\n" + "\tnamespace\x18\x01 
\x01(\tR\tnamespace\x12\x19\n" + "\bpod_name\x18\x02 \x01(\tR\apodName\x12#\n" + "\rworkload_name\x18\x03 \x01(\tR\fworkloadName\x12#\n" + "\rworkload_kind\x18\x04 \x01(\tR\fworkloadKind\x12\x12\n" + - "\x04zone\x18\x05 \x01(\tR\x04zone\x12!\n" + + "\x04zone\x18\x05 \x01(\tR\x04zone\x12\x16\n" + + "\x06region\x18\x0f \x01(\tR\x06region\x12!\n" + "\fdns_question\x18\x06 \x01(\tR\vdnsQuestion\x12\x1b\n" + "\tnode_name\x18\x0e \x01(\tR\bnodeName\x12\x12\n" + "\x04addr\x18\b \x01(\fR\x04addr\x12\x12\n" + diff --git a/api/v1/runtime/common.proto b/api/v1/runtime/common.proto index 12691fa7..cfe27921 100644 --- a/api/v1/runtime/common.proto +++ b/api/v1/runtime/common.proto @@ -168,6 +168,7 @@ message Netflow { string workload_name = 6; string workload_kind = 7; string zone = 8; + string region = 16; string node_name = 13; bytes addr = 9; uint32 port = 10; @@ -183,6 +184,7 @@ message NetflowDestination { string workload_name = 3; string workload_kind = 4; string zone = 5; + string region = 15; string dns_question = 6; string node_name = 14; bytes addr = 8; diff --git a/api/v1/runtime/runtime_agent_api.pb.go b/api/v1/runtime/runtime_agent_api.pb.go index 3d6b1145..8153ff21 100644 --- a/api/v1/runtime/runtime_agent_api.pb.go +++ b/api/v1/runtime/runtime_agent_api.pb.go @@ -1,18 +1,19 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.11 -// protoc v6.33.0 +// protoc-gen-go v1.36.4 +// protoc v5.29.3 // source: api/v1/runtime/runtime_agent_api.proto package v1 import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" reflect "reflect" sync "sync" unsafe "unsafe" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) const ( diff --git a/api/v1/runtime/runtime_agent_api_grpc.pb.go b/api/v1/runtime/runtime_agent_api_grpc.pb.go index b54758de..e3b15e8c 100644 --- a/api/v1/runtime/runtime_agent_api_grpc.pb.go +++ b/api/v1/runtime/runtime_agent_api_grpc.pb.go @@ -1,13 +1,14 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.6.0 -// - protoc v6.33.0 +// - protoc-gen-go-grpc v1.5.1 +// - protoc v6.32.0 // source: api/v1/runtime/runtime_agent_api.proto package v1 import ( context "context" + grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" diff --git a/cmd/agent/daemon/pipeline/controller.go b/cmd/agent/daemon/pipeline/controller.go index 3932c86f..4510b649 100644 --- a/cmd/agent/daemon/pipeline/controller.go +++ b/cmd/agent/daemon/pipeline/controller.go @@ -222,6 +222,12 @@ func NewController( if err != nil { panic(err) } + nodeCache, err := freelru.NewSynced[string, *kubepb.Node](4, func(k string) uint32 { + return uint32(xxhash.Sum64String(k)) // nolint:gosec + }) + if err != nil { + panic(err) + } return &Controller{ log: log.WithField("component", "ctrl"), @@ -235,6 +241,7 @@ func NewController( nodeName: os.Getenv("NODE_NAME"), mutedNamespaces: map[string]struct{}{}, podCache: podCache, + nodeCache: nodeCache, processTreeCollector: processTreeCollector, procHandler: procHandler, 
enrichmentService: enrichmentService, @@ -270,6 +277,7 @@ type Controller struct { clusterInfo *clusterInfo kubeClient kubepb.KubeAPIClient podCache *freelru.SyncedLRU[string, *kubepb.Pod] + nodeCache *freelru.SyncedLRU[string, *kubepb.Node] conntrackCache *freelru.LRU[types.AddrTuple, netip.AddrPort] eventGroups map[uint64]*containerEventsGroup @@ -438,7 +446,6 @@ func (c *Controller) MuteNamespace(namespace string) error { cgroups := c.containersClient.GetCgroupsInNamespace(namespace) err := c.tracer.MuteEventsFromCgroups(cgroups, fmt.Sprintf("muted namespace %q", namespace)) - if err != nil { return err } @@ -469,6 +476,21 @@ func (c *Controller) getPodInfo(podID string) (*kubepb.Pod, bool) { return pod, true } +func (c *Controller) getNodeInfo(nodeName string) (*kubepb.Node, bool) { + node, found := c.nodeCache.Get(nodeName) + if !found { + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + resp, err := c.kubeClient.GetNode(ctx, &kubepb.GetNodeRequest{Name: nodeName}) + if err != nil { + return nil, false + } + node = resp.Node + c.nodeCache.Add(nodeName, node) + } + return node, true +} + func workloadKindString(kind kubepb.WorkloadKind) string { switch kind { case kubepb.WorkloadKind_WORKLOAD_KIND_DEPLOYMENT: @@ -501,3 +523,19 @@ func toProtoProcessAction(action processtree.ProcessAction) castaipb.ProcessActi } return castaipb.ProcessAction_PROCESS_ACTION_UNKNOWN } + +func getZone(n *kubepb.Node) string { + if n == nil { + return "" + } + zone := n.Labels["topology.kubernetes.io/zone"] + return zone +} + +func getRegion(n *kubepb.Node) string { + if n == nil { + return "" + } + region := n.Labels["topology.kubernetes.io/region"] + return region +} diff --git a/cmd/agent/daemon/pipeline/netflow_pipeline.go b/cmd/agent/daemon/pipeline/netflow_pipeline.go index 2d0a0558..cf4f68f0 100644 --- a/cmd/agent/daemon/pipeline/netflow_pipeline.go +++ b/cmd/agent/daemon/pipeline/netflow_pipeline.go @@ -27,6 +27,8 @@ import ( type 
clusterInfo struct { podCidr []netip.Prefix serviceCidr []netip.Prefix + otherCidr []netip.Prefix + clusterCidr []netip.Prefix } func (c *Controller) getClusterInfo(ctx context.Context) (*clusterInfo, error) { @@ -50,6 +52,7 @@ func (c *Controller) getClusterInfo(ctx context.Context) (*clusterInfo, error) { return nil, fmt.Errorf("parsing pods cidr: %w", err) } res.podCidr = append(res.podCidr, subnet) + res.clusterCidr = append(res.clusterCidr, subnet) } for _, cidr := range resp.ServiceCidr { subnet, err := netip.ParsePrefix(cidr) @@ -57,6 +60,15 @@ func (c *Controller) getClusterInfo(ctx context.Context) (*clusterInfo, error) { return nil, fmt.Errorf("parsing service cidr: %w", err) } res.serviceCidr = append(res.serviceCidr, subnet) + res.clusterCidr = append(res.clusterCidr, subnet) + } + for _, cidr := range resp.OtherCidr { + subnet, err := netip.ParsePrefix(cidr) + if err != nil { + return nil, fmt.Errorf("parsing other cidr: %w", err) + } + res.otherCidr = append(res.otherCidr, subnet) + res.clusterCidr = append(res.clusterCidr, subnet) } return &res, nil } @@ -162,14 +174,20 @@ func digestAddUint64(digest *xxhash.Digest, val uint64) { _, _ = digest.Write(dst[:]) } -func (c *Controller) handleNetflows(ctx context.Context, groups map[uint64]*netflowGroup, stats *dataBatchStats, digest *xxhash.Digest, keys []ebpftracer.TrafficKey, vals []ebpftracer.TrafficSummary) { +func (c *Controller) handleNetflows( + ctx context.Context, + groups map[uint64]*netflowGroup, + stats *dataBatchStats, + digest *xxhash.Digest, + keys []ebpftracer.TrafficKey, + vals []ebpftracer.TrafficSummary, +) { c.log.Debugf("handling netflows, total=%v", len(keys)) start := time.Now() kubeDestinations := map[netip.Addr]struct{}{} var foundContainerdConnectErrors bool - for i, key := range keys { summary := vals[i] group, found := groups[key.ProcessIdentity.CgroupId] @@ -183,7 +201,7 @@ func (c *Controller) handleNetflows(ctx context.Context, groups map[uint64]*netf netflowKey := 
newNetflowKey(digest, &key) netflow, found := group.flows[netflowKey] if !found { - d, err := c.toNetflow(ctx, &key, &summary, start) + netflowPb, err := c.toNetflow(ctx, &key, &summary, start) if err != nil { // TODO: Investigate why containerd connect fails for some clusters. Most likely sock is in a different path. if strings.Contains(err.Error(), "/run/containerd/containerd.sock: connect: connection refused") { @@ -194,7 +212,7 @@ func (c *Controller) handleNetflows(ctx context.Context, groups map[uint64]*netf continue } val := &netflowVal{ - pb: d, + pb: netflowPb, updatedAt: time.Now(), } group.flows[netflowKey] = val @@ -208,7 +226,7 @@ func (c *Controller) handleNetflows(ctx context.Context, groups map[uint64]*netf continue } - if (c.clusterInfo != nil && (c.clusterInfo.serviceCidrContains(destAddr) || c.clusterInfo.podCidrContains(destAddr))) || !c.cfg.Netflow.CheckClusterNetworkRanges { + if (c.clusterInfo != nil && c.clusterInfo.clusterCidrContains(destAddr)) || !c.cfg.Netflow.CheckClusterNetworkRanges { kubeDestinations[destAddr] = struct{}{} } @@ -263,7 +281,14 @@ func (c *Controller) enrichKubeDestinations(ctx context.Context, groups map[uint flowDest.WorkloadName = info.WorkloadName flowDest.WorkloadKind = info.WorkloadKind flowDest.Zone = info.Zone + flowDest.Region = info.Region flowDest.NodeName = info.NodeName + + // set cloud domain as dns question when it's empty + // i.e. googleapis.com or amazonaws.com + if flowDest.DnsQuestion == "" && info.CloudDomain != "" { + flowDest.DnsQuestion = info.CloudDomain + } } } } @@ -296,6 +321,13 @@ func (c *Controller) addNetflowDestination(netflow *netflowVal, dest *castaipb.N return } + // If destination zone is unknown but IP is local network (loopback, link-local), + // then destination must be on same zone/region + if !isPublicDest && dest.Zone == "" && iputil.IsLocalNetwork(destAddr) { + dest.Zone = netflow.pb.Zone + dest.Region = netflow.pb.Region + } + // No merge, just append to destinations list. 
netflow.pb.Destinations = append(netflow.pb.Destinations, dest) if maybeMerge { @@ -352,12 +384,25 @@ func (c *Controller) toNetflow(ctx context.Context, key *ebpftracer.TrafficKey, res.PodName = container.PodName res.ContainerName = container.Name - ipInfo, found := c.getPodInfo(container.PodUID) + podInfo, found := c.getPodInfo(container.PodUID) if found { - res.WorkloadName = ipInfo.WorkloadName - res.WorkloadKind = workloadKindString(ipInfo.WorkloadKind) - res.Zone = ipInfo.Zone - res.NodeName = ipInfo.NodeName + res.WorkloadName = podInfo.WorkloadName + res.WorkloadKind = workloadKindString(podInfo.WorkloadKind) + res.Zone = podInfo.Zone + res.Region = podInfo.Region + res.NodeName = podInfo.NodeName + } + } + + // in case when pod info is not found we still can get AZ info from node + if res.Zone == "" || res.Region == "" { + if nodeInfo, found := c.getNodeInfo(res.NodeName); found { + if res.Zone == "" { + res.Zone = getZone(nodeInfo) + } + if res.Region == "" { + res.Region = getRegion(nodeInfo) + } } } @@ -422,8 +467,8 @@ func (c *Controller) getConntrackDest(src, dst netip.AddrPort) (netip.AddrPort, return realDst, true } -func (c *clusterInfo) podCidrContains(ip netip.Addr) bool { - for _, cidr := range c.podCidr { +func (c *clusterInfo) serviceCidrContains(ip netip.Addr) bool { + for _, cidr := range c.serviceCidr { if cidr.Contains(ip) { return true } @@ -431,8 +476,8 @@ func (c *clusterInfo) podCidrContains(ip netip.Addr) bool { return false } -func (c *clusterInfo) serviceCidrContains(ip netip.Addr) bool { - for _, cidr := range c.serviceCidr { +func (c *clusterInfo) clusterCidrContains(ip netip.Addr) bool { + for _, cidr := range c.clusterCidr { if cidr.Contains(ip) { return true } diff --git a/cmd/controller/app/app.go b/cmd/controller/app/app.go index 7af9322d..b25e9c5a 100644 --- a/cmd/controller/app/app.go +++ b/cmd/controller/app/app.go @@ -107,6 +107,14 @@ func (a *App) Run(ctx context.Context) error { return kubeClient.Run(ctx) }) + // 
Initialize cloud provider if enabled + if cfg.CloudProvider.Enabled { + errg.Go(func() error { + vpcMetadataCtrl := controllers.NewVPCMetadataController(log, cfg.CloudProvider, kubeClient) + return vpcMetadataCtrl.Run(ctx) + }) + } + // CAST AI specific logic. if castaiClient != nil { errg.Go(func() error { diff --git a/cmd/controller/config/config.go b/cmd/controller/config/config.go index c577b325..c5126b2d 100644 --- a/cmd/controller/config/config.go +++ b/cmd/controller/config/config.go @@ -39,6 +39,7 @@ type Config struct { KubeBench kubebench.Config `json:"kubeBench"` JobsCleanup controllers.JobsCleanupConfig `json:"jobsCleanup"` AgentConfig AgentConfig `json:"agentConfig"` + CloudProvider controllers.VPCMetadataConfig `json:"cloudProvider"` } type AgentConfig struct { diff --git a/cmd/controller/controllers/vpc_metadata_controller.go b/cmd/controller/controllers/vpc_metadata_controller.go new file mode 100644 index 00000000..88d6bfcf --- /dev/null +++ b/cmd/controller/controllers/vpc_metadata_controller.go @@ -0,0 +1,143 @@ +package controllers + +import ( + "context" + "time" + + "github.com/castai/kvisor/cmd/controller/kube" + "github.com/castai/kvisor/pkg/cloudprovider" + cloudtypes "github.com/castai/kvisor/pkg/cloudprovider/types" + "github.com/castai/kvisor/pkg/logging" +) + +type VPCMetadataConfig struct { + Enabled bool `json:"enabled"` + Type string `json:"type"` + NetworkName string `json:"networkName"` + RefreshInterval time.Duration `json:"refreshInterval"` + CredentialsFile string `json:"credentialsFile"` + GCPProjectID string `json:"gcpProjectID"` + AWSAccountID string `json:"awsAccountID"` + AzureSubscription string `json:"azureSubscription"` +} + +func NewVPCMetadataController(log *logging.Logger, cfg VPCMetadataConfig, kubeClient *kube.Client) *VPCMetadataController { + if cfg.RefreshInterval == 0 { + cfg.RefreshInterval = 1 * time.Hour + } + return &VPCMetadataController{ + log: log.WithField("component", "vpc_metadata"), + cfg: cfg, + 
kubeClient: kubeClient, + } +} + +type VPCMetadataController struct { + log *logging.Logger + cfg VPCMetadataConfig + kubeClient *kube.Client +} + +func (c *VPCMetadataController) Run(ctx context.Context) error { + c.log.Infof("running for cloud provider: %s", c.cfg.Type) + defer c.log.Infof("stopping") + + cpConfig := cloudtypes.Config{ + Type: cloudtypes.Type(c.cfg.Type), + NetworkName: c.cfg.NetworkName, + CredentialsFile: c.cfg.CredentialsFile, + GCPProjectID: c.cfg.GCPProjectID, + AWSAccountID: c.cfg.AWSAccountID, + } + + provider, err := cloudprovider.NewProvider(ctx, cpConfig) + if err != nil { + c.log.Errorf("failed to initialize cloud provider: %v", err) + return nil + } + defer provider.Close() + + c.log.Infof("cloud provider %s initialized successfully", provider.Type()) + + vpcIndex := kube.NewVPCIndex(c.log, c.cfg.RefreshInterval) + + if err := c.fetchInitialMetadata(ctx, provider, vpcIndex); err != nil { + c.log.Errorf("failed to fetch initial VPC metadata: %v", err) + return nil + } + + c.kubeClient.SetVPCIndex(vpcIndex) + + return c.runRefreshLoop(ctx, provider, vpcIndex) +} + +func (c *VPCMetadataController) fetchInitialMetadata(ctx context.Context, provider cloudtypes.Provider, vpcIndex *kube.VPCIndex) error { + backoff := 2 * time.Second + maxRetries := 5 + + for i := 0; i < maxRetries; i++ { + err := provider.RefreshMetadata(ctx) + if err != nil { + c.log.Errorf("VPC metadata refresh failed: %v", err) + continue + } + metadata, err := provider.GetMetadata(ctx) + if err == nil { + if err := vpcIndex.Update(metadata); err != nil { + c.log.Errorf("failed to update VPC index: %v", err) + } else { + c.log.Info("initial VPC metadata loaded successfully") + return nil + } + } + + if i < maxRetries-1 { + c.log.Warnf("VPC metadata fetch attempt %d/%d failed: %v, retrying in %v", i+1, maxRetries, err, backoff) + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(backoff): + backoff *= 2 + if backoff > 30*time.Second { + backoff = 30 * 
time.Second + } + } + } + } + + c.log.Errorf("failed to fetch initial VPC metadata after %d attempts", maxRetries) + return nil +} + +func (c *VPCMetadataController) runRefreshLoop(ctx context.Context, provider cloudtypes.Provider, vpcIndex *kube.VPCIndex) error { + ticker := time.NewTicker(c.cfg.RefreshInterval) + defer ticker.Stop() + + c.log.Infof("starting VPC metadata refresh (interval: %v)", c.cfg.RefreshInterval) + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + err := provider.RefreshMetadata(ctx) + if err != nil { + c.log.Errorf("VPC metadata refresh failed: %v", err) + continue + } + + metadata, err := provider.GetMetadata(ctx) + if err != nil { + c.log.Errorf("VPC metadata loading failed: %v", err) + continue + } + + if err := vpcIndex.Update(metadata); err != nil { + c.log.Errorf("failed to update VPC index: %v", err) + continue + } + + c.log.Debug("VPC metadata refreshed successfully") + } + } +} diff --git a/cmd/controller/controllers/vpc_metadata_controller_test.go b/cmd/controller/controllers/vpc_metadata_controller_test.go new file mode 100644 index 00000000..6ab89ce8 --- /dev/null +++ b/cmd/controller/controllers/vpc_metadata_controller_test.go @@ -0,0 +1,87 @@ +package controllers + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/castai/kvisor/cmd/controller/kube" + "github.com/castai/kvisor/pkg/logging" + "github.com/stretchr/testify/require" + "k8s.io/client-go/kubernetes/fake" +) + +func TestVPCMetadataController(t *testing.T) { + log := logging.NewTestLog() + k8sClient := fake.NewSimpleClientset() + kubeClient := kube.NewClient(log, "agent", "ns", kube.Version{}, k8sClient) + + t.Run("returns nil on cloud provider initialization failure", func(t *testing.T) { + r := require.New(t) + ctx := context.Background() + + cfg := VPCMetadataConfig{ + Enabled: true, + Type: "invalid-provider", + NetworkName: "test-network", + RefreshInterval: 1 * time.Hour, + } + + ctrl := 
NewVPCMetadataController(log, cfg, kubeClient) + + // Should return nil (no error) on cloud provider initialization failure + err := ctrl.Run(ctx) + r.NoError(err) + }) + + t.Run("stops on context cancellation", func(t *testing.T) { + r := require.New(t) + ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond) + defer cancel() + + cfg := VPCMetadataConfig{ + Enabled: true, + Type: "invalid-provider", + NetworkName: "test-network", + RefreshInterval: 1 * time.Hour, + } + + ctrl := NewVPCMetadataController(log, cfg, kubeClient) + + err := ctrl.Run(ctx) + // Should either be nil (failed to init provider) or context deadline exceeded + if err != nil { + r.True(errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled)) + } + }) + + t.Run("sets default refresh interval", func(t *testing.T) { + r := require.New(t) + + cfg := VPCMetadataConfig{ + Enabled: true, + Type: "gcp", + NetworkName: "test-network", + RefreshInterval: 0, + } + + ctrl := NewVPCMetadataController(log, cfg, kubeClient) + r.Equal(1*time.Hour, ctrl.cfg.RefreshInterval) + }) + + t.Run("uses configured refresh interval", func(t *testing.T) { + r := require.New(t) + + customInterval := 30 * time.Minute + cfg := VPCMetadataConfig{ + Enabled: true, + Type: "gcp", + NetworkName: "test-network", + RefreshInterval: customInterval, + } + + ctrl := NewVPCMetadataController(log, cfg, kubeClient) + r.Equal(customInterval, ctrl.cfg.RefreshInterval) + }) +} diff --git a/cmd/controller/kube/client.go b/cmd/controller/kube/client.go index 4a183b5b..6230dd1a 100644 --- a/cmd/controller/kube/client.go +++ b/cmd/controller/kube/client.go @@ -65,7 +65,8 @@ type Client struct { mu sync.RWMutex kvisorControllerPodSpec *corev1.PodSpec - index *Index + index *Index + vpcIndex *VPCIndex clusterInfo *ClusterInfo @@ -88,11 +89,23 @@ func NewClient( kvisorControllerContainerName: "controller", client: client, index: NewIndex(), - version: version, - ipInfoTTL: 30 * time.Second, + // TODO: 
set default + vpcIndex: nil, + version: version, + ipInfoTTL: 30 * time.Second, } } +// SetVPCIndex sets the VPC index for enriching external IPs with VPC metadata. +func (i *Client) SetVPCIndex(vpcIndex *VPCIndex) { + i.vpcIndex = vpcIndex +} + +// GetVPCIndex returns the VPC index if available. +func (i *Client) GetVPCIndex() *VPCIndex { + return i.vpcIndex +} + func (c *Client) RegisterHandlers(factory informers.SharedInformerFactory) { informersList := []cache.SharedInformer{ factory.Core().V1().Nodes().Informer(), diff --git a/cmd/controller/kube/index.go b/cmd/controller/kube/index.go index d252b281..15667b85 100644 --- a/cmd/controller/kube/index.go +++ b/cmd/controller/kube/index.go @@ -39,10 +39,12 @@ func (i *Index) addFromPod(pod *corev1.Pod) { owner := i.getPodOwner(pod) node := i.nodesByName[pod.Spec.NodeName] zone := getZone(node) + region := getRegion(node) podInfo := &PodInfo{ - Pod: pod, - Owner: owner, - Zone: zone, + Pod: pod, + Owner: owner, + Zone: zone, + Region: region, } i.pods[pod.UID] = podInfo if !pod.Spec.HostNetwork { @@ -56,6 +58,8 @@ func (i *Index) addFromPod(pod *corev1.Pod) { i.ipsDetails.set(addr, IPInfo{ PodInfo: podInfo, resourceID: pod.UID, + zone: zone, + region: region, }) } } @@ -117,6 +121,8 @@ func (i *Index) addFromNode(v *corev1.Node) { i.ipsDetails.set(addr, IPInfo{ Node: v, resourceID: v.UID, + zone: getZone(v), + region: getRegion(v), }) return } @@ -277,6 +283,14 @@ func getZone(n *corev1.Node) string { return zone } +func getRegion(n *corev1.Node) string { + if n == nil { + return "" + } + region := n.Labels["topology.kubernetes.io/region"] + return region +} + type IPEndpoint struct { ID string Name string @@ -290,6 +304,8 @@ type IPInfo struct { Node *corev1.Node Endpoint *IPEndpoint + zone string + region string ip netip.Addr resourceID types.UID setAt time.Time @@ -297,9 +313,10 @@ type IPInfo struct { } type PodInfo struct { - Pod *corev1.Pod - Owner metav1.OwnerReference - Zone string + Pod *corev1.Pod + Owner 
metav1.OwnerReference
+	Zone   string
+	Region string
 }
 
 func pvcKey(namespace, name string) string {
diff --git a/cmd/controller/kube/index_ips_details.go b/cmd/controller/kube/index_ips_details.go
index 678bb663..4a16edca 100644
--- a/cmd/controller/kube/index_ips_details.go
+++ b/cmd/controller/kube/index_ips_details.go
@@ -20,6 +20,28 @@ func (m ipsDetails) find(ip netip.Addr) (IPInfo, bool) {
 	if len(list) == 1 {
 		return list[0], true
 	}
+
+	// Despite multiple records we can use ip info if all records have the same zone.
+	// This is needed because in GCP subnets are regional and not zonal, which means
+	// that the same IP can be spawned in different zones (not relevant for AWS).
+	if len(list) > 1 {
+		ipZone := ""
+		// The region will always be the same for the same IP,
+		// as unique CIDRs are always assigned to the same region.
+		ipRegion := getRegion(list[0].Node)
+		for _, ipInfo := range list {
+			if ipInfo.Node != nil {
+				nodeZone := getZone(ipInfo.Node)
+				if ipZone == "" {
+					ipZone = nodeZone
+				} else if ipZone != nodeZone {
+					ipZone = ""
+					break
+				}
+			}
+		}
+		return IPInfo{zone: ipZone, region: ipRegion}, true
+	}
 	return IPInfo{}, false
 }
 
@@ -39,6 +61,9 @@ func (m ipsDetails) set(ip netip.Addr, info IPInfo) {
 	}
 
 	// Add new record.
+	// We can have multiple records for the same IP,
+	// e.g. multiple pods running on the same node with hostNetwork: true,
+ // or old pod removed but new pod with the same IP created while cache not yet cleared list = append(list, info) m[ip] = list } diff --git a/cmd/controller/kube/index_vpc.go b/cmd/controller/kube/index_vpc.go new file mode 100644 index 00000000..b2bd0fa2 --- /dev/null +++ b/cmd/controller/kube/index_vpc.go @@ -0,0 +1,273 @@ +package kube + +import ( + "net" + "net/netip" + "sync" + "time" + + cloudtypes "github.com/castai/kvisor/pkg/cloudprovider/types" + "github.com/castai/kvisor/pkg/logging" + "github.com/elastic/go-freelru" + "github.com/yl2chen/cidranger" +) + +// IPVPCInfo contains network metadata for a specific IP address. +type IPVPCInfo struct { + IP netip.Addr + Zone string // filled only for AWS + Region string + CloudDomain string // filled when IP is public cloud service + ResolvedAt time.Time +} + +// VPCIndex maintains VPC metadata with fast IP-to-VPC lookups using a CIDR tree. +type VPCIndex struct { + log *logging.Logger + + mu sync.RWMutex + metadata *cloudtypes.Metadata + + // CIDR tree for fast IP lookups + cidrTree cidranger.Ranger + + // IP lookup cache (LRU) + ipCache *freelru.SyncedLRU[netip.Addr, *IPVPCInfo] + + refreshInterval time.Duration + + // Last successful refresh + lastRefresh time.Time +} + +// cidrEntry implements cidranger.RangerEntry for storing metadata with CIDR ranges. +type cidrEntry struct { + ipNet net.IPNet + metadata any +} + +func (c *cidrEntry) Network() net.IPNet { + return c.ipNet +} + +// CIDRInfo stores zone/region CIDR information. +type cidrInfo struct { + Zone string + Region string + + // stores GCP/AWS service IP range information + CloudDomain string +} + +// NewVPCIndex creates a new VPC index. 
+func NewVPCIndex(log *logging.Logger, refreshInterval time.Duration) *VPCIndex { + ipCache, err := freelru.NewSynced[netip.Addr, *IPVPCInfo](10000, func(ip netip.Addr) uint32 { + b := ip.As16() + return uint32(b[0])<<24 | uint32(b[1])<<16 | uint32(b[2])<<8 | uint32(b[3]) + }) + if err != nil { + log.Warnf("failed to create IP cache: %v", err) + // Continue without cache + } + + return &VPCIndex{ + log: log, + cidrTree: cidranger.NewPCTrieRanger(), + ipCache: ipCache, + refreshInterval: refreshInterval, + } +} + +// Update updates the VPC metadata and rebuilds the CIDR tree. +func (vi *VPCIndex) Update(metadata *cloudtypes.Metadata) error { + vi.mu.Lock() + defer vi.mu.Unlock() + + vi.metadata = metadata + vi.lastRefresh = time.Now() + + vi.rebuildCIDRTree() + + // Clear IP cache on metadata update + if vi.ipCache != nil { + vi.ipCache.Purge() + } + + vi.log.Info("VPC index updated") + return nil +} + +// rebuildCIDRTree rebuilds the CIDR tree from metadata. +// Must be called with lock held. 
+func (vi *VPCIndex) rebuildCIDRTree() { + vi.cidrTree = cidranger.NewPCTrieRanger() + + if vi.metadata == nil { + return + } + + // Index service IP ranges first (lowest priority in lookups) + for _, svcRange := range vi.metadata.ServiceRanges { + for _, cidr := range svcRange.CIRDs { + _, ipNet, err := net.ParseCIDR(cidr.String()) + if err != nil { + vi.log.Warnf("parsing service IP range %s: %v", svcRange, err) + continue + } + entry := &cidrEntry{ + ipNet: *ipNet, + metadata: &cidrInfo{ + CloudDomain: vi.metadata.Domain, + Region: svcRange.Region, + }, + } + if err := vi.cidrTree.Insert(entry); err != nil { + vi.log.Warnf("inserting service IP range: %v", err) + } + } + } + + // Index VPC and subnet CIDRs + for _, vpc := range vi.metadata.VPCs { + for _, cidr := range vpc.CIDRs { + _, ipNet, err := net.ParseCIDR(cidr.String()) + if err != nil { + vi.log.Warnf("parsing VPC CIDR %s: %v", cidr, err) + continue + } + entry := &cidrEntry{ + ipNet: *ipNet, + metadata: &cidrInfo{}, + } + if err := vi.cidrTree.Insert(entry); err != nil { + vi.log.Warnf("inserting VPC CIDR: %v", err) + } + } + + // Index subnet CIDRs + for _, subnet := range vpc.Subnets { + _, ipNet, err := net.ParseCIDR(subnet.CIDR.String()) + if err != nil { + vi.log.Warnf("parsing subnet CIDR %s: %v", subnet.CIDR, err) + continue + } + entry := &cidrEntry{ + ipNet: *ipNet, + metadata: &cidrInfo{ + Zone: subnet.Zone, + Region: subnet.Region, + }, + } + if err := vi.cidrTree.Insert(entry); err != nil { + vi.log.Warnf("inserting subnet CIDR: %v", err) + } + + // Index secondary ranges (GKE alias IPs) + for _, secondary := range subnet.SecondaryRanges { + _, ipNet, err := net.ParseCIDR(secondary.CIDR.String()) + if err != nil { + continue + } + entry := &cidrEntry{ + ipNet: *ipNet, + metadata: &cidrInfo{ + Zone: subnet.Zone, + Region: subnet.Region, + }, + } + if err := vi.cidrTree.Insert(entry); err != nil { + vi.log.Warnf("inserting secondary range CIDR: %v", err) + } + } + } + + // Index peered VPC 
CIDRs + for _, peer := range vpc.PeeredVPCs { + for _, cidrRange := range peer.Ranges { + _, ipNet, err := net.ParseCIDR(cidrRange.CIDR.String()) + if err != nil { + continue + } + entry := &cidrEntry{ + ipNet: *ipNet, + metadata: &cidrInfo{ + Zone: cidrRange.Zone, + Region: cidrRange.Region, + }, + } + if err := vi.cidrTree.Insert(entry); err != nil { + vi.log.Warnf("inserting peered VPC CIDR: %v", err) + } + } + } + } +} + +// LookupIP looks up VPC metadata for an IP address. +func (vi *VPCIndex) LookupIP(ip netip.Addr) (*IPVPCInfo, bool) { + // Check cache first + if vi.ipCache != nil { + if cached, ok := vi.ipCache.Get(ip); ok { + // Check if cached entry is not older than refresh interval + if time.Since(cached.ResolvedAt) < vi.refreshInterval { + return cached, true + } + } + } + + vi.mu.RLock() + defer vi.mu.RUnlock() + + result := vi.lookupInTree(ip) + if result != nil { + if vi.ipCache != nil { + vi.ipCache.Add(ip, result) + } + return result, true + } + + // Not found - cache negative result too + emptyResult := &IPVPCInfo{ + IP: ip, + ResolvedAt: time.Now(), + } + if vi.ipCache != nil { + vi.ipCache.Add(ip, emptyResult) + } + + return nil, false +} + +// lookupInTree performs the actual CIDR tree lookup. +// Must be called with read lock held. 
+func (vi *VPCIndex) lookupInTree(ip netip.Addr) *IPVPCInfo { + if vi.cidrTree == nil { + return nil + } + + netIP := net.IP(ip.AsSlice()) + + // Find all containing networks + entries, err := vi.cidrTree.ContainingNetworks(netIP) + if err != nil || len(entries) == 0 { + return nil + } + + // Return most specific match (longest prefix / last in list) + // cidranger returns entries ordered from least to most specific + mostSpecific := entries[len(entries)-1].(*cidrEntry) + + result := &IPVPCInfo{ + IP: ip, + ResolvedAt: time.Now(), + } + + switch meta := mostSpecific.metadata.(type) { + case *cidrInfo: + result.Zone = meta.Zone + result.Region = meta.Region + result.CloudDomain = meta.CloudDomain + } + + return result +} diff --git a/cmd/controller/kube/index_vpc_test.go b/cmd/controller/kube/index_vpc_test.go new file mode 100644 index 00000000..60bc490f --- /dev/null +++ b/cmd/controller/kube/index_vpc_test.go @@ -0,0 +1,396 @@ +package kube + +import ( + "net/netip" + "testing" + "time" + + cloudtypes "github.com/castai/kvisor/pkg/cloudprovider/types" + "github.com/castai/kvisor/pkg/logging" + "github.com/stretchr/testify/require" +) + +func TestVPCIndex(t *testing.T) { + log := logging.NewTestLog() + + t.Run("new VPC index", func(t *testing.T) { + r := require.New(t) + refreshInterval := 1 * time.Hour + + index := NewVPCIndex(log, refreshInterval) + + r.NotNil(index) + r.NotNil(index.cidrTree) + r.NotNil(index.ipCache) + r.Equal(refreshInterval, index.refreshInterval) + }) + + t.Run("update metadata", func(t *testing.T) { + r := require.New(t) + index := NewVPCIndex(log, 1*time.Hour) + + metadata := &cloudtypes.Metadata{ + Domain: "example.com", + VPCs: []cloudtypes.VPC{ + { + ID: "vpc-1", + CIDRs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/16")}, + }, + }, + } + + err := index.Update(metadata) + r.NoError(err) + r.NotNil(index.metadata) + r.Equal(metadata, index.metadata) + r.False(index.lastRefresh.IsZero()) + }) + + t.Run("lookup IP in subnet", func(t 
*testing.T) { + r := require.New(t) + index := NewVPCIndex(log, 1*time.Hour) + + metadata := &cloudtypes.Metadata{ + VPCs: []cloudtypes.VPC{ + { + ID: "vpc-1", + Subnets: []cloudtypes.Subnet{ + { + ID: "subnet-1", + CIDR: netip.MustParsePrefix("10.0.1.0/24"), + Zone: "us-east-1a", + Region: "us-east-1", + }, + }, + }, + }, + } + + err := index.Update(metadata) + r.NoError(err) + + // Lookup IP in subnet + ip := netip.MustParseAddr("10.0.1.50") + info, found := index.LookupIP(ip) + r.True(found) + r.NotNil(info) + r.Equal("us-east-1a", info.Zone) + r.Equal("us-east-1", info.Region) + r.Equal("", info.CloudDomain) + }) + + t.Run("lookup IP in service range", func(t *testing.T) { + r := require.New(t) + index := NewVPCIndex(log, 1*time.Hour) + + metadata := &cloudtypes.Metadata{ + Domain: "googleapis.com", + ServiceRanges: []cloudtypes.ServiceRanges{ + { + Region: "us-central1", + CIRDs: []netip.Prefix{netip.MustParsePrefix("34.126.0.0/18")}, + }, + }, + } + + err := index.Update(metadata) + r.NoError(err) + + // Lookup IP in service range + ip := netip.MustParseAddr("34.126.10.1") + info, found := index.LookupIP(ip) + r.True(found) + r.NotNil(info) + r.Equal("us-central1", info.Region) + r.Equal("googleapis.com", info.CloudDomain) + }) + + t.Run("lookup IP in secondary range", func(t *testing.T) { + r := require.New(t) + index := NewVPCIndex(log, 1*time.Hour) + + metadata := &cloudtypes.Metadata{ + VPCs: []cloudtypes.VPC{ + { + ID: "vpc-1", + Subnets: []cloudtypes.Subnet{ + { + ID: "subnet-1", + CIDR: netip.MustParsePrefix("10.0.1.0/24"), + Zone: "us-east-1a", + Region: "us-east-1", + SecondaryRanges: []cloudtypes.SecondaryRange{ + { + Name: "pods", + CIDR: netip.MustParsePrefix("10.100.0.0/16"), + }, + }, + }, + }, + }, + }, + } + + err := index.Update(metadata) + r.NoError(err) + + // Lookup IP in secondary range + ip := netip.MustParseAddr("10.100.5.10") + info, found := index.LookupIP(ip) + r.True(found) + r.NotNil(info) + r.Equal("us-east-1a", info.Zone) + 
r.Equal("us-east-1", info.Region) + }) + + t.Run("lookup IP in peered VPC", func(t *testing.T) { + r := require.New(t) + index := NewVPCIndex(log, 1*time.Hour) + + metadata := &cloudtypes.Metadata{ + VPCs: []cloudtypes.VPC{ + { + ID: "vpc-1", + PeeredVPCs: []cloudtypes.PeeredVPC{ + { + Name: "peered-vpc", + Ranges: []cloudtypes.PeeredVPCRange{ + { + CIDR: netip.MustParsePrefix("192.168.0.0/16"), + Zone: "eu-west-1a", + Region: "eu-west-1", + }, + }, + }, + }, + }, + }, + } + + err := index.Update(metadata) + r.NoError(err) + + // Lookup IP in peered VPC + ip := netip.MustParseAddr("192.168.10.5") + info, found := index.LookupIP(ip) + r.True(found) + r.NotNil(info) + r.Equal("eu-west-1a", info.Zone) + r.Equal("eu-west-1", info.Region) + }) + + t.Run("lookup IP not found", func(t *testing.T) { + r := require.New(t) + index := NewVPCIndex(log, 1*time.Hour) + + metadata := &cloudtypes.Metadata{ + VPCs: []cloudtypes.VPC{ + { + ID: "vpc-1", + Subnets: []cloudtypes.Subnet{ + { + ID: "subnet-1", + CIDR: netip.MustParsePrefix("10.0.1.0/24"), + Zone: "us-east-1a", + Region: "us-east-1", + }, + }, + }, + }, + } + + err := index.Update(metadata) + r.NoError(err) + + // Lookup IP not in any range + ip := netip.MustParseAddr("172.16.5.10") + info, found := index.LookupIP(ip) + r.False(found) + r.Nil(info) + }) + + t.Run("lookup uses cache", func(t *testing.T) { + r := require.New(t) + index := NewVPCIndex(log, 1*time.Hour) + + metadata := &cloudtypes.Metadata{ + VPCs: []cloudtypes.VPC{ + { + ID: "vpc-1", + Subnets: []cloudtypes.Subnet{ + { + ID: "subnet-1", + CIDR: netip.MustParsePrefix("10.0.1.0/24"), + Zone: "us-east-1a", + Region: "us-east-1", + }, + }, + }, + }, + } + + err := index.Update(metadata) + r.NoError(err) + + ip := netip.MustParseAddr("10.0.1.50") + + // First lookup - not cached + info1, found1 := index.LookupIP(ip) + r.True(found1) + r.NotNil(info1) + + // Second lookup - should use cache + info2, found2 := index.LookupIP(ip) + r.True(found2) + r.NotNil(info2) + 
r.Equal(info1.Zone, info2.Zone) + r.Equal(info1.Region, info2.Region) + r.Equal(info1.ResolvedAt, info2.ResolvedAt) // Same timestamp means from cache + }) + + t.Run("cache invalidated on update", func(t *testing.T) { + r := require.New(t) + index := NewVPCIndex(log, 1*time.Hour) + + metadata1 := &cloudtypes.Metadata{ + VPCs: []cloudtypes.VPC{ + { + ID: "vpc-1", + Subnets: []cloudtypes.Subnet{ + { + ID: "subnet-1", + CIDR: netip.MustParsePrefix("10.0.1.0/24"), + Zone: "us-east-1a", + Region: "us-east-1", + }, + }, + }, + }, + } + + err := index.Update(metadata1) + r.NoError(err) + + ip := netip.MustParseAddr("10.0.1.50") + + // First lookup + info1, found1 := index.LookupIP(ip) + r.True(found1) + r.Equal("us-east-1a", info1.Zone) + + // Update metadata with different zone + metadata2 := &cloudtypes.Metadata{ + VPCs: []cloudtypes.VPC{ + { + ID: "vpc-1", + Subnets: []cloudtypes.Subnet{ + { + ID: "subnet-1", + CIDR: netip.MustParsePrefix("10.0.1.0/24"), + Zone: "us-east-1b", + Region: "us-east-1", + }, + }, + }, + }, + } + + err = index.Update(metadata2) + r.NoError(err) + + // Second lookup - should reflect new metadata + info2, found2 := index.LookupIP(ip) + r.True(found2) + r.Equal("us-east-1b", info2.Zone) + }) + + t.Run("most specific match wins", func(t *testing.T) { + r := require.New(t) + index := NewVPCIndex(log, 1*time.Hour) + + metadata := &cloudtypes.Metadata{ + VPCs: []cloudtypes.VPC{ + { + ID: "vpc-1", + CIDRs: []netip.Prefix{netip.MustParsePrefix("10.0.0.0/16")}, + Subnets: []cloudtypes.Subnet{ + { + ID: "subnet-1", + CIDR: netip.MustParsePrefix("10.0.1.0/24"), + Zone: "us-east-1a", + Region: "us-east-1", + }, + }, + }, + }, + } + + err := index.Update(metadata) + r.NoError(err) + + // IP is in both VPC CIDR (10.0.0.0/16) and subnet CIDR (10.0.1.0/24) + // Should return subnet info (more specific) + ip := netip.MustParseAddr("10.0.1.50") + info, found := index.LookupIP(ip) + r.True(found) + r.NotNil(info) + r.Equal("us-east-1a", info.Zone) + 
r.Equal("us-east-1", info.Region) + }) + + t.Run("empty metadata", func(t *testing.T) { + r := require.New(t) + index := NewVPCIndex(log, 1*time.Hour) + + metadata := &cloudtypes.Metadata{} + err := index.Update(metadata) + r.NoError(err) + + ip := netip.MustParseAddr("10.0.1.50") + info, found := index.LookupIP(ip) + r.False(found) + r.Nil(info) + }) + + t.Run("cache expiry", func(t *testing.T) { + r := require.New(t) + shortRefresh := 100 * time.Millisecond + index := NewVPCIndex(log, shortRefresh) + + metadata := &cloudtypes.Metadata{ + VPCs: []cloudtypes.VPC{ + { + ID: "vpc-1", + Subnets: []cloudtypes.Subnet{ + { + ID: "subnet-1", + CIDR: netip.MustParsePrefix("10.0.1.0/24"), + Zone: "us-east-1a", + Region: "us-east-1", + }, + }, + }, + }, + } + + err := index.Update(metadata) + r.NoError(err) + + ip := netip.MustParseAddr("10.0.1.50") + + // First lookup - populates cache + info1, found1 := index.LookupIP(ip) + r.True(found1) + r.NotNil(info1) + timestamp1 := info1.ResolvedAt + + // Wait for cache to expire + time.Sleep(150 * time.Millisecond) + + // Second lookup - should get fresh result + info2, found2 := index.LookupIP(ip) + r.True(found2) + r.NotNil(info2) + r.True(info2.ResolvedAt.After(timestamp1), "Second lookup should have newer timestamp") + }) +} diff --git a/cmd/controller/kube/server.go b/cmd/controller/kube/server.go index 4876a8dd..ce1a5931 100644 --- a/cmd/controller/kube/server.go +++ b/cmd/controller/kube/server.go @@ -33,6 +33,7 @@ func (s *Server) GetIPInfo(ctx context.Context, req *kubepb.GetIPInfoRequest) (* res := &kubepb.IPInfo{} if info.Node != nil { res.Zone = getZone(info.Node) + res.NodeName = info.Node.GetName() } if podInfo := info.PodInfo; podInfo != nil { res.PodUid = string(podInfo.Pod.UID) @@ -74,35 +75,69 @@ func (s *Server) GetIPsInfo(ctx context.Context, req *kubepb.GetIPsInfoRequest) res := &kubepb.GetIPsInfoResponse{ List: make([]*kubepb.IPInfo, 0, len(infos)), } - for _, info := range infos { + for _, ip := range ips { + 
shouldIncludeIP := false pbInfo := &kubepb.IPInfo{ - Ip: info.ip.AsSlice(), + Ip: ip.AsSlice(), } - if info.Node != nil { - pbInfo.Zone = getZone(info.Node) - } - if podInfo := info.PodInfo; podInfo != nil { - pbInfo.PodUid = string(podInfo.Pod.UID) - pbInfo.PodName = podInfo.Pod.Name - pbInfo.Namespace = podInfo.Pod.Namespace - pbInfo.WorkloadUid = string(podInfo.Owner.UID) - pbInfo.WorkloadName = podInfo.Owner.Name - pbInfo.WorkloadKind = podInfo.Owner.Kind - pbInfo.Zone = podInfo.Zone - pbInfo.NodeName = podInfo.Pod.Spec.NodeName + + // step 1: check IPs from kube client first + info, ok := s.client.GetIPInfo(ip) + if ok { + shouldIncludeIP = true + pbInfo.Zone = info.zone + pbInfo.Region = info.region + + if info.Node != nil { + pbInfo.Zone = getZone(info.Node) + pbInfo.Region = getRegion(info.Node) + pbInfo.NodeName = info.Node.GetName() + } + if podInfo := info.PodInfo; podInfo != nil { + pbInfo.PodUid = string(podInfo.Pod.UID) + pbInfo.PodName = podInfo.Pod.Name + pbInfo.Namespace = podInfo.Pod.Namespace + pbInfo.WorkloadUid = string(podInfo.Owner.UID) + pbInfo.WorkloadName = podInfo.Owner.Name + pbInfo.WorkloadKind = podInfo.Owner.Kind + pbInfo.Zone = podInfo.Zone + pbInfo.Region = podInfo.Region + pbInfo.NodeName = podInfo.Pod.Spec.NodeName + } + if svc := info.Service; svc != nil { + pbInfo.WorkloadKind = "Service" + pbInfo.WorkloadName = svc.Name + pbInfo.Namespace = svc.Namespace + } + if e := info.Endpoint; e != nil { + pbInfo.WorkloadKind = "Endpoint" + pbInfo.WorkloadName = e.Name + pbInfo.Namespace = e.Namespace + } } - if svc := info.Service; svc != nil { - pbInfo.WorkloadKind = "Service" - pbInfo.WorkloadName = svc.Name - pbInfo.Namespace = svc.Namespace + + // step 2: check IPs from VPC index + if s.client.vpcIndex != nil { + vpcIPInfo, ok := s.client.vpcIndex.LookupIP(ip) + if ok { + shouldIncludeIP = true + if pbInfo.Zone == "" && vpcIPInfo.Zone != "" { + pbInfo.Zone = vpcIPInfo.Zone + } + if pbInfo.Region == "" && vpcIPInfo.Region != "" { + 
pbInfo.Region = vpcIPInfo.Region + } + if pbInfo.CloudDomain == "" && vpcIPInfo.CloudDomain != "" { + pbInfo.CloudDomain = vpcIPInfo.CloudDomain + } + } } - if e := info.Endpoint; e != nil { - pbInfo.WorkloadKind = "Endpoint" - pbInfo.WorkloadName = e.Name - pbInfo.Namespace = e.Namespace + + if shouldIncludeIP { + res.List = append(res.List, pbInfo) } - res.List = append(res.List, pbInfo) } + return res, nil } @@ -111,9 +146,14 @@ func (s *Server) GetClusterInfo(ctx context.Context, req *kubepb.GetClusterInfoR if err != nil || info == nil { return nil, status.Errorf(codes.NotFound, "cluster info not found: %v", err) } + var otherCidr []string + if s.client.vpcIndex != nil { + otherCidr = s.client.vpcIndex.metadata.ListKnownCIDRs() + } return &kubepb.GetClusterInfoResponse{ PodsCidr: info.PodCidr, ServiceCidr: info.ServiceCidr, + OtherCidr: otherCidr, }, nil } @@ -128,6 +168,7 @@ func (s *Server) GetPod(ctx context.Context, req *kubepb.GetPodRequest) (*kubepb WorkloadName: info.Owner.Name, WorkloadKind: toProtoWorkloadKind(info.Owner.Kind), Zone: info.Zone, + Region: info.Region, NodeName: info.Pod.Spec.NodeName, }, }, nil diff --git a/cmd/controller/main.go b/cmd/controller/main.go index 48a16bdd..4463491b 100644 --- a/cmd/controller/main.go +++ b/cmd/controller/main.go @@ -41,7 +41,11 @@ var ( chartVersion = pflag.String("chart-version", "", "Helm chart version") - cloudProvider = pflag.String("cloud-provider", "", "Cloud provider in which the cluster is running") + cloudProvider = pflag.String("cloud-provider", "", "Cloud provider in which the cluster is running") + cloudProviderVPCSyncEnabled = pflag.Bool("cloud-provider-vpc-sync-enabled", false, "Enable cloud provider VPC metadata sync") + cloudProviderVPCName = pflag.String("cloud-provider-vpc-name", "", "Cloud provider VPC name in which the cluster is running") + cloudProviderVPCSyncInterval = pflag.Duration("cloud-provider-vpc-sync-interval", 1*time.Hour, "Cloud provider VPC sync interval") + 
cloudProviderGCPProjectID    = pflag.String("cloud-provider-gcp-project-id", "", "Cloud provider GCP project ID in which the cluster is running")
 
 	castaiSecretRefName          = pflag.String("castai-secret-ref-name", "castai-kvisor", "CASTAI k8s secret name")
 	castaiConfigSyncDuration     = pflag.Duration("castai-config-sync-duration", 1*time.Minute, "CASTAI remote config sync duration")
@@ -188,6 +192,13 @@ func main() {
 			AgentConfig: config.AgentConfig{
 				Enabled: *agentEnabled,
 			},
+			CloudProvider: controllers.VPCMetadataConfig{
+				Enabled:         *cloudProviderVPCSyncEnabled,
+				NetworkName:     *cloudProviderVPCName,
+				RefreshInterval: *cloudProviderVPCSyncInterval,
+				Type:            *cloudProvider,
+				GCPProjectID:    *cloudProviderGCPProjectID,
+			},
 		},
 		clientset,
 	)
diff --git a/go.mod b/go.mod
index ebeb5260..4bab9e6a 100644
--- a/go.mod
+++ b/go.mod
@@ -47,6 +47,7 @@ require (
 	github.com/testcontainers/testcontainers-go v0.35.0
 	github.com/tklauser/go-sysconf v0.3.15
 	github.com/vishvananda/netns v0.0.5
+	github.com/yl2chen/cidranger v1.0.2
 	go.uber.org/atomic v1.11.0
 	go.uber.org/goleak v1.3.0
 	golang.org/x/net v0.46.0
diff --git a/go.sum b/go.sum
index e5acb1c8..aab698c9 100644
--- a/go.sum
+++ b/go.sum
@@ -1894,6 +1894,8 @@ github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZ
 github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
 github.com/yashtewari/glob-intersection v0.2.0 h1:8iuHdN88yYuCzCdjt0gDe+6bAhUwBeEWqThExu54RFg=
 github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok=
+github.com/yl2chen/cidranger v1.0.2 h1:lbOWZVCG1tCRX4u24kuM1Tb4nHqWkDxwLdoS+SevawU=
+github.com/yl2chen/cidranger v1.0.2/go.mod h1:9U1yz7WPYDwf0vpNWFaeRh0bjwz5RVgRy/9UEQfHl0g=
github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= diff --git a/pkg/net/iputil/iputilt.go b/pkg/net/iputil/iputilt.go index 7afe56da..b68dbcac 100644 --- a/pkg/net/iputil/iputilt.go +++ b/pkg/net/iputil/iputilt.go @@ -10,3 +10,11 @@ func IsPrivateNetwork(ip netip.Addr) bool { ip.IsLinkLocalMulticast() || ip.IsInterfaceLocalMulticast() } + +func IsLocalNetwork(ip netip.Addr) bool { + return ip.IsLoopback() || + // https://www.ibm.com/docs/en/zvm/7.2.0?topic=addresses-multicast-scope + ip.IsLinkLocalUnicast() || + ip.IsLinkLocalMulticast() || + ip.IsInterfaceLocalMulticast() +} From 72f6763113dde7712ac80b0761b43dedf36d7282 Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Thu, 15 Jan 2026 16:56:49 +0200 Subject: [PATCH 05/22] Gather Kubernetes data about pod volumes and add K8s context to filesystem metrics --- api/v1/kube/kube_api.pb.go | 4 ++-- api/v1/kube/kube_api_grpc.pb.go | 4 ++-- api/v1/runtime/common.pb.go | 4 ++-- api/v1/runtime/runtime_agent_api.pb.go | 4 ++-- api/v1/runtime/runtime_agent_api_grpc.pb.go | 4 ++-- cmd/agent/daemon/app/app.go | 16 ++++++---------- .../daemon/pipeline/storage_info_provider.go | 4 ++-- 7 files changed, 18 insertions(+), 22 deletions(-) diff --git a/api/v1/kube/kube_api.pb.go b/api/v1/kube/kube_api.pb.go index c50cd6e3..0b39915c 100644 --- a/api/v1/kube/kube_api.pb.go +++ b/api/v1/kube/kube_api.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.9 -// protoc v6.32.0 +// protoc-gen-go v1.36.11 +// protoc v6.33.0 // source: api/v1/kube/kube_api.proto package v1 diff --git a/api/v1/kube/kube_api_grpc.pb.go b/api/v1/kube/kube_api_grpc.pb.go index cd325a48..b4a99541 100644 --- a/api/v1/kube/kube_api_grpc.pb.go +++ b/api/v1/kube/kube_api_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v6.32.0 +// - protoc-gen-go-grpc v1.6.0 +// - protoc v6.33.0 // source: api/v1/kube/kube_api.proto package v1 diff --git a/api/v1/runtime/common.pb.go b/api/v1/runtime/common.pb.go index 7f6a3df3..39f38365 100644 --- a/api/v1/runtime/common.pb.go +++ b/api/v1/runtime/common.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.9 -// protoc v6.32.0 +// protoc-gen-go v1.36.11 +// protoc v6.33.0 // source: api/v1/runtime/common.proto package v1 diff --git a/api/v1/runtime/runtime_agent_api.pb.go b/api/v1/runtime/runtime_agent_api.pb.go index 8153ff21..6fcc4dfa 100644 --- a/api/v1/runtime/runtime_agent_api.pb.go +++ b/api/v1/runtime/runtime_agent_api.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.4 -// protoc v5.29.3 +// protoc-gen-go v1.36.11 +// protoc v6.33.0 // source: api/v1/runtime/runtime_agent_api.proto package v1 diff --git a/api/v1/runtime/runtime_agent_api_grpc.pb.go b/api/v1/runtime/runtime_agent_api_grpc.pb.go index e3b15e8c..7b3f2ea3 100644 --- a/api/v1/runtime/runtime_agent_api_grpc.pb.go +++ b/api/v1/runtime/runtime_agent_api_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
// versions: -// - protoc-gen-go-grpc v1.5.1 -// - protoc v6.32.0 +// - protoc-gen-go-grpc v1.6.0 +// - protoc v6.33.0 // source: api/v1/runtime/runtime_agent_api.proto package v1 diff --git a/cmd/agent/daemon/app/app.go b/cmd/agent/daemon/app/app.go index a0d609a7..4a15fb74 100644 --- a/cmd/agent/daemon/app/app.go +++ b/cmd/agent/daemon/app/app.go @@ -268,16 +268,9 @@ func (a *App) Run(ctx context.Context) error { return fmt.Errorf("failed to create metrics client: %w", err) } - go func() { - if err = metricsClient.Start(ctx); err != nil { - log.Warnf("metric client failed with:%v", err) - } - }() - - blockDeviceMetricsWriter, filesystemMetricsWriter, nodeStatsSummaryWriter, podVolumeMetricsWriter, err = setupStorageMetrics(metricsClient) - if err != nil { - return fmt.Errorf("failed to setup storage metrics: %w", err) - } + blockDeviceMetricsWriter, filesystemMetricsWriter, nodeStatsSummaryWriter, podVolumeMetricsWriter, err = setupStorageMetrics(metricsClient) + if err != nil { + return fmt.Errorf("failed to setup storage metrics: %w", err) } storageInfoProvider, err = pipeline.NewStorageInfoProvider(log, kubeAPIServerClient, cfg.Castai.ClusterID) @@ -599,6 +592,7 @@ func setupStorageMetrics(metricsClient custommetrics.MetricClient) (pipeline.Blo } return blockDeviceMetrics, filesystemMetrics, nodeStatsSummaryWriter, podVolumeMetricsWriter, nil +<<<<<<< HEAD } // setupLoggingStorageMetrics creates logging-based writers for testing without ClickHouse @@ -607,6 +601,8 @@ func setupLoggingStorageMetrics(log *logging.Logger) (pipeline.BlockDeviceMetric pipeline.NewLoggingFilesystemMetricsWriter(log), pipeline.NewLoggingNodeStatsSummaryWriter(log), pipeline.NewLoggingK8sPodVolumeMetricsWriter(log) +======= +>>>>>>> e47cf1e (Gather Kubernetes data about pod volumes and add K8s context to filesystem metrics) } // resolveMetricsAddr transforms kvisor.* addresses to telemetry.* addresses diff --git a/cmd/agent/daemon/pipeline/storage_info_provider.go 
b/cmd/agent/daemon/pipeline/storage_info_provider.go index 505636ea..70dedbce 100644 --- a/cmd/agent/daemon/pipeline/storage_info_provider.go +++ b/cmd/agent/daemon/pipeline/storage_info_provider.go @@ -114,8 +114,8 @@ type K8sPodVolumeMetric struct { PVName *string `avro:"pv_name"` StorageClass *string `avro:"storage_class"` CSIDriver *string `avro:"csi_driver"` - VolumeMode string `avro:"volume_mode"` // "Filesystem" or "Block" - DevicePath *string `avro:"device_path"` // For block volumes: container's volumeDevices[].devicePath + VolumeMode string `avro:"volume_mode"` // "Filesystem" or "Block" + DevicePath *string `avro:"device_path"` // For block volumes: container's volumeDevices[].devicePath Timestamp time.Time `avro:"ts"` } From fdf6ca9a2c5089c238b115dfde82dc4bf842c56f Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Sun, 18 Jan 2026 15:47:50 +0200 Subject: [PATCH 06/22] Fix merge --- cmd/agent/daemon/app/app.go | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/cmd/agent/daemon/app/app.go b/cmd/agent/daemon/app/app.go index 4a15fb74..dfe8adea 100644 --- a/cmd/agent/daemon/app/app.go +++ b/cmd/agent/daemon/app/app.go @@ -268,9 +268,10 @@ func (a *App) Run(ctx context.Context) error { return fmt.Errorf("failed to create metrics client: %w", err) } - blockDeviceMetricsWriter, filesystemMetricsWriter, nodeStatsSummaryWriter, podVolumeMetricsWriter, err = setupStorageMetrics(metricsClient) - if err != nil { - return fmt.Errorf("failed to setup storage metrics: %w", err) + blockDeviceMetricsWriter, filesystemMetricsWriter, nodeStatsSummaryWriter, podVolumeMetricsWriter, err = setupStorageMetrics(metricsClient) + if err != nil { + return fmt.Errorf("failed to setup storage metrics: %w", err) + } } storageInfoProvider, err = pipeline.NewStorageInfoProvider(log, kubeAPIServerClient, cfg.Castai.ClusterID) @@ -592,7 +593,6 @@ func setupStorageMetrics(metricsClient custommetrics.MetricClient) (pipeline.Blo } return blockDeviceMetrics, 
filesystemMetrics, nodeStatsSummaryWriter, podVolumeMetricsWriter, nil -<<<<<<< HEAD } // setupLoggingStorageMetrics creates logging-based writers for testing without ClickHouse @@ -601,8 +601,6 @@ func setupLoggingStorageMetrics(log *logging.Logger) (pipeline.BlockDeviceMetric pipeline.NewLoggingFilesystemMetricsWriter(log), pipeline.NewLoggingNodeStatsSummaryWriter(log), pipeline.NewLoggingK8sPodVolumeMetricsWriter(log) -======= ->>>>>>> e47cf1e (Gather Kubernetes data about pod volumes and add K8s context to filesystem metrics) } // resolveMetricsAddr transforms kvisor.* addresses to telemetry.* addresses From 0eb39bada1ab1d0c48715196122fc189fc7d046f Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Sun, 18 Jan 2026 16:44:54 +0200 Subject: [PATCH 07/22] Add debugging logs --- cmd/agent/daemon/pipeline/controller.go | 24 ++++++++++++------- cmd/agent/daemon/pipeline/storage_pipeline.go | 1 + cmd/controller/kube/server.go | 2 ++ 3 files changed, 19 insertions(+), 8 deletions(-) diff --git a/cmd/agent/daemon/pipeline/controller.go b/cmd/agent/daemon/pipeline/controller.go index 4510b649..094e3213 100644 --- a/cmd/agent/daemon/pipeline/controller.go +++ b/cmd/agent/daemon/pipeline/controller.go @@ -134,6 +134,14 @@ func NewK8sPodVolumeMetricsWriter(metricsClient custommetrics.MetricClient) (K8s // Logging writers for testing without ClickHouse +// ptrToStr safely dereferences a pointer for logging, returning "" if nil +func ptrToStr[T any](p *T) string { + if p == nil { + return "" + } + return fmt.Sprintf("%v", *p) +} + type LoggingBlockDeviceMetricsWriter struct { log *logging.Logger } @@ -144,8 +152,8 @@ func NewLoggingBlockDeviceMetricsWriter(log *logging.Logger) BlockDeviceMetricsW func (w *LoggingBlockDeviceMetricsWriter) Write(metrics ...BlockDeviceMetric) error { for _, m := range metrics { - w.log.Infof("[BlockDevice] name=%s path=%s size=%v type=%s partition_of=%s read_iops=%.2f write_iops=%.2f", - m.Name, m.Path, m.SizeBytes, m.DiskType, 
m.PartitionOf, m.ReadIOPS, m.WriteIOPS) + w.log.Infof("[BlockDevice] name=%s path=%s size=%s type=%s partition_of=%s read_iops=%.2f write_iops=%.2f", + m.Name, m.Path, ptrToStr(m.SizeBytes), m.DiskType, m.PartitionOf, m.ReadIOPS, m.WriteIOPS) } return nil } @@ -160,8 +168,8 @@ func NewLoggingFilesystemMetricsWriter(log *logging.Logger) FilesystemMetricsWri func (w *LoggingFilesystemMetricsWriter) Write(metrics ...FilesystemMetric) error { for _, m := range metrics { - w.log.Infof("[Filesystem] mount=%s type=%s total=%v used=%v namespace=%v pod=%v pvc=%v", - m.MountPoint, m.Type, m.TotalBytes, m.UsedBytes, m.Namespace, m.PodName, m.PVCName) + w.log.Infof("[Filesystem] mount=%s type=%s total=%s used=%s namespace=%s pod=%s pvc=%s", + m.MountPoint, m.Type, ptrToStr(m.TotalBytes), ptrToStr(m.UsedBytes), ptrToStr(m.Namespace), ptrToStr(m.PodName), ptrToStr(m.PVCName)) } return nil } @@ -176,8 +184,8 @@ func NewLoggingNodeStatsSummaryWriter(log *logging.Logger) NodeStatsSummaryWrite func (w *LoggingNodeStatsSummaryWriter) Write(metrics ...NodeStatsSummaryMetric) error { for _, m := range metrics { - w.log.Infof("[NodeStats] node=%s image_fs_size=%v image_fs_used=%v container_fs_size=%v container_fs_used=%v", - m.NodeName, m.ImageFsSizeBytes, m.ImageFsUsedBytes, m.ContainerFsSizeBytes, m.ContainerFsUsedBytes) + w.log.Infof("[NodeStats] node=%s image_fs_size=%s image_fs_used=%s container_fs_size=%s container_fs_used=%s", + m.NodeName, ptrToStr(m.ImageFsSizeBytes), ptrToStr(m.ImageFsUsedBytes), ptrToStr(m.ContainerFsSizeBytes), ptrToStr(m.ContainerFsUsedBytes)) } return nil } @@ -192,8 +200,8 @@ func NewLoggingK8sPodVolumeMetricsWriter(log *logging.Logger) K8sPodVolumeMetric func (w *LoggingK8sPodVolumeMetricsWriter) Write(metrics ...K8sPodVolumeMetric) error { for _, m := range metrics { - w.log.Infof("[PodVolume] ns=%s pod=%s volume=%s mount=%s mode=%s pvc=%v pv=%v storage_class=%v device_path=%v", - m.Namespace, m.PodName, m.VolumeName, m.MountPath, m.VolumeMode, 
m.PVCName, m.PVName, m.StorageClass, m.DevicePath) + w.log.Infof("[PodVolume] ns=%s pod=%s volume=%s mount=%s mode=%s pvc=%s pv=%s storage_class=%s device_path=%s", + m.Namespace, m.PodName, m.VolumeName, m.MountPath, m.VolumeMode, ptrToStr(m.PVCName), ptrToStr(m.PVName), ptrToStr(m.StorageClass), ptrToStr(m.DevicePath)) } return nil } diff --git a/cmd/agent/daemon/pipeline/storage_pipeline.go b/cmd/agent/daemon/pipeline/storage_pipeline.go index e018f9d3..56c2ee7c 100644 --- a/cmd/agent/daemon/pipeline/storage_pipeline.go +++ b/cmd/agent/daemon/pipeline/storage_pipeline.go @@ -98,6 +98,7 @@ func (c *Controller) processPodVolumeMetrics(ctx context.Context) error { } if len(metrics) == 0 { + c.log.Info("no pod volume metrics collected from controller (empty response)") return nil } diff --git a/cmd/controller/kube/server.go b/cmd/controller/kube/server.go index ce1a5931..9da0110e 100644 --- a/cmd/controller/kube/server.go +++ b/cmd/controller/kube/server.go @@ -206,6 +206,7 @@ func (s *Server) GetPodVolumes(ctx context.Context, req *kubepb.GetPodVolumesReq } pods := s.client.GetPodsOnNode(req.NodeName) + s.client.log.Infof("GetPodVolumes: found %d pods on node %s", len(pods), req.NodeName) var volumes []*kubepb.PodVolumeInfo for _, podInfo := range pods { @@ -277,6 +278,7 @@ func (s *Server) GetPodVolumes(ctx context.Context, req *kubepb.GetPodVolumesReq } } + s.client.log.Infof("GetPodVolumes: returning %d volumes for node %s", len(volumes), req.NodeName) return &kubepb.GetPodVolumesResponse{ Volumes: volumes, }, nil From 9a4d2816c99efb48852687f19662ca3fcd593475 Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Sun, 18 Jan 2026 22:51:38 +0200 Subject: [PATCH 08/22] Filter pod volume metrics to only include PVC-backed volumes and exclude kube-system --- .../daemon/pipeline/storage_info_provider.go | 15 +++++++++- cmd/controller/kube/server.go | 28 ++++++++++++------- 2 files changed, 32 insertions(+), 11 deletions(-) diff --git 
a/cmd/agent/daemon/pipeline/storage_info_provider.go b/cmd/agent/daemon/pipeline/storage_info_provider.go index 70dedbce..44a03cda 100644 --- a/cmd/agent/daemon/pipeline/storage_info_provider.go +++ b/cmd/agent/daemon/pipeline/storage_info_provider.go @@ -323,12 +323,14 @@ func (s *SysfsStorageInfoProvider) CollectPodVolumeMetrics(ctx context.Context) return nil, fmt.Errorf("kube client is not initialized") } + s.log.Infof("CollectPodVolumeMetrics: requesting pod volumes for node %s", s.nodeName) resp, err := s.kubeClient.GetPodVolumes(ctx, &kubepb.GetPodVolumesRequest{ NodeName: s.nodeName, }, grpc.UseCompressor(gzip.Name)) if err != nil { return nil, fmt.Errorf("failed to get pod volumes for %s: %w", s.nodeName, err) } + s.log.Infof("CollectPodVolumeMetrics: received %d volumes from controller", len(resp.Volumes)) nodeTemplate, err := s.getNodeTemplate() if err != nil { @@ -409,6 +411,9 @@ func podVolumeKey(podUID, volumeName string) string { } // buildPodVolumeLookupMap fetches pod volumes from controller and builds a lookup map +// The map is keyed by both: +// - podUID/volumeName (for emptyDir, configMap, etc.) +// - podUID/pvName (for CSI volumes where the mount path contains the PV name) func (s *SysfsStorageInfoProvider) buildPodVolumeLookupMap() map[string]*kubepb.PodVolumeInfo { if s.kubeClient == nil { return nil @@ -425,10 +430,18 @@ func (s *SysfsStorageInfoProvider) buildPodVolumeLookupMap() map[string]*kubepb. 
return nil } - volumeMap := make(map[string]*kubepb.PodVolumeInfo, len(resp.Volumes)) + volumeMap := make(map[string]*kubepb.PodVolumeInfo, len(resp.Volumes)*2) for _, v := range resp.Volumes { + // Primary key: podUID/volumeName key := podVolumeKey(v.PodUid, v.VolumeName) volumeMap[key] = v + + // Secondary key: podUID/pvName (for CSI volumes) + // CSI mount paths use the PV name as the directory name, not the volume name + if v.PvName != "" { + pvKey := podVolumeKey(v.PodUid, v.PvName) + volumeMap[pvKey] = v + } } return volumeMap diff --git a/cmd/controller/kube/server.go b/cmd/controller/kube/server.go index 9da0110e..d3340d88 100644 --- a/cmd/controller/kube/server.go +++ b/cmd/controller/kube/server.go @@ -215,6 +215,11 @@ func (s *Server) GetPodVolumes(ctx context.Context, req *kubepb.GetPodVolumesReq continue } + // Skip kube-system namespace - system pods don't have user-relevant PVCs + if pod.Namespace == "kube-system" { + continue + } + // Build a map of volume name -> volume for quick lookup volumeMap := make(map[string]corev1.Volume) for _, vol := range pod.Spec.Volumes { @@ -222,6 +227,7 @@ func (s *Server) GetPodVolumes(ctx context.Context, req *kubepb.GetPodVolumesReq } // Iterate through containers and their volume mounts (filesystem volumes) + // Only include PVC-backed volumes - skip ephemeral volumes like configMaps, secrets, serviceAccount tokens for _, container := range pod.Spec.Containers { for _, mount := range container.VolumeMounts { vol, exists := volumeMap[mount.Name] @@ -229,6 +235,11 @@ func (s *Server) GetPodVolumes(ctx context.Context, req *kubepb.GetPodVolumesReq continue } + // Only include PVC-backed volumes + if vol.PersistentVolumeClaim == nil { + continue + } + volInfo := &kubepb.PodVolumeInfo{ Namespace: pod.Namespace, PodName: pod.Name, @@ -241,11 +252,7 @@ func (s *Server) GetPodVolumes(ctx context.Context, req *kubepb.GetPodVolumesReq VolumeMode: "Filesystem", } - // If this is a PVC-backed volume, enrich with PVC/PV 
details - if vol.PersistentVolumeClaim != nil { - s.enrichPVCDetails(volInfo, vol, pod.Namespace) - } - + s.enrichPVCDetails(volInfo, vol, pod.Namespace) volumes = append(volumes, volInfo) } @@ -256,6 +263,11 @@ func (s *Server) GetPodVolumes(ctx context.Context, req *kubepb.GetPodVolumesReq continue } + // Only include PVC-backed volumes + if vol.PersistentVolumeClaim == nil { + continue + } + volInfo := &kubepb.PodVolumeInfo{ Namespace: pod.Namespace, PodName: pod.Name, @@ -268,11 +280,7 @@ func (s *Server) GetPodVolumes(ctx context.Context, req *kubepb.GetPodVolumesReq VolumeMode: "Block", } - // If this is a PVC-backed volume, enrich with PVC/PV details - if vol.PersistentVolumeClaim != nil { - s.enrichPVCDetails(volInfo, vol, pod.Namespace) - } - + s.enrichPVCDetails(volInfo, vol, pod.Namespace) volumes = append(volumes, volInfo) } } From 870343cf4dddc0f5752a6c51d834b1e9b63ca05d Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Sun, 18 Jan 2026 22:52:13 +0200 Subject: [PATCH 09/22] Add RBAC permissions for PVCs and PVs in controller ClusterRole --- charts/kvisor/templates/controller.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/charts/kvisor/templates/controller.yaml b/charts/kvisor/templates/controller.yaml index d0c48717..fbeca56c 100644 --- a/charts/kvisor/templates/controller.yaml +++ b/charts/kvisor/templates/controller.yaml @@ -257,6 +257,8 @@ rules: - namespaces - services - endpoints + - persistentvolumeclaims + - persistentvolumes verbs: - get - list From 44e25ed134b33738213396f1b3b2e5993da8dcec Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Sun, 18 Jan 2026 23:15:53 +0200 Subject: [PATCH 10/22] Add CSI Volume handle to K8sPodVolumeMetric --- cmd/agent/daemon/pipeline/controller.go | 4 ++-- cmd/agent/daemon/pipeline/storage_info_provider.go | 8 ++++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/cmd/agent/daemon/pipeline/controller.go b/cmd/agent/daemon/pipeline/controller.go index 094e3213..68dbf83c 100644 
--- a/cmd/agent/daemon/pipeline/controller.go +++ b/cmd/agent/daemon/pipeline/controller.go @@ -200,8 +200,8 @@ func NewLoggingK8sPodVolumeMetricsWriter(log *logging.Logger) K8sPodVolumeMetric func (w *LoggingK8sPodVolumeMetricsWriter) Write(metrics ...K8sPodVolumeMetric) error { for _, m := range metrics { - w.log.Infof("[PodVolume] ns=%s pod=%s volume=%s mount=%s mode=%s pvc=%s pv=%s storage_class=%s device_path=%s", - m.Namespace, m.PodName, m.VolumeName, m.MountPath, m.VolumeMode, ptrToStr(m.PVCName), ptrToStr(m.PVName), ptrToStr(m.StorageClass), ptrToStr(m.DevicePath)) + w.log.Infof("[PodVolume] ns=%s pod=%s volume=%s mount=%s mode=%s pvc=%s pv=%s storage_class=%s csi_volume_handle=%s device_path=%s", + m.Namespace, m.PodName, m.VolumeName, m.MountPath, m.VolumeMode, ptrToStr(m.PVCName), ptrToStr(m.PVName), ptrToStr(m.StorageClass), ptrToStr(m.CSIVolumeHandle), ptrToStr(m.DevicePath)) } return nil } diff --git a/cmd/agent/daemon/pipeline/storage_info_provider.go b/cmd/agent/daemon/pipeline/storage_info_provider.go index 44a03cda..de5c91bd 100644 --- a/cmd/agent/daemon/pipeline/storage_info_provider.go +++ b/cmd/agent/daemon/pipeline/storage_info_provider.go @@ -114,8 +114,9 @@ type K8sPodVolumeMetric struct { PVName *string `avro:"pv_name"` StorageClass *string `avro:"storage_class"` CSIDriver *string `avro:"csi_driver"` - VolumeMode string `avro:"volume_mode"` // "Filesystem" or "Block" - DevicePath *string `avro:"device_path"` // For block volumes: container's volumeDevices[].devicePath + CSIVolumeHandle *string `avro:"csi_volume_handle"` // For EBS: vol-xxx, can be joined with block_device.ebs_volume_id + VolumeMode string `avro:"volume_mode"` // "Filesystem" or "Block" + DevicePath *string `avro:"device_path"` // For block volumes: container's volumeDevices[].devicePath Timestamp time.Time `avro:"ts"` } @@ -372,6 +373,9 @@ func (s *SysfsStorageInfoProvider) CollectPodVolumeMetrics(ctx context.Context) if v.CsiDriver != "" { metric.CSIDriver = &v.CsiDriver 
} + if v.CsiVolumeHandle != "" { + metric.CSIVolumeHandle = &v.CsiVolumeHandle + } if v.DevicePath != "" { metric.DevicePath = &v.DevicePath } From 8fe64a0cacca03355b621dcdea6a9fbffc61ec01 Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Sun, 18 Jan 2026 23:19:29 +0200 Subject: [PATCH 11/22] Deduplicate filesystem metrics --- .../daemon/pipeline/storage_info_provider.go | 20 ++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/cmd/agent/daemon/pipeline/storage_info_provider.go b/cmd/agent/daemon/pipeline/storage_info_provider.go index de5c91bd..b8332c80 100644 --- a/cmd/agent/daemon/pipeline/storage_info_provider.go +++ b/cmd/agent/daemon/pipeline/storage_info_provider.go @@ -396,13 +396,31 @@ func (s *SysfsStorageInfoProvider) BuildFilesystemMetrics(timestamp time.Time) ( // Build pod volume lookup map for enrichment podVolumeMap := s.buildPodVolumeLookupMap() - filesystemMetrics := make([]FilesystemMetric, 0, len(mounts)) + // Deduplicate by major:minor device ID + // When multiple mounts point to the same device (bind mounts), prefer paths + // matching /var/lib/kubelet/pods because they can be enriched with pod metadata + seenDevices := make(map[string]FilesystemMetric) for _, mount := range mounts { metric, err := s.buildFilesystemMetric(mount, timestamp, podVolumeMap) if err != nil { s.log.Warnf("skipping filesystem metric for %s: %v", mount.MountPoint, err) continue } + + deviceKey := mount.MajorMinor + if existing, seen := seenDevices[deviceKey]; seen { + // Prefer the mount that has pod metadata (was enriched) + if metric.PodUID != nil && existing.PodUID == nil { + seenDevices[deviceKey] = metric + } + // Otherwise keep the first one we saw + } else { + seenDevices[deviceKey] = metric + } + } + + filesystemMetrics := make([]FilesystemMetric, 0, len(seenDevices)) + for _, metric := range seenDevices { filesystemMetrics = append(filesystemMetrics, metric) } From 7ff8bac850393607f13418f8f21c5851bcc2b9b0 Mon Sep 17 00:00:00 
2001 From: Ciprian Focsaneanu Date: Sun, 18 Jan 2026 23:38:44 +0200 Subject: [PATCH 12/22] Add warning log about PV not found in index for PVC --- cmd/controller/kube/server.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/controller/kube/server.go b/cmd/controller/kube/server.go index d3340d88..e037e563 100644 --- a/cmd/controller/kube/server.go +++ b/cmd/controller/kube/server.go @@ -323,6 +323,8 @@ func (s *Server) enrichPVCDetails(volInfo *kubepb.PodVolumeInfo, vol corev1.Volu volInfo.CsiDriver = pv.Spec.CSI.Driver volInfo.CsiVolumeHandle = pv.Spec.CSI.VolumeHandle } + } else { + s.client.log.Warnf("PV %s not found in index for PVC %s/%s", pvc.Spec.VolumeName, namespace, pvcName) } } } From d88cf61c39d45c799280626253d8ff35006e9402 Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Sun, 18 Jan 2026 23:58:23 +0200 Subject: [PATCH 13/22] Improve logs --- cmd/agent/daemon/pipeline/controller.go | 4 ++-- cmd/controller/kube/server.go | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/cmd/agent/daemon/pipeline/controller.go b/cmd/agent/daemon/pipeline/controller.go index 68dbf83c..6ff38455 100644 --- a/cmd/agent/daemon/pipeline/controller.go +++ b/cmd/agent/daemon/pipeline/controller.go @@ -168,8 +168,8 @@ func NewLoggingFilesystemMetricsWriter(log *logging.Logger) FilesystemMetricsWri func (w *LoggingFilesystemMetricsWriter) Write(metrics ...FilesystemMetric) error { for _, m := range metrics { - w.log.Infof("[Filesystem] mount=%s type=%s total=%s used=%s namespace=%s pod=%s pvc=%s", - m.MountPoint, m.Type, ptrToStr(m.TotalBytes), ptrToStr(m.UsedBytes), ptrToStr(m.Namespace), ptrToStr(m.PodName), ptrToStr(m.PVCName)) + w.log.Infof("[Filesystem] mount=%s devices=%v type=%s total=%s used=%s namespace=%s pod=%s pvc=%s", + m.MountPoint, m.Devices, m.Type, ptrToStr(m.TotalBytes), ptrToStr(m.UsedBytes), ptrToStr(m.Namespace), ptrToStr(m.PodName), ptrToStr(m.PVCName)) } return nil } diff --git a/cmd/controller/kube/server.go 
b/cmd/controller/kube/server.go index e037e563..2b8ce6de 100644 --- a/cmd/controller/kube/server.go +++ b/cmd/controller/kube/server.go @@ -322,6 +322,10 @@ func (s *Server) enrichPVCDetails(volInfo *kubepb.PodVolumeInfo, vol corev1.Volu if pv.Spec.CSI != nil { volInfo.CsiDriver = pv.Spec.CSI.Driver volInfo.CsiVolumeHandle = pv.Spec.CSI.VolumeHandle + s.client.log.Infof("PV %s has CSI driver=%s handle=%s", pv.Name, pv.Spec.CSI.Driver, pv.Spec.CSI.VolumeHandle) + } else { + // Log what volume source the PV has (could be AWS EBS native, not CSI) + s.client.log.Warnf("PV %s has no CSI spec (spec: %+v)", pv.Name, pv.Spec.PersistentVolumeSource) } } else { s.client.log.Warnf("PV %s not found in index for PVC %s/%s", pvc.Spec.VolumeName, namespace, pvcName) From 45d41761b72db8e67e3db6ebadaeb9b0cbf5df6e Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Mon, 19 Jan 2026 00:29:29 +0200 Subject: [PATCH 14/22] Improve logs --- cmd/agent/daemon/pipeline/controller.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/cmd/agent/daemon/pipeline/controller.go b/cmd/agent/daemon/pipeline/controller.go index 6ff38455..d45ce97b 100644 --- a/cmd/agent/daemon/pipeline/controller.go +++ b/cmd/agent/daemon/pipeline/controller.go @@ -152,8 +152,9 @@ func NewLoggingBlockDeviceMetricsWriter(log *logging.Logger) BlockDeviceMetricsW func (w *LoggingBlockDeviceMetricsWriter) Write(metrics ...BlockDeviceMetric) error { for _, m := range metrics { - w.log.Infof("[BlockDevice] name=%s path=%s size=%s type=%s partition_of=%s read_iops=%.2f write_iops=%.2f", - m.Name, m.Path, ptrToStr(m.SizeBytes), m.DiskType, m.PartitionOf, m.ReadIOPS, m.WriteIOPS) + w.log.Infof("[BlockDevice] name=%s path=%s size=%s type=%s partition_of=%s holders=%v is_virtual=%t raid_level=%s read_iops=%.2f write_iops=%.2f read_throughput=%.0f write_throughput=%.0f read_latency_ms=%.2f write_latency_ms=%.2f util=%.2f", + m.Name, m.Path, ptrToStr(m.SizeBytes), m.DiskType, m.PartitionOf, 
m.Holders, m.IsVirtual, m.RaidLevel, + m.ReadIOPS, m.WriteIOPS, m.ReadThroughputBytes, m.WriteThroughputBytes, m.ReadLatencyMs, m.WriteLatencyMs, m.Utilization) } return nil } @@ -168,8 +169,9 @@ func NewLoggingFilesystemMetricsWriter(log *logging.Logger) FilesystemMetricsWri func (w *LoggingFilesystemMetricsWriter) Write(metrics ...FilesystemMetric) error { for _, m := range metrics { - w.log.Infof("[Filesystem] mount=%s devices=%v type=%s total=%s used=%s namespace=%s pod=%s pvc=%s", - m.MountPoint, m.Devices, m.Type, ptrToStr(m.TotalBytes), ptrToStr(m.UsedBytes), ptrToStr(m.Namespace), ptrToStr(m.PodName), ptrToStr(m.PVCName)) + w.log.Infof("[Filesystem] mount=%s devices=%v type=%s options=%v total=%s used=%s total_inodes=%s used_inodes=%s namespace=%s pod=%s pod_uid=%s pvc=%s pv=%s storage_class=%s", + m.MountPoint, m.Devices, m.Type, m.Options, ptrToStr(m.TotalBytes), ptrToStr(m.UsedBytes), ptrToStr(m.TotalInodes), ptrToStr(m.UsedInodes), + ptrToStr(m.Namespace), ptrToStr(m.PodName), ptrToStr(m.PodUID), ptrToStr(m.PVCName), ptrToStr(m.PVName), ptrToStr(m.StorageClass)) } return nil } @@ -200,8 +202,9 @@ func NewLoggingK8sPodVolumeMetricsWriter(log *logging.Logger) K8sPodVolumeMetric func (w *LoggingK8sPodVolumeMetricsWriter) Write(metrics ...K8sPodVolumeMetric) error { for _, m := range metrics { - w.log.Infof("[PodVolume] ns=%s pod=%s volume=%s mount=%s mode=%s pvc=%s pv=%s storage_class=%s csi_volume_handle=%s device_path=%s", - m.Namespace, m.PodName, m.VolumeName, m.MountPath, m.VolumeMode, ptrToStr(m.PVCName), ptrToStr(m.PVName), ptrToStr(m.StorageClass), ptrToStr(m.CSIVolumeHandle), ptrToStr(m.DevicePath)) + w.log.Infof("[PodVolume] ns=%s pod=%s pod_uid=%s controller=%s/%s container=%s volume=%s mount=%s mode=%s pvc=%s requested_size=%s pv=%s storage_class=%s csi_driver=%s csi_volume_handle=%s device_path=%s", + m.Namespace, m.PodName, m.PodUID, m.ControllerKind, m.ControllerName, m.ContainerName, m.VolumeName, m.MountPath, m.VolumeMode, + 
ptrToStr(m.PVCName), ptrToStr(m.RequestedSizeBytes), ptrToStr(m.PVName), ptrToStr(m.StorageClass), ptrToStr(m.CSIDriver), ptrToStr(m.CSIVolumeHandle), ptrToStr(m.DevicePath)) } return nil } From 448fd2b99e35580591df036fd1f0f41b77826628 Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Mon, 19 Jan 2026 11:32:16 +0200 Subject: [PATCH 15/22] Simplify FilesystemMetric and add in-tree volume source support --- cmd/agent/daemon/pipeline/controller.go | 4 +- .../daemon/pipeline/storage_info_provider.go | 30 +++--------- cmd/controller/kube/server.go | 46 ++++++++++++++++--- cmd/controller/kube/server_test.go | 41 +++++++++++++++++ 4 files changed, 90 insertions(+), 31 deletions(-) diff --git a/cmd/agent/daemon/pipeline/controller.go b/cmd/agent/daemon/pipeline/controller.go index d45ce97b..2bc57637 100644 --- a/cmd/agent/daemon/pipeline/controller.go +++ b/cmd/agent/daemon/pipeline/controller.go @@ -169,9 +169,9 @@ func NewLoggingFilesystemMetricsWriter(log *logging.Logger) FilesystemMetricsWri func (w *LoggingFilesystemMetricsWriter) Write(metrics ...FilesystemMetric) error { for _, m := range metrics { - w.log.Infof("[Filesystem] mount=%s devices=%v type=%s options=%v total=%s used=%s total_inodes=%s used_inodes=%s namespace=%s pod=%s pod_uid=%s pvc=%s pv=%s storage_class=%s", + w.log.Infof("[Filesystem] mount=%s devices=%v type=%s options=%v total=%s used=%s total_inodes=%s used_inodes=%s pv=%s", m.MountPoint, m.Devices, m.Type, m.Options, ptrToStr(m.TotalBytes), ptrToStr(m.UsedBytes), ptrToStr(m.TotalInodes), ptrToStr(m.UsedInodes), - ptrToStr(m.Namespace), ptrToStr(m.PodName), ptrToStr(m.PodUID), ptrToStr(m.PVCName), ptrToStr(m.PVName), ptrToStr(m.StorageClass)) + ptrToStr(m.PVName)) } return nil } diff --git a/cmd/agent/daemon/pipeline/storage_info_provider.go b/cmd/agent/daemon/pipeline/storage_info_provider.go index b8332c80..1601c7d7 100644 --- a/cmd/agent/daemon/pipeline/storage_info_provider.go +++ b/cmd/agent/daemon/pipeline/storage_info_provider.go @@ 
-77,13 +77,8 @@ type FilesystemMetric struct { UsedInodes *int64 `avro:"used_inodes"` Timestamp time.Time `avro:"ts"` - // Pod/PVC metadata (nil for node-level filesystems) - Namespace *string `avro:"namespace"` - PodName *string `avro:"pod_name"` - PodUID *string `avro:"pod_uid"` - PVCName *string `avro:"pvc_name"` - PVName *string `avro:"pv_name"` - StorageClass *string `avro:"storage_class"` + // PV name for joining with K8sPodVolumeMetric (nil for node-level filesystems) + PVName *string `avro:"pv_name"` } // NodeStatsSummaryMetric represents node-level filesystem statistics from kubelet @@ -409,8 +404,8 @@ func (s *SysfsStorageInfoProvider) BuildFilesystemMetrics(timestamp time.Time) ( deviceKey := mount.MajorMinor if existing, seen := seenDevices[deviceKey]; seen { - // Prefer the mount that has pod metadata (was enriched) - if metric.PodUID != nil && existing.PodUID == nil { + // Prefer the mount that has PV metadata (was enriched with K8s info) + if metric.PVName != nil && existing.PVName == nil { seenDevices[deviceKey] = metric } // Otherwise keep the first one we saw @@ -502,22 +497,11 @@ func (s *SysfsStorageInfoProvider) buildFilesystemMetric(mount mountInfo, timest Timestamp: timestamp, } - // Check if this is a pod volume mount and enrich with pod metadata + // Check if this is a pod volume mount and enrich with PV name for joining if volInfo := ParseVolumeMountPath(mount.MountPoint); volInfo != nil && podVolumeMap != nil { key := podVolumeKey(volInfo.PodUID, volInfo.VolumeName) - if pv, ok := podVolumeMap[key]; ok { - metric.Namespace = &pv.Namespace - metric.PodName = &pv.PodName - metric.PodUID = &pv.PodUid - if pv.PvcName != "" { - metric.PVCName = &pv.PvcName - } - if pv.PvName != "" { - metric.PVName = &pv.PvName - } - if pv.StorageClass != "" { - metric.StorageClass = &pv.StorageClass - } + if pv, ok := podVolumeMap[key]; ok && pv.PvName != "" { + metric.PVName = &pv.PvName } } diff --git a/cmd/controller/kube/server.go 
b/cmd/controller/kube/server.go index 2b8ce6de..63a046e8 100644 --- a/cmd/controller/kube/server.go +++ b/cmd/controller/kube/server.go @@ -3,6 +3,7 @@ package kube import ( "context" "net/netip" + "strings" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -318,14 +319,23 @@ func (s *Server) enrichPVCDetails(volInfo *kubepb.PodVolumeInfo, vol corev1.Volu volInfo.PvName = pvc.Spec.VolumeName if pv, found := s.client.GetPVByName(pvc.Spec.VolumeName); found { - // Get CSI details if available + // Get volume source details - supports both CSI and in-tree provisioners if pv.Spec.CSI != nil { + // CSI volume (modern) volInfo.CsiDriver = pv.Spec.CSI.Driver - volInfo.CsiVolumeHandle = pv.Spec.CSI.VolumeHandle - s.client.log.Infof("PV %s has CSI driver=%s handle=%s", pv.Name, pv.Spec.CSI.Driver, pv.Spec.CSI.VolumeHandle) - } else { - // Log what volume source the PV has (could be AWS EBS native, not CSI) - s.client.log.Warnf("PV %s has no CSI spec (spec: %+v)", pv.Name, pv.Spec.PersistentVolumeSource) + volInfo.CsiVolumeHandle = extractVolumeID(pv.Spec.CSI.VolumeHandle) + } else if pv.Spec.AWSElasticBlockStore != nil { + // In-tree AWS EBS provisioner (gp2 storage class) + volInfo.CsiDriver = "kubernetes.io/aws-ebs" + volInfo.CsiVolumeHandle = pv.Spec.AWSElasticBlockStore.VolumeID + } else if pv.Spec.GCEPersistentDisk != nil { + // In-tree GCE PD provisioner + volInfo.CsiDriver = "kubernetes.io/gce-pd" + volInfo.CsiVolumeHandle = pv.Spec.GCEPersistentDisk.PDName + } else if pv.Spec.AzureDisk != nil { + // In-tree Azure Disk provisioner + volInfo.CsiDriver = "kubernetes.io/azure-disk" + volInfo.CsiVolumeHandle = pv.Spec.AzureDisk.DiskName } } else { s.client.log.Warnf("PV %s not found in index for PVC %s/%s", pvc.Spec.VolumeName, namespace, pvcName) @@ -333,6 +343,30 @@ func (s *Server) enrichPVCDetails(volInfo *kubepb.PodVolumeInfo, vol corev1.Volu } } +// extractVolumeID extracts the volume/disk name from a CSI volume handle. 
+// Different CSI drivers use different path formats: +// - GCP: projects//zones//disks/ +// - AWS: vol-xxx → vol-xxx (no change) +// - Azure: /subscriptions/.../providers/Microsoft.Compute/disks/ +func extractVolumeID(csiVolumeHandle string) string { + // GCP format: projects//zones//disks/ + if strings.Contains(csiVolumeHandle, "/disks/") { + parts := strings.Split(csiVolumeHandle, "/disks/") + if len(parts) == 2 { + return parts[1] + } + } + // Azure format: /subscriptions/.../providers/Microsoft.Compute/disks/ + if strings.Contains(csiVolumeHandle, "Microsoft.Compute/disks/") { + parts := strings.Split(csiVolumeHandle, "Microsoft.Compute/disks/") + if len(parts) == 2 { + return parts[1] + } + } + // AWS and others: return as-is + return csiVolumeHandle +} + func toProtoWorkloadKind(kind string) kubepb.WorkloadKind { switch kind { case "Deployment": diff --git a/cmd/controller/kube/server_test.go b/cmd/controller/kube/server_test.go index 7083246d..7b0dfc50 100644 --- a/cmd/controller/kube/server_test.go +++ b/cmd/controller/kube/server_test.go @@ -237,3 +237,44 @@ func TestServer(t *testing.T) { r.Equal([]string{"10.8.0.0/14", "fd00::/48"}, resp.ServiceCidr) }) } + +func TestExtractVolumeID(t *testing.T) { + tests := []struct { + name string + input string + expected string + }{ + { + name: "GCP CSI volume handle", + input: "projects/engineering-test-353509/zones/us-central1-a/disks/pvc-2f7e7ae2-bbe2-410b-a109-571ce3298b98", + expected: "pvc-2f7e7ae2-bbe2-410b-a109-571ce3298b98", + }, + { + name: "AWS EBS CSI volume handle", + input: "vol-08d551180685f8611", + expected: "vol-08d551180685f8611", + }, + { + name: "Azure CSI volume handle", + input: "/subscriptions/abc123/resourceGroups/rg/providers/Microsoft.Compute/disks/pvc-xxx-yyy-zzz", + expected: "pvc-xxx-yyy-zzz", + }, + { + name: "Simple volume ID", + input: "simple-volume-id", + expected: "simple-volume-id", + }, + { + name: "Empty string", + input: "", + expected: "", + }, + } + + for _, tt := range 
tests { + t.Run(tt.name, func(t *testing.T) { + result := extractVolumeID(tt.input) + require.Equal(t, tt.expected, result) + }) + } +} From 520df3a781d9d28709273a21dc4164379d1cbf5a Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Mon, 19 Jan 2026 12:11:52 +0200 Subject: [PATCH 16/22] Remove logging only mode --- cmd/agent/daemon/app/app.go | 28 +++------ cmd/agent/daemon/pipeline/controller.go | 77 ------------------------- 2 files changed, 7 insertions(+), 98 deletions(-) diff --git a/cmd/agent/daemon/app/app.go b/cmd/agent/daemon/app/app.go index dfe8adea..8e9341ac 100644 --- a/cmd/agent/daemon/app/app.go +++ b/cmd/agent/daemon/app/app.go @@ -258,20 +258,14 @@ func (a *App) Run(ctx context.Context) error { var podVolumeMetricsWriter pipeline.K8sPodVolumeMetricsWriter var storageInfoProvider pipeline.StorageInfoProvider if cfg.Stats.StorageEnabled { - // Check if logging-only mode is enabled (for testing without ClickHouse) - if os.Getenv("STORAGE_METRICS_LOG_ONLY") == "true" { - log.Info("using logging writers for storage metrics (STORAGE_METRICS_LOG_ONLY=true)") - blockDeviceMetricsWriter, filesystemMetricsWriter, nodeStatsSummaryWriter, podVolumeMetricsWriter = setupLoggingStorageMetrics(log) - } else { - metricsClient, err := createMetricsClient(cfg) - if err != nil { - return fmt.Errorf("failed to create metrics client: %w", err) - } + metricsClient, err := createMetricsClient(cfg) + if err != nil { + return fmt.Errorf("failed to create metrics client: %w", err) + } - blockDeviceMetricsWriter, filesystemMetricsWriter, nodeStatsSummaryWriter, podVolumeMetricsWriter, err = setupStorageMetrics(metricsClient) - if err != nil { - return fmt.Errorf("failed to setup storage metrics: %w", err) - } + blockDeviceMetricsWriter, filesystemMetricsWriter, nodeStatsSummaryWriter, podVolumeMetricsWriter, err = setupStorageMetrics(metricsClient) + if err != nil { + return fmt.Errorf("failed to setup storage metrics: %w", err) } storageInfoProvider, err = 
pipeline.NewStorageInfoProvider(log, kubeAPIServerClient, cfg.Castai.ClusterID) @@ -595,14 +589,6 @@ func setupStorageMetrics(metricsClient custommetrics.MetricClient) (pipeline.Blo return blockDeviceMetrics, filesystemMetrics, nodeStatsSummaryWriter, podVolumeMetricsWriter, nil } -// setupLoggingStorageMetrics creates logging-based writers for testing without ClickHouse -func setupLoggingStorageMetrics(log *logging.Logger) (pipeline.BlockDeviceMetricsWriter, pipeline.FilesystemMetricsWriter, pipeline.NodeStatsSummaryWriter, pipeline.K8sPodVolumeMetricsWriter) { - return pipeline.NewLoggingBlockDeviceMetricsWriter(log), - pipeline.NewLoggingFilesystemMetricsWriter(log), - pipeline.NewLoggingNodeStatsSummaryWriter(log), - pipeline.NewLoggingK8sPodVolumeMetricsWriter(log) -} - // resolveMetricsAddr transforms kvisor.* addresses to telemetry.* addresses func resolveMetricsAddr(addr string) string { const ( diff --git a/cmd/agent/daemon/pipeline/controller.go b/cmd/agent/daemon/pipeline/controller.go index 2bc57637..9a99105d 100644 --- a/cmd/agent/daemon/pipeline/controller.go +++ b/cmd/agent/daemon/pipeline/controller.go @@ -132,83 +132,6 @@ func NewK8sPodVolumeMetricsWriter(metricsClient custommetrics.MetricClient) (K8s ) } -// Logging writers for testing without ClickHouse - -// ptrToStr safely dereferences a pointer for logging, returning "" if nil -func ptrToStr[T any](p *T) string { - if p == nil { - return "" - } - return fmt.Sprintf("%v", *p) -} - -type LoggingBlockDeviceMetricsWriter struct { - log *logging.Logger -} - -func NewLoggingBlockDeviceMetricsWriter(log *logging.Logger) BlockDeviceMetricsWriter { - return &LoggingBlockDeviceMetricsWriter{log: log.WithField("writer", "block_device")} -} - -func (w *LoggingBlockDeviceMetricsWriter) Write(metrics ...BlockDeviceMetric) error { - for _, m := range metrics { - w.log.Infof("[BlockDevice] name=%s path=%s size=%s type=%s partition_of=%s holders=%v is_virtual=%t raid_level=%s read_iops=%.2f write_iops=%.2f 
read_throughput=%.0f write_throughput=%.0f read_latency_ms=%.2f write_latency_ms=%.2f util=%.2f", - m.Name, m.Path, ptrToStr(m.SizeBytes), m.DiskType, m.PartitionOf, m.Holders, m.IsVirtual, m.RaidLevel, - m.ReadIOPS, m.WriteIOPS, m.ReadThroughputBytes, m.WriteThroughputBytes, m.ReadLatencyMs, m.WriteLatencyMs, m.Utilization) - } - return nil -} - -type LoggingFilesystemMetricsWriter struct { - log *logging.Logger -} - -func NewLoggingFilesystemMetricsWriter(log *logging.Logger) FilesystemMetricsWriter { - return &LoggingFilesystemMetricsWriter{log: log.WithField("writer", "filesystem")} -} - -func (w *LoggingFilesystemMetricsWriter) Write(metrics ...FilesystemMetric) error { - for _, m := range metrics { - w.log.Infof("[Filesystem] mount=%s devices=%v type=%s options=%v total=%s used=%s total_inodes=%s used_inodes=%s pv=%s", - m.MountPoint, m.Devices, m.Type, m.Options, ptrToStr(m.TotalBytes), ptrToStr(m.UsedBytes), ptrToStr(m.TotalInodes), ptrToStr(m.UsedInodes), - ptrToStr(m.PVName)) - } - return nil -} - -type LoggingNodeStatsSummaryWriter struct { - log *logging.Logger -} - -func NewLoggingNodeStatsSummaryWriter(log *logging.Logger) NodeStatsSummaryWriter { - return &LoggingNodeStatsSummaryWriter{log: log.WithField("writer", "node_stats")} -} - -func (w *LoggingNodeStatsSummaryWriter) Write(metrics ...NodeStatsSummaryMetric) error { - for _, m := range metrics { - w.log.Infof("[NodeStats] node=%s image_fs_size=%s image_fs_used=%s container_fs_size=%s container_fs_used=%s", - m.NodeName, ptrToStr(m.ImageFsSizeBytes), ptrToStr(m.ImageFsUsedBytes), ptrToStr(m.ContainerFsSizeBytes), ptrToStr(m.ContainerFsUsedBytes)) - } - return nil -} - -type LoggingK8sPodVolumeMetricsWriter struct { - log *logging.Logger -} - -func NewLoggingK8sPodVolumeMetricsWriter(log *logging.Logger) K8sPodVolumeMetricsWriter { - return &LoggingK8sPodVolumeMetricsWriter{log: log.WithField("writer", "pod_volume")} -} - -func (w *LoggingK8sPodVolumeMetricsWriter) Write(metrics 
...K8sPodVolumeMetric) error { - for _, m := range metrics { - w.log.Infof("[PodVolume] ns=%s pod=%s pod_uid=%s controller=%s/%s container=%s volume=%s mount=%s mode=%s pvc=%s requested_size=%s pv=%s storage_class=%s csi_driver=%s csi_volume_handle=%s device_path=%s", - m.Namespace, m.PodName, m.PodUID, m.ControllerKind, m.ControllerName, m.ContainerName, m.VolumeName, m.MountPath, m.VolumeMode, - ptrToStr(m.PVCName), ptrToStr(m.RequestedSizeBytes), ptrToStr(m.PVName), ptrToStr(m.StorageClass), ptrToStr(m.CSIDriver), ptrToStr(m.CSIVolumeHandle), ptrToStr(m.DevicePath)) - } - return nil -} - func NewController( log *logging.Logger, cfg Config, From 38926cdbb50b1f8b92a10a50a7398c4296366d5d Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Mon, 19 Jan 2026 12:17:03 +0200 Subject: [PATCH 17/22] Use existing context --- cmd/agent/daemon/pipeline/controller_test.go | 2 +- cmd/agent/daemon/pipeline/storage_info_provider.go | 10 +++++----- cmd/agent/daemon/pipeline/storage_pipeline.go | 6 +++--- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/cmd/agent/daemon/pipeline/controller_test.go b/cmd/agent/daemon/pipeline/controller_test.go index 02f11c71..43bd0fe1 100644 --- a/cmd/agent/daemon/pipeline/controller_test.go +++ b/cmd/agent/daemon/pipeline/controller_test.go @@ -1323,7 +1323,7 @@ func (m *mockNodeStatsSummaryWriter) Write(metrics ...NodeStatsSummaryMetric) er type mockStorageInfoProvider struct{} -func (m *mockStorageInfoProvider) BuildFilesystemMetrics(timestamp time.Time) ([]FilesystemMetric, error) { +func (m *mockStorageInfoProvider) BuildFilesystemMetrics(ctx context.Context, timestamp time.Time) ([]FilesystemMetric, error) { return []FilesystemMetric{ { NodeName: "test-node", diff --git a/cmd/agent/daemon/pipeline/storage_info_provider.go b/cmd/agent/daemon/pipeline/storage_info_provider.go index 1601c7d7..e76068eb 100644 --- a/cmd/agent/daemon/pipeline/storage_info_provider.go +++ b/cmd/agent/daemon/pipeline/storage_info_provider.go @@ 
-121,7 +121,7 @@ type storageMetricsState struct { } type StorageInfoProvider interface { - BuildFilesystemMetrics(timestamp time.Time) ([]FilesystemMetric, error) + BuildFilesystemMetrics(ctx context.Context, timestamp time.Time) ([]FilesystemMetric, error) BuildBlockDeviceMetrics(timestamp time.Time) ([]BlockDeviceMetric, error) CollectNodeStatsSummary(ctx context.Context) (*NodeStatsSummaryMetric, error) CollectPodVolumeMetrics(ctx context.Context) ([]K8sPodVolumeMetric, error) @@ -381,7 +381,7 @@ func (s *SysfsStorageInfoProvider) CollectPodVolumeMetrics(ctx context.Context) return metrics, nil } -func (s *SysfsStorageInfoProvider) BuildFilesystemMetrics(timestamp time.Time) ([]FilesystemMetric, error) { +func (s *SysfsStorageInfoProvider) BuildFilesystemMetrics(ctx context.Context, timestamp time.Time) ([]FilesystemMetric, error) { // Read mount information from /proc/1/mountinfo mounts, err := readMountInfo("/proc/1/mountinfo") if err != nil { @@ -389,7 +389,7 @@ func (s *SysfsStorageInfoProvider) BuildFilesystemMetrics(timestamp time.Time) ( } // Build pod volume lookup map for enrichment - podVolumeMap := s.buildPodVolumeLookupMap() + podVolumeMap := s.buildPodVolumeLookupMap(ctx) // Deduplicate by major:minor device ID // When multiple mounts point to the same device (bind mounts), prefer paths @@ -431,12 +431,12 @@ func podVolumeKey(podUID, volumeName string) string { // The map is keyed by both: // - podUID/volumeName (for emptyDir, configMap, etc.) 
// - podUID/pvName (for CSI volumes where the mount path contains the PV name) -func (s *SysfsStorageInfoProvider) buildPodVolumeLookupMap() map[string]*kubepb.PodVolumeInfo { +func (s *SysfsStorageInfoProvider) buildPodVolumeLookupMap(ctx context.Context) map[string]*kubepb.PodVolumeInfo { if s.kubeClient == nil { return nil } - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() resp, err := s.kubeClient.GetPodVolumes(ctx, &kubepb.GetPodVolumesRequest{ diff --git a/cmd/agent/daemon/pipeline/storage_pipeline.go b/cmd/agent/daemon/pipeline/storage_pipeline.go index 56c2ee7c..4b5d616d 100644 --- a/cmd/agent/daemon/pipeline/storage_pipeline.go +++ b/cmd/agent/daemon/pipeline/storage_pipeline.go @@ -35,7 +35,7 @@ func (c *Controller) collectStorageMetrics(ctx context.Context) { c.log.Errorf("failed to collect block device metrics: %v", err) } - if err := c.processFilesystemMetrics(timestamp); err != nil { + if err := c.processFilesystemMetrics(ctx, timestamp); err != nil { c.log.Errorf("failed to collect filesystem metrics: %v", err) } @@ -65,12 +65,12 @@ func (c *Controller) processBlockDeviceMetrics(timestamp time.Time) error { return nil } -func (c *Controller) processFilesystemMetrics(timestamp time.Time) error { +func (c *Controller) processFilesystemMetrics(ctx context.Context, timestamp time.Time) error { if c.filesystemMetricsWriter == nil { return fmt.Errorf("filesystem metrics writer not initialized") } - fsMetrics, err := c.storageInfoProvider.BuildFilesystemMetrics(timestamp) + fsMetrics, err := c.storageInfoProvider.BuildFilesystemMetrics(ctx, timestamp) if err != nil { return fmt.Errorf("failed to collect filesystem metrics: %w", err) } From f0254aed8c29304e9dbd89ced4eb6655106121bc Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Mon, 19 Jan 2026 16:37:18 +0200 Subject: [PATCH 18/22] Use local tables --- cmd/agent/daemon/pipeline/controller.go | 8 
++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/agent/daemon/pipeline/controller.go b/cmd/agent/daemon/pipeline/controller.go index 9a99105d..e22f2c7c 100644 --- a/cmd/agent/daemon/pipeline/controller.go +++ b/cmd/agent/daemon/pipeline/controller.go @@ -103,7 +103,7 @@ type K8sPodVolumeMetricsWriter interface { func NewBlockDeviceMetricsWriter(metricsClient custommetrics.MetricClient) (BlockDeviceMetricsWriter, error) { return custommetrics.NewMetric[BlockDeviceMetric]( metricsClient, - custommetrics.WithCollectionName[BlockDeviceMetric]("storage_block_device_metrics_v2"), + custommetrics.WithCollectionName[BlockDeviceMetric]("storage_block_device_metrics_v2_local"), custommetrics.WithSkipTimestamp[BlockDeviceMetric](), ) } @@ -111,7 +111,7 @@ func NewBlockDeviceMetricsWriter(metricsClient custommetrics.MetricClient) (Bloc func NewFilesystemMetricsWriter(metricsClient custommetrics.MetricClient) (FilesystemMetricsWriter, error) { return custommetrics.NewMetric[FilesystemMetric]( metricsClient, - custommetrics.WithCollectionName[FilesystemMetric]("storage_filesystem_metrics_v2"), + custommetrics.WithCollectionName[FilesystemMetric]("storage_filesystem_metrics_v2_local"), custommetrics.WithSkipTimestamp[FilesystemMetric](), ) } @@ -119,7 +119,7 @@ func NewFilesystemMetricsWriter(metricsClient custommetrics.MetricClient) (Files func NewNodeStatsSummaryWriter(metricsClient custommetrics.MetricClient) (NodeStatsSummaryWriter, error) { return custommetrics.NewMetric[NodeStatsSummaryMetric]( metricsClient, - custommetrics.WithCollectionName[NodeStatsSummaryMetric]("storage_node_metrics"), + custommetrics.WithCollectionName[NodeStatsSummaryMetric]("storage_node_metrics_local"), custommetrics.WithSkipTimestamp[NodeStatsSummaryMetric](), ) } @@ -127,7 +127,7 @@ func NewNodeStatsSummaryWriter(metricsClient custommetrics.MetricClient) (NodeSt func NewK8sPodVolumeMetricsWriter(metricsClient custommetrics.MetricClient) (K8sPodVolumeMetricsWriter, error) 
{ return custommetrics.NewMetric[K8sPodVolumeMetric]( metricsClient, - custommetrics.WithCollectionName[K8sPodVolumeMetric]("k8s_pod_volume"), + custommetrics.WithCollectionName[K8sPodVolumeMetric]("k8s_pod_volume_metrics_local"), custommetrics.WithSkipTimestamp[K8sPodVolumeMetric](), ) } From 0f84814a64f046d1e8dfff4b79f4dd349822351a Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Mon, 19 Jan 2026 17:19:32 +0200 Subject: [PATCH 19/22] Remove local suffix --- cmd/agent/daemon/pipeline/controller.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/agent/daemon/pipeline/controller.go b/cmd/agent/daemon/pipeline/controller.go index e22f2c7c..25d22b1d 100644 --- a/cmd/agent/daemon/pipeline/controller.go +++ b/cmd/agent/daemon/pipeline/controller.go @@ -103,7 +103,7 @@ type K8sPodVolumeMetricsWriter interface { func NewBlockDeviceMetricsWriter(metricsClient custommetrics.MetricClient) (BlockDeviceMetricsWriter, error) { return custommetrics.NewMetric[BlockDeviceMetric]( metricsClient, - custommetrics.WithCollectionName[BlockDeviceMetric]("storage_block_device_metrics_v2_local"), + custommetrics.WithCollectionName[BlockDeviceMetric]("storage_block_device_metrics_v2"), custommetrics.WithSkipTimestamp[BlockDeviceMetric](), ) } @@ -111,7 +111,7 @@ func NewBlockDeviceMetricsWriter(metricsClient custommetrics.MetricClient) (Bloc func NewFilesystemMetricsWriter(metricsClient custommetrics.MetricClient) (FilesystemMetricsWriter, error) { return custommetrics.NewMetric[FilesystemMetric]( metricsClient, - custommetrics.WithCollectionName[FilesystemMetric]("storage_filesystem_metrics_v2_local"), + custommetrics.WithCollectionName[FilesystemMetric]("storage_filesystem_metrics_v2"), custommetrics.WithSkipTimestamp[FilesystemMetric](), ) } @@ -119,7 +119,7 @@ func NewFilesystemMetricsWriter(metricsClient custommetrics.MetricClient) (Files func NewNodeStatsSummaryWriter(metricsClient custommetrics.MetricClient) (NodeStatsSummaryWriter, error) { 
return custommetrics.NewMetric[NodeStatsSummaryMetric]( metricsClient, - custommetrics.WithCollectionName[NodeStatsSummaryMetric]("storage_node_metrics_local"), + custommetrics.WithCollectionName[NodeStatsSummaryMetric]("storage_node_metrics"), custommetrics.WithSkipTimestamp[NodeStatsSummaryMetric](), ) } @@ -127,7 +127,7 @@ func NewNodeStatsSummaryWriter(metricsClient custommetrics.MetricClient) (NodeSt func NewK8sPodVolumeMetricsWriter(metricsClient custommetrics.MetricClient) (K8sPodVolumeMetricsWriter, error) { return custommetrics.NewMetric[K8sPodVolumeMetric]( metricsClient, - custommetrics.WithCollectionName[K8sPodVolumeMetric]("k8s_pod_volume_metrics_local"), + custommetrics.WithCollectionName[K8sPodVolumeMetric]("k8s_pod_volume_metrics"), custommetrics.WithSkipTimestamp[K8sPodVolumeMetric](), ) } From fb2d1a15fcc3790ab7a11e08cf621b762bff1562 Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Tue, 20 Jan 2026 07:21:29 +0200 Subject: [PATCH 20/22] Undo wrong change of removing metrics client start --- cmd/agent/daemon/app/app.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cmd/agent/daemon/app/app.go b/cmd/agent/daemon/app/app.go index 8e9341ac..ac76a384 100644 --- a/cmd/agent/daemon/app/app.go +++ b/cmd/agent/daemon/app/app.go @@ -263,6 +263,12 @@ func (a *App) Run(ctx context.Context) error { return fmt.Errorf("failed to create metrics client: %w", err) } + go func() { + if err := metricsClient.Start(ctx); err != nil { + log.Warnf("metrics client failed: %v", err) + } + }() + blockDeviceMetricsWriter, filesystemMetricsWriter, nodeStatsSummaryWriter, podVolumeMetricsWriter, err = setupStorageMetrics(metricsClient) if err != nil { return fmt.Errorf("failed to setup storage metrics: %w", err) From 9e68ea2c74feafdd4093a9879b13e9d39d78a571 Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Tue, 20 Jan 2026 07:57:26 +0200 Subject: [PATCH 21/22] Add k8s_pod_volume_metrics to E2E test --- e2e/e2e.go | 53 
++++++++++++++++++++++++++++++++++++++++++++++- e2e/pvc-test.yaml | 30 +++++++++++++++++++++++++++ e2e/run.sh | 2 ++ 3 files changed, 84 insertions(+), 1 deletion(-) create mode 100644 e2e/pvc-test.yaml diff --git a/e2e/e2e.go b/e2e/e2e.go index fac00df5..e792d937 100644 --- a/e2e/e2e.go +++ b/e2e/e2e.go @@ -427,6 +427,29 @@ func (t *testCASTAIServer) decodeNodeStatsSummaryMetrics(schema, data []byte) ([ return results, nil } +func (t *testCASTAIServer) decodeK8sPodVolumeMetrics(schema, data []byte) ([]pipeline.K8sPodVolumeMetric, error) { + avroSchema, err := avro.Parse(string(schema)) + if err != nil { + return nil, fmt.Errorf("failed to parse schema: %w", err) + } + + decoder := avro.NewDecoderForSchema(avroSchema, bytes.NewReader(data)) + var results []pipeline.K8sPodVolumeMetric + + for { + var metric pipeline.K8sPodVolumeMetric + if err := decoder.Decode(&metric); err != nil { + if errors.Is(err, io.EOF) { + break // End of data + } + return nil, fmt.Errorf("failed to decode metric: %w", err) + } + results = append(results, metric) + } + + return results, nil +} + func (t *testCASTAIServer) KubeBenchReportIngest(ctx context.Context, report *castaipb.KubeBenchReport) (*castaipb.KubeBenchReportIngestResponse, error) { t.kubeBenchReports = append(t.kubeBenchReports, report) return &castaipb.KubeBenchReportIngestResponse{}, nil @@ -539,7 +562,7 @@ func (t *testCASTAIServer) assertLogs(ctx context.Context) error { } func (t *testCASTAIServer) assertStorageMetrics(ctx context.Context) error { - timeout := time.After(15 * time.Second) + timeout := time.After(30 * time.Second) expectedCollections := []string{ "storage_block_device_metrics_v2", @@ -665,6 +688,34 @@ func (t *testCASTAIServer) assertStorageMetrics(ctx context.Context) error { } } + // k8s_pod_volume_metrics is optional - only sent if there are PVCs in the cluster + if batches, exists := metrics["k8s_pod_volume_metrics"]; exists { + for _, batch := range batches { + podVolumeMetrics, err := 
t.decodeK8sPodVolumeMetrics(batch.Schema, batch.Metrics) + if err != nil { + return fmt.Errorf("failed to decode k8s pod volume metrics: %w", err) + } + + for _, metric := range podVolumeMetrics { + if metric.NodeName == "" { + return errors.New("k8s pod volume metric missing node name") + } + if metric.Namespace == "" { + return errors.New("k8s pod volume metric missing namespace") + } + if metric.PodName == "" { + return errors.New("k8s pod volume metric missing pod name") + } + if metric.PodUID == "" { + return errors.New("k8s pod volume metric missing pod uid") + } + if metric.VolumeName == "" { + return errors.New("k8s pod volume metric missing volume name") + } + } + } + } + foundCollections := 0 for _, expectedCollection := range expectedCollections { if _, exists := metrics[expectedCollection]; exists { diff --git a/e2e/pvc-test.yaml b/e2e/pvc-test.yaml new file mode 100644 index 00000000..05713074 --- /dev/null +++ b/e2e/pvc-test.yaml @@ -0,0 +1,30 @@ +# PVC test for E2E - generates k8s_pod_volume_metrics +# Uses Kind's default "standard" storage class (local-path-provisioner) +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: e2e-test-pvc +spec: + accessModes: + - ReadWriteOnce + storageClassName: standard + resources: + requests: + storage: 100Mi +--- +apiVersion: v1 +kind: Pod +metadata: + name: e2e-test-pvc-pod +spec: + containers: + - name: test + image: busybox + command: ["sleep", "3600"] + volumeMounts: + - name: data + mountPath: /data + volumes: + - name: data + persistentVolumeClaim: + claimName: e2e-test-pvc diff --git a/e2e/run.sh b/e2e/run.sh index ac43b722..a05bb57f 100755 --- a/e2e/run.sh +++ b/e2e/run.sh @@ -59,6 +59,8 @@ kubectl apply -f ./e2e/socks5-generator.yaml kubectl apply -f ./e2e/nc-server-client.yaml kubectl apply -f ./e2e/conn-generator.yaml kubectl apply -f ./e2e/iperf.yaml +# Deploy PVC test to generate k8s_pod_volume_metrics +kubectl apply -f ./e2e/pvc-test.yaml echo "Waiting for job to finish" From 
89a82acebe80ae91209fb57cde204b4dc33c81ca Mon Sep 17 00:00:00 2001 From: Ciprian Focsaneanu Date: Tue, 20 Jan 2026 11:59:22 +0200 Subject: [PATCH 22/22] Fix context in test --- cmd/agent/daemon/pipeline/controller_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/agent/daemon/pipeline/controller_test.go b/cmd/agent/daemon/pipeline/controller_test.go index 43bd0fe1..0b95eb44 100644 --- a/cmd/agent/daemon/pipeline/controller_test.go +++ b/cmd/agent/daemon/pipeline/controller_test.go @@ -808,7 +808,7 @@ func TestController(t *testing.T) { blockWriter := ctrl.blockDeviceMetricsWriter.(*mockBlockDeviceMetricsWriter) fsWriter := ctrl.filesystemMetricsWriter.(*mockFilesystemMetricsWriter) - ctrl.collectStorageMetrics(context.Background()) + ctrl.collectStorageMetrics(t.Context()) r.Len(blockWriter.metrics, 1) r.Len(fsWriter.metrics, 1)