diff --git a/Makefile b/Makefile index 9b3850f80e..426d0327d6 100644 --- a/Makefile +++ b/Makefile @@ -99,7 +99,7 @@ test-e2e: $(BINDATA) test-e2e-local: $(BINDATA) performance-profile-creator-tests gather-sysinfo-tests $(GO_BUILD_RECIPE) for d in performanceprofile/functests-render-command/1_render_command; do \ - $(GO) test -v -timeout 40m ./test/e2e/$$d -ginkgo.v -ginkgo.no-color -ginkgo.fail-fast || exit; \ + $(GO) test -v -timeout 40m ./test/e2e/$$d -ginkgo.v -ginkgo.no-color || exit; \ done # This target ensures /manifests directory is up-to-date. It takes advantage of yaml patching functionality of @@ -211,7 +211,7 @@ pao-functests: cluster-label-worker-cnf pao-functests-only pao-functests-only: $(BINDATA) @echo "Cluster Version" hack/show-cluster-version.sh - hack/run-test.sh -t "test/e2e/performanceprofile/functests/0_config test/e2e/performanceprofile/functests/1_performance test/e2e/performanceprofile/functests/6_mustgather_testing test/e2e/performanceprofile/functests/10_performance_ppc" -p "-v -r --fail-fast --flake-attempts=2 --junit-report=report.xml" -m "Running Functional Tests" + hack/run-test.sh -t "test/e2e/performanceprofile/functests/0_config test/e2e/performanceprofile/functests/1_performance test/e2e/performanceprofile/functests/6_mustgather_testing test/e2e/performanceprofile/functests/10_performance_ppc" -p "-v -r --flake-attempts=2 --junit-report=report.xml" -m "Running Functional Tests" .PHONY: pao-functests-updating-profile pao-functests-updating-profile: cluster-label-worker-cnf pao-functests-update-only @@ -235,19 +235,19 @@ pao-functests-performance-workloadhints: cluster-label-worker-cnf pao-functests- pao-functests-performance-workloadhints-only: $(BINDATA) @echo "Cluster Version" hack/show-cluster-version.sh - hack/run-test.sh -t "test/e2e/performanceprofile/functests/0_config test/e2e/performanceprofile/functests/8_performance_workloadhints" -p "-v -r --fail-fast --flake-attempts=2 --timeout=5h --junit-report=report.xml" -m "Running Functional WorkloadHints Tests" + hack/run-test.sh -t "test/e2e/performanceprofile/functests/0_config test/e2e/performanceprofile/functests/8_performance_workloadhints" -p "-v -r --flake-attempts=2 --timeout=5h --junit-report=report.xml" -m "Running Functional WorkloadHints Tests" .PHONY: pao-functests-latency-testing pao-functests-latency-testing: dist-latency-tests $(BINDATA) @echo "Cluster Version" hack/show-cluster-version.sh - hack/run-test.sh -t "./test/e2e/performanceprofile/functests/0_config ./test/e2e/performanceprofile/functests/5_latency_testing" -p "-v -r --fail-fast --flake-attempts=2 --timeout=5h --junit-report=report.xml" -m "Running Functionalconfiguration latency Tests" + hack/run-test.sh -t "./test/e2e/performanceprofile/functests/0_config ./test/e2e/performanceprofile/functests/5_latency_testing" -p "-v -r --flake-attempts=2 --timeout=5h --junit-report=report.xml" -m "Running Functional configuration latency Tests" .PHONY: pao-functests-mixedcpus pao-functests-mixedcpus: $(BINDATA) @echo "Cluster Version" hack/show-cluster-version.sh - hack/run-test.sh -t "./test/e2e/performanceprofile/functests/0_config ./test/e2e/performanceprofile/functests/11_mixedcpus" -p "-v -r --fail-fast --flake-attempts=2 --junit-report=report.xml" -m "Running MixedCPUs Tests" + hack/run-test.sh -t "./test/e2e/performanceprofile/functests/0_config ./test/e2e/performanceprofile/functests/11_mixedcpus" -p "-v -r --flake-attempts=2 --junit-report=report.xml" -m "Running MixedCPUs Tests" .PHONY: pao-functests-hypershift
pao-functests-hypershift: $(BINDATA) @@ -275,7 +275,7 @@ arm-kernelpagesize: $(BINDATA) .PHONY: performance-profile-creator-tests performance-profile-creator-tests: build-performance-profile-creator @echo "Running Performance Profile Creator Tests" - hack/run-test.sh -t "test/e2e/performanceprofile/functests-performance-profile-creator" -p "--v -r --fail-fast --flake-attempts=2" -m "Running Functional Tests" -r "--junit-report=/tmp/artifacts" + hack/run-test.sh -t "test/e2e/performanceprofile/functests-performance-profile-creator" -p "--v -r --flake-attempts=2" -m "Running Functional Tests" -r "--junit-report=/tmp/artifacts" # Gather sysinfo binary for use in must-gather .PHONY: build-gather-sysinfo diff --git a/assets/performanceprofile/configs/99-runtimes.conf b/assets/performanceprofile/configs/99-runtimes.conf index 4874370d9b..710459229f 100644 --- a/assets/performanceprofile/configs/99-runtimes.conf +++ b/assets/performanceprofile/configs/99-runtimes.conf @@ -11,4 +11,5 @@ infra_ctr_cpuset = "{{.ReservedCpus}}" # do not have high-performance binary under the $PATH that will point to it. [crio.runtime.runtimes.high-performance] inherit_default_runtime = true +{{if .ExecCPUAffinity}}exec_cpu_affinity = "{{.ExecCPUAffinity}}"{{end}} allowed_annotations = ["cpu-load-balancing.crio.io", "cpu-quota.crio.io", "irq-load-balancing.crio.io", "cpu-c-states.crio.io", "cpu-freq-governor.crio.io"{{ if .CrioSharedCPUsAnnotation }}{{ printf ", %q" .CrioSharedCPUsAnnotation}}{{end}}] diff --git a/pkg/apis/performanceprofile/v2/performanceprofile_types.go b/pkg/apis/performanceprofile/v2/performanceprofile_types.go index 98e8e6c4ee..c7f4fdd610 100644 --- a/pkg/apis/performanceprofile/v2/performanceprofile_types.go +++ b/pkg/apis/performanceprofile/v2/performanceprofile_types.go @@ -34,6 +34,17 @@ const PerformanceProfileEnablePhysicalRpsAnnotation = "performance.openshift.io/ // Valid values: "true", "enable" (to enable), "false", "disable" (to disable). const PerformanceProfileEnableRpsAnnotation = "performance.openshift.io/enable-rps" +const ( + // PerformanceProfileExecCPUAffinityAnnotation manages the exec-cpu-affinity setting for the node. + // The performance profile sets this flag's value to "first" by default in the runtime handler configuration. + // Valid values: + // "none": disables the performance profile's default setting of this feature in CRI-O. + // Any other value is ignored and the default set by the performance profile is kept. + // TODO: remove in 4.23 + PerformanceProfileExecCPUAffinityAnnotation = "performance.openshift.io/exec-cpu-affinity" + PerformanceProfileExecCPUAffinityNone = "none" +) + // PerformanceProfileSpec defines the desired state of PerformanceProfile. type PerformanceProfileSpec struct { // CPU defines a set of CPU related parameters.
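Reviewer note: the annotation's semantics (only "none" opts out; anything else keeps the default) are easy to misread, so here is a minimal, self-contained Go sketch of the intended check. The constants are copied from the hunk above; `execCPUAffinityEnabled` is a hypothetical stand-in for the `IsExecCPUAffinityEnabled` helper added later in this diff.

```go
package main

import "fmt"

// Constants copied from the performanceprofile_types.go hunk above.
const (
	PerformanceProfileExecCPUAffinityAnnotation = "performance.openshift.io/exec-cpu-affinity"
	PerformanceProfileExecCPUAffinityNone       = "none"
)

// execCPUAffinityEnabled mirrors the documented semantics: only the value
// "none" disables the feature; a missing annotation or any other value keeps
// the default ("first") that the profile renders into the CRI-O config.
func execCPUAffinityEnabled(annotations map[string]string) bool {
	return annotations[PerformanceProfileExecCPUAffinityAnnotation] != PerformanceProfileExecCPUAffinityNone
}

func main() {
	fmt.Println(execCPUAffinityEnabled(nil)) // true: default stays enabled
	fmt.Println(execCPUAffinityEnabled(map[string]string{
		PerformanceProfileExecCPUAffinityAnnotation: PerformanceProfileExecCPUAffinityNone,
	})) // false: profile opted out
}
```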
diff --git a/pkg/performanceprofile/controller/performanceprofile/components/machineconfig/machineconfig.go b/pkg/performanceprofile/controller/performanceprofile/components/machineconfig/machineconfig.go index d5b2559ac2..12c01df1ef 100644 --- a/pkg/performanceprofile/controller/performanceprofile/components/machineconfig/machineconfig.go +++ b/pkg/performanceprofile/controller/performanceprofile/components/machineconfig/machineconfig.go @@ -76,6 +76,9 @@ const ( ovsDynamicPinningTriggerHostFile = "/var/lib/ovn-ic/etc/enable_dynamic_cpu_affinity" cpusetConfigure = "cpuset-configure" + + // ExecCPUAffinity config + execCPUAffinityFirst = "first" ) const ( @@ -118,6 +121,7 @@ const ( templateOvsSliceUsageFile = "01-use-ovs-slice.conf" templateWorkload = "Workload" templateCrioSharedCPUsAnnotation = "CrioSharedCPUsAnnotation" + templateExecCPUAffinity = "ExecCPUAffinity" ) // New returns new machine configuration object for performance sensitive workloads @@ -587,6 +591,10 @@ func renderCrioConfigSnippet(profile *performancev2.PerformanceProfile, src stri templateArgs[templateReservedCpus] = string(*profile.Spec.CPU.Reserved) } + if profilecomponent.IsExecCPUAffinityEnabled(profile) { + templateArgs[templateExecCPUAffinity] = execCPUAffinityFirst + } + if opts.MixedCPUsEnabled { templateArgs[templateSharedCpus] = string(*profile.Spec.CPU.Shared) templateArgs[templateCrioSharedCPUsAnnotation] = "cpu-shared.crio.io" diff --git a/pkg/performanceprofile/controller/performanceprofile/components/machineconfig/machineconfig_test.go b/pkg/performanceprofile/controller/performanceprofile/components/machineconfig/machineconfig_test.go index cbc2294d14..8f1159d4a6 100644 --- a/pkg/performanceprofile/controller/performanceprofile/components/machineconfig/machineconfig_test.go +++ b/pkg/performanceprofile/controller/performanceprofile/components/machineconfig/machineconfig_test.go @@ -5,6 +5,7 @@ import ( "encoding/json" "errors" "fmt" + "path/filepath" "regexp" "strings" @@ -141,6 +142,58 @@ var _ = Describe("Machine Config", func() { }) }) + Context("machine config creation with CRI-O runtime config", func() { + crioRuntimeConfigPath := filepath.Join(crioConfd, crioRuntimesConfig) + + It("should create machine config with exec-cpu-affinity set by default", func() { + profile := testutils.NewPerformanceProfile("test") + + mc, err := New(profile, &components.MachineConfigOptions{}) + Expect(err).ToNot(HaveOccurred()) + + result := igntypes.Config{} + Expect(json.Unmarshal(mc.Spec.Config.Raw, &result)).To(Succeed()) + + var content string + for _, f := range result.Storage.Files { + if f.Path == crioRuntimeConfigPath { + base64Data := strings.TrimPrefix(*f.Contents.Source, "data:text/plain;charset=utf-8;base64,") + decoded, err := base64.StdEncoding.DecodeString(base64Data) + Expect(err).ToNot(HaveOccurred()) + content = string(decoded) + break + } + } + Expect(content).ToNot(BeEmpty(), "crio runtime config not found") + Expect(content).To(ContainSubstring("exec_cpu_affinity = \"first\"")) + }) + + It("should create machine config without exec-cpu-affinity when annotation is set to none", func() { + profile := testutils.NewPerformanceProfile("test") + profile.Annotations = map[string]string{} + profile.Annotations[performancev2.PerformanceProfileExecCPUAffinityAnnotation] = performancev2.PerformanceProfileExecCPUAffinityNone + + mc, err := New(profile, &components.MachineConfigOptions{}) + Expect(err).ToNot(HaveOccurred()) + + result := igntypes.Config{} + Expect(json.Unmarshal(mc.Spec.Config.Raw, 
&result)).To(Succeed()) + + var content string + for _, f := range result.Storage.Files { + if f.Path == crioRuntimeConfigPath { + base64Data := strings.TrimPrefix(*f.Contents.Source, "data:text/plain;charset=utf-8;base64,") + decoded, err := base64.StdEncoding.DecodeString(base64Data) + Expect(err).ToNot(HaveOccurred()) + content = string(decoded) + break + } + } + Expect(content).ToNot(BeEmpty(), "crio runtime config not found") + Expect(content).ToNot(ContainSubstring("exec_cpu_affinity = \"first\"")) + }) + }) + Context("machine config creation with enabled RPS using alternative values", func() { It("should create machine config with RPS configuration when enabled with 'enable'", func() { profile := testutils.NewPerformanceProfile("test") diff --git a/pkg/performanceprofile/controller/performanceprofile/components/profile/profile.go b/pkg/performanceprofile/controller/performanceprofile/components/profile/profile.go index d120889f60..76b5262e48 100644 --- a/pkg/performanceprofile/controller/performanceprofile/components/profile/profile.go +++ b/pkg/performanceprofile/controller/performanceprofile/components/profile/profile.go @@ -94,3 +94,17 @@ func IsMixedCPUsEnabled(profile *performancev2.PerformanceProfile) bool { } return *profile.Spec.WorkloadHints.MixedCpus } + +// IsExecCPUAffinityEnabled checks if the exec-cpu-affinity feature should be enabled +func IsExecCPUAffinityEnabled(profile *performancev2.PerformanceProfile) bool { + if profile.Annotations != nil { + val, ok := profile.Annotations[performancev2.PerformanceProfileExecCPUAffinityAnnotation] + if ok && val == performancev2.PerformanceProfileExecCPUAffinityNone { + // run the legacy behavior and disable exec-cpu-affinity + return false + } + } + + // The default behavior is to enable exec-cpu-affinity whenever a profile is applied + return true +} diff --git a/test/e2e/performanceprofile/functests/11_mixedcpus/mixedcpus.go b/test/e2e/performanceprofile/functests/11_mixedcpus/mixedcpus.go index 944b9f0701..aff48ede9d 100644 --- a/test/e2e/performanceprofile/functests/11_mixedcpus/mixedcpus.go +++ b/test/e2e/performanceprofile/functests/11_mixedcpus/mixedcpus.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "math" "strconv" "strings" "time" @@ -41,6 +42,7 @@ import ( "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/poolname" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profilesupdate" + "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/resources" ) const ( @@ -510,6 +512,259 @@ var _ = Describe("Mixedcpus", Ordered, Label(string(label.MixedCPUs)), func() { }) }) }) + + Context("Check exec-cpu-affinity feature", func() { + When("exec-cpu-affinity is enabled (default in PP)", func() { + var workerRTNode *corev1.Node + var profile, initialProfile *performancev2.PerformanceProfile + var getter cgroup.ControllersGetter + var updatedShared, updatedIsolated cpuset.CPUSet + var needsUpdate bool + + BeforeEach(func() { + By("Checking if exec-cpu-affinity is enabled by default in the profile") + profile, _ = profiles.GetByNodeLabels(testutils.NodeSelectorLabels) + Expect(profile).ToNot(BeNil(), "Failed to get performance profile") + initialProfile = profile.DeepCopy() + if profile.Annotations != nil { + val, ok :=
profile.Annotations[performancev2.PerformanceProfileExecCPUAffinityAnnotation] + if ok && val == performancev2.PerformanceProfileExecCPUAffinityNone { + // fail loudly because the default should be enabled + Fail("exec-cpu-affinity is disabled in the profile") + } + } + + By("Updating performance profile to have enough shared cpus if needed") + updatedIsolated = *mustParse(string(*profile.Spec.CPU.Isolated)) + currentShared := mustParse(string(*profile.Spec.CPU.Shared)) + if len(currentShared.List()) < 2 { + testlog.Info("shared cpuset has fewer than 2 cpus; this test requires at least 2 shared cpus; update the profile") + isolated := mustParse(string(*profile.Spec.CPU.Isolated)) + + // we need at least 4 total isolated and shared CPUs: + // 1 as a buffer for node's base load + // 1 as the test gu pod requests + // 2 as shared cpus + leastIsolatedCpus := 3 + if currentShared.Size() == 0 { + leastIsolatedCpus = 4 + } + if isolated.Size() < leastIsolatedCpus { + Skip(fmt.Sprintf("isolated cpuset has fewer than %d cpus; this test requires at least %d isolated cpus", leastIsolatedCpus, leastIsolatedCpus)) + } + + updatedShared = cpuset.New(isolated.List()[0], isolated.List()[1]) + updatedIsolated = cpuset.New(isolated.List()[2:]...) + + if len(currentShared.List()) == 1 { + updatedShared = cpuset.New(currentShared.List()[0], isolated.List()[0]) + updatedIsolated = cpuset.New(isolated.List()[1:]...) + } + + testlog.Infof("shared cpu ids to be updated are: %q", updatedShared.String()) + profile.Spec.CPU.Isolated = cpuSetToPerformanceCPUSet(&updatedIsolated) + profile.Spec.CPU.Shared = cpuSetToPerformanceCPUSet(&updatedShared) + profile.Spec.WorkloadHints.MixedCpus = ptr.To(true) // if not already + + profiles.UpdateWithRetry(profile) + + poolName := poolname.GetByProfile(context.TODO(), profile) + By(fmt.Sprintf("Applying changes in performance profile and waiting until %s starts updating", poolName)) + profilesupdate.WaitForTuningUpdating(context.TODO(), profile) + By(fmt.Sprintf("Waiting until %s finishes updating", poolName)) + profilesupdate.WaitForTuningUpdated(context.TODO(), profile) + needsUpdate = true + } + + workerRTNodes, err := nodes.GetByLabels(testutils.NodeSelectorLabels) + Expect(err).ToNot(HaveOccurred()) + workerRTNodes, err = nodes.MatchingOptionalSelector(workerRTNodes) + Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("error looking for the optional selector: %v", err)) + Expect(workerRTNodes).ToNot(BeEmpty()) + workerRTNode = &workerRTNodes[0] + + getter, err = cgroup.BuildGetter(ctx, testclient.DataPlaneClient, testclient.K8sClient) + Expect(err).ToNot(HaveOccurred()) + }) + + AfterEach(func() { + if needsUpdate { + By("Reverting the cluster to previous state") + profiles.UpdateWithRetry(initialProfile) + poolName := poolname.GetByProfile(context.TODO(), initialProfile) + By(fmt.Sprintf("Applying changes in performance profile and waiting until %s starts updating", poolName)) + profilesupdate.WaitForTuningUpdating(context.TODO(), initialProfile) + By(fmt.Sprintf("Waiting until %s finishes updating", poolName)) + profilesupdate.WaitForTuningUpdated(context.TODO(), initialProfile) + } + }) + + DescribeTable("should pin the exec process to the first CPU of the right CPU set", func(qos corev1.PodQOSClass, containersResources []corev1.ResourceList) { + By("Creating the test pod") + isolatedCpus, _ := cpuset.Parse(string(*profile.Spec.CPU.Isolated)) + switch qos { + case corev1.PodQOSGuaranteed: + totalPodCpus := resources.TotalCPUsRounded(containersResources) + if
isolatedCpus.Size() < totalPodCpus { + Skip("Skipping test: Insufficient isolated CPUs") + } + case corev1.PodQOSBurstable: + maxPodCpus := resources.MaxCPURequestsRounded(containersResources) + if isolatedCpus.Size() < maxPodCpus { + Skip("Skipping test: Insufficient isolated CPUs") + } + case corev1.PodQOSBestEffort: + testlog.Info("test best-effort pod") + default: + Fail("Invalid QoS class") + } + + var err error + testPod := pods.MakePodWithResources(ctx, workerRTNode, qos, containersResources) + Expect(testclient.Client.Create(ctx, testPod)).To(Succeed(), "Failed to create test pod") + testPod, err = pods.WaitForCondition(ctx, client.ObjectKeyFromObject(testPod), corev1.PodReady, corev1.ConditionTrue, 5*time.Minute) + Expect(err).ToNot(HaveOccurred()) + defer func() { + if testPod != nil { + testlog.Infof("deleting pod %q", testPod.Name) + Expect(pods.Delete(ctx, testPod)).To(BeTrue(), "Failed to delete test pod") + } + }() + + cpusetCfg := &controller.CpuSet{} + for _, container := range testPod.Spec.Containers { + By(fmt.Sprintf("Prepare comparable data for container %s, resource list: %v", container.Name, container.Resources.String())) + Expect(getter.Container(ctx, testPod, container.Name, cpusetCfg)).To(Succeed(), "Failed to get cpuset config for the container") + + isExclusiveCPURequest := false + if qos == corev1.PodQOSGuaranteed { + cpuRequestFloat := container.Resources.Requests.Name(corev1.ResourceCPU, resource.DecimalSI).AsFloat64Slow() + testlog.Infof("float value of cpu request: %f", cpuRequestFloat) + mod := int(cpuRequestFloat*1000) % 1000 + if mod == 0 { + isExclusiveCPURequest = true + } + } + + cpusIncludingShared, err := cpuset.Parse(cpusetCfg.Cpus) + Expect(err).ToNot(HaveOccurred(), "Failed to parse cpuset config for test pod cpus=%q", cpusetCfg.Cpus) + testlog.Infof("cpus including shared: %s", cpusIncludingShared.String()) + firstCPU := cpusIncludingShared.List()[0] + // high enough default + retries := 10 + + if container.Resources.Limits.Name(sharedCpusResource, resource.DecimalSI).Value() > 0 { + cntShared := cpusIncludingShared.Difference(updatedIsolated) + firstCPU = cntShared.List()[0] + testlog.Infof("container %s: first shared CPU: %d; all shared CPUs: %s", container.Name, firstCPU, cntShared.String()) + // high enough factor to ensure that even with only 2 cpus, the functionality is preserved + f := 20 + retries = int(math.Ceil(float64(f) / float64(cntShared.Size()))) + } + + By(fmt.Sprintf("Run exec command on the pod and verify the process is pinned to the right CPU for container %s", container.Name)) + for i := 0; i < retries; i++ { + cmd := []string{"/bin/bash", "-c", "sleep 10 & SLPID=$!; ps -o psr -p $SLPID;"} + output, err := pods.ExecCommandOnPod(testclient.K8sClient, testPod, container.Name, cmd) + Expect(err).ToNot(HaveOccurred(), "Failed to exec command on the pod; retry %d", i) + strout := string(output) + testlog.Infof("retry %d: exec command output: %s", i, strout) + + strout = strings.ReplaceAll(strout, "PSR", "") + execProcessCPUs := strings.TrimSpace(strout) + Expect(execProcessCPUs).ToNot(BeEmpty(), "Failed to get exec process CPU; retry %d", i) + execProcessCPUInt, err := strconv.Atoi(execProcessCPUs) + Expect(err).ToNot(HaveOccurred()) + if isExclusiveCPURequest { + testlog.Infof("exec process CPU: %d, expected first CPU: %d", execProcessCPUInt, firstCPU) + Expect(execProcessCPUInt).To(Equal(firstCPU), "Exec process CPU is not the expected first CPU; retry %d", i) + } else { + if execProcessCPUInt != firstCPU { + testlog.Infof("retry %d:
exec process was pinned to a different CPU than the expected first CPU: expectedFirstCPU: %d, foundCPU: %d", i, firstCPU, execProcessCPUInt) + break + } + } + } + } + }, + Entry("guaranteed pod with single container with shared CPU request", + corev1.PodQOSGuaranteed, + []corev1.ResourceList{ + { // cnt1 resources + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + sharedCpusResource: resource.MustParse("1"), + }, + }), + Entry("guaranteed pod with single container with shared CPU request and fractional CPU requests", + corev1.PodQOSGuaranteed, + []corev1.ResourceList{ + { // cnt1 resources + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + sharedCpusResource: resource.MustParse("1"), + }, + }), + Entry("guaranteed pod with multiple containers with shared CPU request", + corev1.PodQOSGuaranteed, + []corev1.ResourceList{ + { // cnt1 resources + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + sharedCpusResource: resource.MustParse("1"), + }, + { // cnt2 resources + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + sharedCpusResource: resource.MustParse("1"), + }, + }), + Entry("guaranteed pod with mixed containers: with shared CPU and without shared CPU", + corev1.PodQOSGuaranteed, + []corev1.ResourceList{ + { // cnt1 resources + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + sharedCpusResource: resource.MustParse("1"), + }, + { // cnt2 resources + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }), + Entry("guaranteed pod with fractional CPU requests", + corev1.PodQOSGuaranteed, + []corev1.ResourceList{ + { // cnt1 resources + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + sharedCpusResource: resource.MustParse("1"), + }, + { // cnt2 resources + corev1.ResourceCPU: resource.MustParse("2300m"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + sharedCpusResource: resource.MustParse("1"), + }, + }), + Entry("best-effort pod without shared CPU request", + corev1.PodQOSBestEffort, + // shared CPUs are only allowed for guaranteed pods + []corev1.ResourceList{ + //cnt1 resources + {}, + }), + Entry("burstable pod without shared CPU request", + corev1.PodQOSBurstable, + // shared CPUs are only allowed for guaranteed pods + []corev1.ResourceList{ + { // cnt1 resources + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }), + ) + }) + }) }) func setup(ctx context.Context) func(ctx2 context.Context) { @@ -659,6 +914,12 @@ func withRuntime(name string) func(p *corev1.Pod) { } } +func onNode(nodeName string) func(p *corev1.Pod) { + return func(p *corev1.Pod) { + p.Spec.NodeName = nodeName + } +} + func getTestingNamespace() corev1.Namespace { return *namespaces.TestingNamespace } diff --git a/test/e2e/performanceprofile/functests/1_performance/cpu_management.go b/test/e2e/performanceprofile/functests/1_performance/cpu_management.go index 1e45154b4a..11fbeb488a 100644 --- a/test/e2e/performanceprofile/functests/1_performance/cpu_management.go +++ b/test/e2e/performanceprofile/functests/1_performance/cpu_management.go @@ -4,6 +4,7 @@ import ( "context" "encoding/json" "fmt" + "math" "os" "regexp" "strconv" @@ -12,7 +13,6 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1
"k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" @@ -44,6 +44,7 @@ import ( "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/nodes" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/pods" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles" + "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/resources" ) var workerRTNode *corev1.Node @@ -248,7 +249,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { }) AfterEach(func() { - deleteTestPod(context.TODO(), testpod) + Expect(pods.Delete(context.TODO(), testpod)).To(BeTrue(), "Failed to delete pod") }) DescribeTable("Verify CPU usage by stress PODs", func(ctx context.Context, guaranteed bool) { @@ -339,7 +340,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { Expect(err).ToNot(HaveOccurred()) }) AfterEach(func() { - deleteTestPod(context.TODO(), testpod) + Expect(pods.Delete(context.TODO(), testpod)).To(BeTrue(), "Failed to delete pod") }) When("kubelet is restart", func() { It("[test_id: 73501] defaultCpuset should not change", func() { @@ -422,7 +423,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { AfterEach(func() { if testpod != nil { - deleteTestPod(context.TODO(), testpod) + Expect(pods.Delete(context.TODO(), testpod)).To(BeTrue(), "Failed to delete pod") } }) @@ -481,7 +482,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { fmt.Sprintf("IRQ still active on CPU%s", psr)) By("Checking that after removing POD default smp affinity is returned back to all active CPUs") - deleteTestPod(context.TODO(), testpod) + Expect(pods.Delete(context.TODO(), testpod)).To(BeTrue(), "Failed to delete pod") defaultSmpAffinitySet, err = nodes.GetDefaultSmpAffinitySet(context.TODO(), workerRTNode) Expect(err).ToNot(HaveOccurred()) @@ -580,7 +581,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { if testpod == nil { return } - deleteTestPod(context.TODO(), testpod) + Expect(pods.Delete(context.TODO(), testpod)).To(BeTrue(), "Failed to delete pod") }) It("[test_id:49149] should reject pods which request integral CPUs not aligned with machine SMT level", func() { @@ -633,7 +634,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { if testpod == nil { return } - deleteTestPod(context.TODO(), testpod) + Expect(pods.Delete(context.TODO(), testpod)).To(BeTrue(), "Failed to delete pod") }) DescribeTable("Verify Hyper-Thread aware scheduling for guaranteed pods", @@ -680,7 +681,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { testpod = startHTtestPod(ctx, cpuCount) Expect(checkPodHTSiblings(ctx, testpod)).To(BeTrue(), "Pod cpu set does not map to host cpu sibling pairs") By("Deleting test pod...") - deleteTestPod(ctx, testpod) + Expect(pods.Delete(ctx, testpod)).To(BeTrue(), "Failed to delete pod") } }, @@ -983,7 +984,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { defer func() { if guaranteedPod != nil { testlog.Infof("deleting pod %q", guaranteedPod.Name) - deleteTestPod(ctx, guaranteedPod) + Expect(pods.Delete(ctx, guaranteedPod)).To(BeTrue(), "Failed to delete guaranteed pod") 
} }() @@ -1014,7 +1015,7 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { defer func() { if bestEffortPod != nil { testlog.Infof("deleting pod %q", bestEffortPod.Name) - deleteTestPod(ctx, bestEffortPod) + Expect(pods.Delete(ctx, bestEffortPod)).To(BeTrue(), "Failed to delete best-effort pod") } }() @@ -1142,15 +1143,179 @@ var _ = Describe("[rfe_id:27363][performance] CPU Management", Ordered, func() { defer func() { if guPod != nil { testlog.Infof("deleting pod %q", guPod.Name) - deleteTestPod(ctx, guPod) + Expect(pods.Delete(ctx, guPod)).To(BeTrue(), "Failed to delete guaranteed pod") } if buPod != nil { testlog.Infof("deleting pod %q", buPod.Name) - deleteTestPod(ctx, buPod) + Expect(pods.Delete(ctx, buPod)).To(BeTrue(), "Failed to delete burstable pod") } }() }) }) + + Context("Check exec-cpu-affinity feature", func() { + When("exec-cpu-affinity is enabled (default in PP)", func() { + BeforeEach(func() { + By("Checking if exec-cpu-affinity is enabled by default in the profile") + profile, _ := profiles.GetByNodeLabels(testutils.NodeSelectorLabels) + Expect(profile).ToNot(BeNil(), "Failed to get performance profile") + if profile.Annotations != nil { + val, ok := profile.Annotations[performancev2.PerformanceProfileExecCPUAffinityAnnotation] + if ok && val == performancev2.PerformanceProfileExecCPUAffinityNone { + // fail loudly because the default should be enabled + Fail("exec-cpu-affinity is disabled in the profile") + } + } + }) + + DescribeTable( + "should pin the exec process to a specific CPU dedicated to the container", func(qos corev1.PodQOSClass, containersResources []corev1.ResourceList) { + By("Creating the test pod") + isolatedCpus, _ := cpuset.Parse(string(*profile.Spec.CPU.Isolated)) + switch qos { + case corev1.PodQOSGuaranteed: + totalPodCpus := resources.TotalCPUsRounded(containersResources) + if isolatedCpus.Size() < totalPodCpus { + Skip("Skipping test: Insufficient isolated CPUs") + } + case corev1.PodQOSBurstable: + maxPodCpus := resources.MaxCPURequestsRounded(containersResources) + if isolatedCpus.Size() < maxPodCpus { + Skip("Skipping test: Insufficient isolated CPUs") + } + case corev1.PodQOSBestEffort: + testlog.Info("test best-effort pod") + default: + Fail("Invalid QoS class") + } + + testPod := pods.MakePodWithResources(ctx, workerRTNode, qos, containersResources) + Expect(testclient.Client.Create(ctx, testPod)).To(Succeed(), "Failed to create test pod") + testPod, err = pods.WaitForCondition(ctx, client.ObjectKeyFromObject(testPod), corev1.PodReady, corev1.ConditionTrue, 5*time.Minute) + Expect(err).ToNot(HaveOccurred()) + defer func() { + if testPod != nil { + testlog.Infof("deleting pod %q", testPod.Name) + Expect(pods.Delete(ctx, testPod)).To(BeTrue(), "Failed to delete test pod") + } + }() + + cpusetCfg := &controller.CpuSet{} + for _, container := range testPod.Spec.Containers { + By(fmt.Sprintf("Collect comparable data for container %s", container.Name)) + Expect(getter.Container(ctx, testPod, container.Name, cpusetCfg)).To(Succeed(), "Failed to get cpuset config for test pod container %s", container.Name) + + cpusList, err := cpuset.Parse(cpusetCfg.Cpus) + Expect(err).ToNot(HaveOccurred(), "Failed to parse cpuset config for test pod container %s", container.Name) + Expect(cpusList.List()).ToNot(BeEmpty()) + // assumes no shared configured in this suite + firstExclusiveCPU := cpusList.List()[0] + testlog.Infof("first exclusive CPU: %d, all exclusive CPUs: %s", firstExclusiveCPU, cpusList.String()) + + cpuRequest :=
container.Resources.Requests.Name(corev1.ResourceCPU, resource.DecimalSI).Value() + if cpuRequest < 1 { + // best-effort containers request no whole CPU; avoid a division by zero below + cpuRequest = 1 + } + // high enough factor to ensure that even with only 2 cpus, the functionality is preserved + retries := int(math.Ceil(float64(20) / float64(cpuRequest))) + testlog.Infof("cpu request: %d, retries: %d", cpuRequest, retries) + + // should we expect cpu affinity to the first CPU + isExclusiveCPURequest := false + if qos == corev1.PodQOSGuaranteed { + cpuRequestFloat := container.Resources.Requests.Name(corev1.ResourceCPU, resource.DecimalSI).AsFloat64Slow() + testlog.Infof("float value of cpu request: %f", cpuRequestFloat) + mod := int(cpuRequestFloat*1000) % 1000 + if mod == 0 { + isExclusiveCPURequest = true + } else { + testlog.Infof("cpu request is not an exclusive CPU request; exec process will be pinned to any isolated CPU") + } + } + + By("Run exec command on the pod and verify the process is pinned to the correct CPU") + testlog.Infof("exclusive CPU request: %t", isExclusiveCPURequest) + for i := 0; i < retries; i++ { + cmd := []string{"/bin/bash", "-c", "sleep 10 & SLPID=$!; ps -o psr -p $SLPID;"} + output, err := pods.ExecCommandOnPod(testclient.K8sClient, testPod, container.Name, cmd) + Expect(err).ToNot(HaveOccurred(), "Failed to exec command on the pod; retry %d", i) + strout := string(output) + testlog.Infof("retry %d: exec command output: %s", i, strout) + + strout = strings.ReplaceAll(strout, "PSR", "") + execProcessCPUs := strings.TrimSpace(strout) + Expect(execProcessCPUs).ToNot(BeEmpty(), "Failed to get exec process CPU; retry %d", i) + execProcessCPUInt, err := strconv.Atoi(execProcessCPUs) + Expect(err).ToNot(HaveOccurred()) + if isExclusiveCPURequest { + Expect(execProcessCPUInt).To(Equal(firstExclusiveCPU), "Exec process CPU is not the first exclusive CPU; retry %d", i) + } else { + if execProcessCPUInt != firstExclusiveCPU { + testlog.Infof("retry %d: exec process was pinned to a different CPU than the first exclusive CPU: firstExclusiveCPU: %d, foundCPU: %d", i, firstExclusiveCPU, execProcessCPUInt) + break + } + } + } + } + }, + Entry("guaranteed pod single container with exclusive CPUs", + corev1.PodQOSGuaranteed, + []corev1.ResourceList{ + { // cnt1 resources + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }), + Entry("guaranteed pod multiple containers with fractional CPU request (300m)", + corev1.PodQOSGuaranteed, + []corev1.ResourceList{ + { // cnt1 resources + corev1.ResourceCPU: resource.MustParse("300m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + { // cnt2 resources + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }), + Entry("guaranteed pod with two containers with exclusive CPUs", + corev1.PodQOSGuaranteed, + []corev1.ResourceList{ + { // cnt1 resources + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + { // cnt2 resources + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }), + Entry("guaranteed pod multiple containers with fractional CPU request (2300m)", + corev1.PodQOSGuaranteed, + []corev1.ResourceList{ + { // cnt1 resources + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + { // cnt2 resources + corev1.ResourceCPU: resource.MustParse("2300m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }), + Entry("best-effort pod without CPU requests", + corev1.PodQOSBestEffort, +
[]corev1.ResourceList{ + //cnt1 resources + {}, + }), + Entry("burstable pod with CPU request", + corev1.PodQOSBurstable, + []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }), + ) + }) + }) }) func extractConfigInfo(output string) (*ContainerConfig, error) { @@ -1401,24 +1566,6 @@ func getTestPodWithAnnotations(annotations map[string]string, cpus int) *corev1. return testpod } -func deleteTestPod(ctx context.Context, testpod *corev1.Pod) (types.UID, bool) { - // it possible that the pod already was deleted as part of the test, in this case we want to skip teardown - err := testclient.DataPlaneClient.Get(ctx, client.ObjectKeyFromObject(testpod), testpod) - if errors.IsNotFound(err) { - return "", false - } - - testpodUID := testpod.UID - - err = testclient.DataPlaneClient.Delete(ctx, testpod) - Expect(err).ToNot(HaveOccurred()) - - err = pods.WaitForDeletion(ctx, testpod, pods.DefaultDeletionTimeout*time.Second) - Expect(err).ToNot(HaveOccurred()) - - return testpodUID, true -} - func cpuSpecToString(cpus *performancev2.CPU) (string, error) { if cpus == nil { return "", fmt.Errorf("performance CPU field is nil") diff --git a/test/e2e/performanceprofile/functests/1_performance/irqbalance.go b/test/e2e/performanceprofile/functests/1_performance/irqbalance.go index 35cae4d387..818a370f97 100644 --- a/test/e2e/performanceprofile/functests/1_performance/irqbalance.go +++ b/test/e2e/performanceprofile/functests/1_performance/irqbalance.go @@ -213,7 +213,7 @@ var _ = Describe("[performance] Checking IRQBalance settings", Ordered, func() { defer func() { if testpod != nil { testlog.Infof("deleting pod %q", testpod.Name) - deleteTestPod(context.TODO(), testpod) + Expect(pods.Delete(context.TODO(), testpod)).To(BeTrue(), "Failed to delete pod") } bannedCPUs, err := getIrqBalanceBannedCPUs(context.TODO(), targetNode) Expect(err).ToNot(HaveOccurred(), "failed to extract the banned CPUs from node %q", targetNode.Name) diff --git a/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go b/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go index 856788b764..17ee13e440 100644 --- a/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go +++ b/test/e2e/performanceprofile/functests/2_performance_update/updating_profile.go @@ -14,6 +14,7 @@ import ( .
"github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" @@ -29,6 +30,8 @@ import ( profilecomponent "github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/controller/performanceprofile/components/profile" manifestsutil "github.com/openshift/cluster-node-tuning-operator/pkg/util" testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils" + "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/cgroup" + "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/cgroup/controller" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/cgroup/runtime" testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/cluster" @@ -1270,6 +1273,109 @@ var _ = Describe("[rfe_id:28761][performance] Updating parameters in performance Entry("test without ContainerRuntimeConfig", false), Entry("create and test with ContainerRuntimeConfig", true), ) + + When("exec-cpu-affinity is disabled", func() { + var getter cgroup.ControllersGetter + var initialProfile, profile *performancev2.PerformanceProfile + var needsUpdate bool + + BeforeEach(func() { + By("Checking if exec-cpu-affinity is disabled if not disable it") + initialProfile, _ = profiles.GetByNodeLabels(testutils.NodeSelectorLabels) + Expect(initialProfile).ToNot(BeNil(), "Failed to get performance profile") + profile = initialProfile.DeepCopy() + needsUpdate = true + if initialProfile.Annotations != nil { + val, ok := initialProfile.Annotations[performancev2.PerformanceProfileExecCPUAffinityAnnotation] + if ok && val == performancev2.PerformanceProfileExecCPUAffinityNone { + profile.Annotations[performancev2.PerformanceProfileExecCPUAffinityAnnotation] = performancev2.PerformanceProfileExecCPUAffinityNone + needsUpdate = false + } + } + + if !needsUpdate { + testlog.Infof("profile already has disabled execCPUAffinity: %+v", initialProfile.Annotations) + return + } + + By("Updating the profile") + profiles.UpdateWithRetry(profile) + poolName := poolname.GetByProfile(context.TODO(), profile) + By(fmt.Sprintf("Applying changes in performance profile and waiting until %s will start updating", poolName)) + profilesupdate.WaitForTuningUpdating(context.TODO(), profile) + By(fmt.Sprintf("Waiting when %s finishes updates", poolName)) + profilesupdate.WaitForTuningUpdated(context.TODO(), profile) + + var err error + getter, err = cgroup.BuildGetter(context.TODO(), testclient.DataPlaneClient, testclient.K8sClient) + Expect(err).ToNot(HaveOccurred()) + Expect(getter).ToNot(BeNil()) + }) + AfterEach(func() { + if needsUpdate { + By("Restoring the profile") + profiles.UpdateWithRetry(initialProfile) + poolName := poolname.GetByProfile(context.TODO(), initialProfile) + By(fmt.Sprintf("Applying changes in performance profile and waiting until %s will start updating", poolName)) + profilesupdate.WaitForTuningUpdating(context.TODO(), initialProfile) + By(fmt.Sprintf("Waiting when %s finishes updates", poolName)) + profilesupdate.WaitForTuningUpdated(context.TODO(), initialProfile) + } + }) + + It("should pin exec process to any exclusive CPU - guaranteed pod 
single container", func() { + By("Creating a guaranteed test pod") + ctx := context.TODO() + testPod := testpodTemplate.DeepCopy() + testPod.Spec.NodeName = workerRTNodes[0].Name + testPod.Spec.NodeSelector = map[string]string{testutils.LabelHostname: workerRTNodes[0].Name} + testPod.Spec.Containers[0].Resources.Limits = corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + } + Expect(testclient.Client.Create(ctx, testPod)).To(Succeed(), "Failed to create test pod: %+v", testPod) + testPod, err = pods.WaitForCondition(ctx, client.ObjectKeyFromObject(testPod), corev1.PodReady, corev1.ConditionTrue, 5*time.Minute) + Expect(err).ToNot(HaveOccurred()) + defer func() { + if testPod != nil { + testlog.Infof("deleting pod %q", testPod.Name) + Expect(pods.Delete(ctx, testPod)).To(BeTrue(), "Failed to delete test pod") + } + }() + + cpusetCfg := &controller.CpuSet{} + Expect(getter.Container(ctx, testPod, testPod.Spec.Containers[0].Name, cpusetCfg)).To(Succeed(), "Failed to get cpuset config for test pod") + + cpusList, err := cpuset.Parse(cpusetCfg.Cpus) + Expect(err).ToNot(HaveOccurred()) + Expect(cpusList).ToNot(BeEmpty()) + firstExclusiveCPU := cpusList.List()[0] + testlog.Infof("first exclusive CPU: %d, all exclusive CPUs: %s", firstExclusiveCPU, cpusList.String()) + + // high enough factor to ensure that even with only 2 cpus, the functionality is preserved + retries := 20 + By("Run exec command on the pod and verify the process is pinned not only to the first exclusive CPU") + + for i := 0; i < retries; i++ { + cmd := []string{"/bin/bash", "-c", "sleep 10 & SLPID=$!; ps -o psr -p $SLPID;"} + output, err := pods.ExecCommandOnPod(testclient.K8sClient, testPod, testPod.Spec.Containers[0].Name, cmd) + Expect(err).ToNot(HaveOccurred(), "Failed to exec command on the pod; retry %d", i) + strout := string(output) + testlog.Infof("retry %d: exec command output: %s", i, strout) + + strout = strings.ReplaceAll(strout, "PSR", "") + execProcessCPUs := strings.TrimSpace(strout) + Expect(execProcessCPUs).ToNot(BeEmpty(), "Failed to get exec process CPU; retry %d", i) + execProcessCPUInt, err := strconv.Atoi(execProcessCPUs) + Expect(err).ToNot(HaveOccurred()) + if execProcessCPUInt != firstExclusiveCPU { + testlog.Infof(" retry %d: exec process was pinned to a different CPU than the first exclusive CPU: firstExclusiveCPU: %d, foundCPU: %d", i, firstExclusiveCPU, execProcessCPUInt) + return + } + } + Fail("Exec process was not pinned to the different CPU than the first exclusive CPU; retried %d times", retries) + }) + }) }) }) diff --git a/test/e2e/performanceprofile/functests/utils/pods/pods.go b/test/e2e/performanceprofile/functests/utils/pods/pods.go index 8e3f93d200..4be95d985c 100644 --- a/test/e2e/performanceprofile/functests/utils/pods/pods.go +++ b/test/e2e/performanceprofile/functests/utils/pods/pods.go @@ -20,11 +20,15 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/remotecommand" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" + "github.com/openshift/cluster-node-tuning-operator/pkg/performanceprofile/controller/performanceprofile/components" + testutils "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils" testclient "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/client" 
"github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/events" "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/images" + "github.com/openshift/cluster-node-tuning-operator/test/e2e/performanceprofile/functests/utils/profiles" ) // DefaultDeletionTimeout contains the default pod deletion timeout in seconds @@ -51,6 +55,55 @@ func GetTestPod() *corev1.Pod { } } +func MakePodWithResources(ctx context.Context, workerRTNode *corev1.Node, qos corev1.PodQOSClass, containersResources []corev1.ResourceList) *corev1.Pod { + if len(containersResources) == 0 { + klog.ErrorS(fmt.Errorf("no containers resources provided"), "you must provide at least one container resource data (it can be empty)") + return nil + } + + testPod := GetTestPod() + testPod.Namespace = testutils.NamespaceTesting + testPod.Spec.NodeSelector = map[string]string{testutils.LabelHostname: workerRTNode.Name} + cntSample := testPod.Spec.Containers[0].DeepCopy() + testPod.Spec.Containers = []corev1.Container{} + + for i, containerResources := range containersResources { + cnt := cntSample.DeepCopy() + cnt.Name = fmt.Sprintf("test%d", i+1) + cnt.Resources.Requests = containerResources + if qos == corev1.PodQOSGuaranteed { + cnt.Resources.Limits = containerResources + } + testPod.Spec.Containers = append(testPod.Spec.Containers, *cnt) + } + profile, _ := profiles.GetByNodeLabels(testutils.NodeSelectorLabels) + runtimeClass := components.GetComponentName(profile.Name, components.ComponentNamePrefix) + testPod.Spec.RuntimeClassName = &runtimeClass + return testPod +} + +func Delete(ctx context.Context, pod *corev1.Pod) bool { + err := testclient.DataPlaneClient.Get(ctx, client.ObjectKeyFromObject(pod), pod) + if errors.IsNotFound(err) { + klog.InfoS("pod already deleted", "namespace", pod.Namespace, "name", pod.Name) + return true + } + + err = testclient.DataPlaneClient.Delete(ctx, pod) + if err != nil { + klog.ErrorS(err, "failed to delete pod", "namespace", pod.Namespace, "name", pod.Name) + return false + } + + err = WaitForDeletion(ctx, pod, DefaultDeletionTimeout*time.Second) + if err != nil { + klog.ErrorS(err, "failed to wait for pod deletion", "namespace", pod.Namespace, "name", pod.Name) + return false + } + + return true +} + // WaitForDeletion waits until the pod will be removed from the cluster func WaitForDeletion(ctx context.Context, pod *corev1.Pod, timeout time.Duration) error { return wait.PollUntilContextTimeout(ctx, time.Second, timeout, true, func(ctx context.Context) (bool, error) { diff --git a/test/e2e/performanceprofile/functests/utils/resources/resources.go b/test/e2e/performanceprofile/functests/utils/resources/resources.go new file mode 100644 index 0000000000..3256613213 --- /dev/null +++ b/test/e2e/performanceprofile/functests/utils/resources/resources.go @@ -0,0 +1,27 @@ +package resources + +import ( + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +// TODO: handle init containers vs application containers + +func TotalCPUsRounded(containersResources []corev1.ResourceList) int { + totalCPUs := *resource.NewQuantity(0, resource.DecimalSI) + for _, containerResources := range containersResources { + totalCPUs.Add(*containerResources.Cpu()) + } + return int(totalCPUs.Value()) +} + +func MaxCPURequestsRounded(containersResources []corev1.ResourceList) int { + maxPodCpus := 0 + for _, containerResources := range containersResources { + current := int(containerResources.Cpu().Value()) + if current > 
maxPodCpus { + maxPodCpus = current + } + } + return maxPodCpus +} diff --git a/test/e2e/performanceprofile/functests/utils/resources/resources_test.go b/test/e2e/performanceprofile/functests/utils/resources/resources_test.go new file mode 100644 index 0000000000..985fd10de8 --- /dev/null +++ b/test/e2e/performanceprofile/functests/utils/resources/resources_test.go @@ -0,0 +1,630 @@ +package resources + +import ( + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +func TestTotalCPUsRounded(t *testing.T) { + tests := []struct { + name string + containersResources []corev1.ResourceList + expectedTotalCPUs int + }{ + { + name: "zero containers - empty slice", + containersResources: []corev1.ResourceList{}, + expectedTotalCPUs: 0, + }, + { + name: "single container - no CPU resource", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + expectedTotalCPUs: 0, + }, + { + name: "single container - empty ResourceList", + containersResources: []corev1.ResourceList{ + {}, + }, + expectedTotalCPUs: 0, + }, + { + name: "single container - 1 CPU", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + expectedTotalCPUs: 1, + }, + { + name: "single container - 2 CPUs", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + expectedTotalCPUs: 2, + }, + { + name: "single container - fractional CPU (500m)", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + expectedTotalCPUs: 1, // 500m (0.5 CPU) rounded up to 1 + }, + { + name: "single container - fractional CPU (1500m)", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("1500m"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + expectedTotalCPUs: 2, // 1500m (1.5 CPU) rounded up to 2 + }, + { + name: "two containers - same CPU value", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + expectedTotalCPUs: 4, + }, + { + name: "two containers - different CPU values", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("300Mi"), + }, + }, + expectedTotalCPUs: 4, + }, + { + name: "two containers - one with no CPU", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + { + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + expectedTotalCPUs: 2, + }, + { + name: "two containers - both with no CPU", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + { + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + expectedTotalCPUs: 0, + }, + { + name: "three containers - mixed CPU values", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("1"), + 
corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("400Mi"), + }, + }, + expectedTotalCPUs: 7, + }, + { + name: "three containers - with fractional CPUs that sum exactly", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("50Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("50Mi"), + }, + }, + expectedTotalCPUs: 1, // 500m + 500m = 1000m = 1 CPU + }, + { + name: "multiple containers - with fractional CPUs", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("2300m"), // 2.3 CPUs + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("500m"), // 0.5 CPUs + corev1.ResourceMemory: resource.MustParse("50Mi"), + }, + }, + expectedTotalCPUs: 4, // 1 + 2.3 + 0.5 = 3.8, rounded up to 4 + }, + { + name: "four containers - large CPU values", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("8"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + { + corev1.ResourceCPU: resource.MustParse("16"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + { + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("500Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("32"), + corev1.ResourceMemory: resource.MustParse("4Gi"), + }, + }, + expectedTotalCPUs: 60, + }, + { + name: "multiple containers - all empty ResourceLists", + containersResources: []corev1.ResourceList{ + {}, + {}, + {}, + }, + expectedTotalCPUs: 0, + }, + { + name: "multiple containers - mix of empty and non-empty", + containersResources: []corev1.ResourceList{ + {}, + { + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + {}, + { + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + expectedTotalCPUs: 3, + }, + { + name: "five containers - sequential CPU values", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("300Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("400Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("5"), + corev1.ResourceMemory: resource.MustParse("500Mi"), + }, + }, + expectedTotalCPUs: 15, + }, + { + name: "multiple containers - fractional CPUs that sum to whole number", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("250m"), + corev1.ResourceMemory: resource.MustParse("25Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("250m"), + corev1.ResourceMemory: resource.MustParse("25Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("250m"), + corev1.ResourceMemory: resource.MustParse("25Mi"), + }, + { + corev1.ResourceCPU: 
resource.MustParse("250m"), + corev1.ResourceMemory: resource.MustParse("25Mi"), + }, + }, + expectedTotalCPUs: 1, // 250m * 4 = 1000m = 1 CPU exactly + }, + { + name: "multiple containers - mix of whole and fractional CPUs", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("750m"), + corev1.ResourceMemory: resource.MustParse("75Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("250m"), + corev1.ResourceMemory: resource.MustParse("25Mi"), + }, + }, + expectedTotalCPUs: 4, // 1 + 0.75 + 2 + 0.25 = 4.0 exactly + }, + { + name: "multiple containers - some with CPU, some without", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + { + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("300Mi"), + }, + { + corev1.ResourceMemory: resource.MustParse("150Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + expectedTotalCPUs: 6, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := TotalCPUsRounded(tt.containersResources) + if result != tt.expectedTotalCPUs { + t.Errorf("TotalCPUsRounded() = %d, want %d", result, tt.expectedTotalCPUs) + } + }) + } +} + +func TestMaxCPURequestsRounded(t *testing.T) { + tests := []struct { + name string + containersResources []corev1.ResourceList + expectedMaxCPUs int + }{ + { + name: "zero containers - empty slice", + containersResources: []corev1.ResourceList{}, + expectedMaxCPUs: 0, + }, + { + name: "single container - no CPU resource", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + expectedMaxCPUs: 0, + }, + { + name: "single container - empty ResourceList", + containersResources: []corev1.ResourceList{ + {}, + }, + expectedMaxCPUs: 0, + }, + { + name: "single container - 1 CPU", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + expectedMaxCPUs: 1, + }, + { + name: "single container - 5 CPUs", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("5"), + corev1.ResourceMemory: resource.MustParse("500Mi"), + }, + }, + expectedMaxCPUs: 5, + }, + { + name: "single container - fractional CPU (500m rounded up to 1)", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + expectedMaxCPUs: 1, // 500m (0.5 CPU) rounded up to 1 + }, + { + name: "single container - fractional CPU (2300m rounded up to 3)", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("2300m"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + expectedMaxCPUs: 3, // 2300m (2.3 CPU) rounded up to 3 + }, + { + name: "two containers - same CPU value", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + { + corev1.ResourceCPU: 
resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + expectedMaxCPUs: 2, + }, + { + name: "two containers - first is max", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("5"), + corev1.ResourceMemory: resource.MustParse("500Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + expectedMaxCPUs: 5, + }, + { + name: "two containers - second is max", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("8"), + corev1.ResourceMemory: resource.MustParse("800Mi"), + }, + }, + expectedMaxCPUs: 8, + }, + { + name: "two containers - one with no CPU", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("300Mi"), + }, + { + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + }, + expectedMaxCPUs: 3, + }, + { + name: "two containers - both with no CPU", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + { + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + expectedMaxCPUs: 0, + }, + { + name: "three containers - middle is max", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("10"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + { + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("400Mi"), + }, + }, + expectedMaxCPUs: 10, + }, + { + name: "three containers - last is max", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("16"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + expectedMaxCPUs: 16, + }, + { + name: "multiple containers - with fractional CPUs", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("2300m"), // 2.3 CPUs, rounded up to 3 + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("500m"), // 0.5 CPUs, rounded up to 1 + corev1.ResourceMemory: resource.MustParse("50Mi"), + }, + }, + expectedMaxCPUs: 3, // max of 1, 3, 1 + }, + { + name: "multiple containers - large CPU values", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("8"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + { + corev1.ResourceCPU: resource.MustParse("64"), + corev1.ResourceMemory: resource.MustParse("8Gi"), + }, + { + corev1.ResourceCPU: resource.MustParse("32"), + corev1.ResourceMemory: resource.MustParse("4Gi"), + }, + { + corev1.ResourceCPU: resource.MustParse("16"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + }, + expectedMaxCPUs: 64, + }, + { + name: "multiple containers - all empty ResourceLists", + containersResources: []corev1.ResourceList{ + {}, + {}, + {}, + }, + expectedMaxCPUs: 0, + }, + { + name: 
"multiple containers - mix of empty and non-empty", + containersResources: []corev1.ResourceList{ + {}, + { + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + {}, + { + corev1.ResourceCPU: resource.MustParse("5"), + corev1.ResourceMemory: resource.MustParse("500Mi"), + }, + }, + expectedMaxCPUs: 5, + }, + { + name: "five containers - ascending order", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("1"), + corev1.ResourceMemory: resource.MustParse("100Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("3"), + corev1.ResourceMemory: resource.MustParse("300Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("400Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("5"), + corev1.ResourceMemory: resource.MustParse("500Mi"), + }, + }, + expectedMaxCPUs: 5, + }, + { + name: "five containers - descending order", + containersResources: []corev1.ResourceList{ + { + corev1.ResourceCPU: resource.MustParse("10"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + { + corev1.ResourceCPU: resource.MustParse("8"), + corev1.ResourceMemory: resource.MustParse("800Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("6"), + corev1.ResourceMemory: resource.MustParse("600Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("4"), + corev1.ResourceMemory: resource.MustParse("400Mi"), + }, + { + corev1.ResourceCPU: resource.MustParse("2"), + corev1.ResourceMemory: resource.MustParse("200Mi"), + }, + }, + expectedMaxCPUs: 10, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := MaxCPURequestsRounded(tt.containersResources) + if result != tt.expectedMaxCPUs { + t.Errorf("MaxCPURequestsRounded() = %d, want %d", result, tt.expectedMaxCPUs) + } + }) + } +} diff --git a/test/e2e/performanceprofile/testdata/render-expected-output/bootstrap/extra-mcp/openshift-bootstrap-master_machineconfig.yaml b/test/e2e/performanceprofile/testdata/render-expected-output/bootstrap/extra-mcp/openshift-bootstrap-master_machineconfig.yaml index 9e03d4af61..5ea02d354f 100644 --- a/test/e2e/performanceprofile/testdata/render-expected-output/bootstrap/extra-mcp/openshift-bootstrap-master_machineconfig.yaml +++ b/test/e2e/performanceprofile/testdata/render-expected-output/bootstrap/extra-mcp/openshift-bootstrap-master_machineconfig.yaml @@ -43,7 +43,7 @@ spec: path: /usr/local/bin/clear-irqbalance-banned-cpus.sh user: {} - contents: - source: data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMi03IgoKCgoKIyBUaGUgQ1JJLU8gd2lsbCBjaGVjayB0aGUgYWxsb3dlZF9hbm5vdGF0aW9ucyB1bmRlciB0aGUgcnVudGltZSBoYW5kbGVyIGFuZCBhcHBseSBoaWdoLXBlcmZvcm1hbmNlIGhvb2tzIHdoZW4gb25lIG9mCiMgaGlnaC1wZXJmb3JtYW5jZSBhbm5vdGF0aW9ucyBwcmVzZW50cyB1bmRlciBpdC4KIyBXZSBzaG91bGQgcHJvdmlkZSB0aGUgcnVudGltZV9wYXRoIGJlY2F1c2Ugd2UgbmVlZCB0byBpbmZvcm0gdGhhdCB3ZSB3YW50IHRvIHJlLXVzZSBydW5jIGJpbmFyeSBhbmQgd2UKIyBkbyBub3QgaGF2ZSBoaWdoLXBlcmZvcm1hbmNlIGJpbmFyeSB1bmRlciB0aGUgJFBBVEggdGhhdCB3aWxsIHBvaW50IHRvIGl0LgpbY3Jpby5ydW50aW1lLnJ1bnRpbWVzLmhpZ2gtcGVyZm9ybWFuY2VdCmluaGVyaXRfZGVmYXVsdF9ydW50aW1lID0gdHJ1ZQphbGxvd2VkX2Fubm90YXRpb25zID0gWyJjcHUtbG9hZC1iYWxhbmNpbmcuY3Jpby5pbyIsICJjcHUtcXVvdGEuY3Jpby5pbyIsICJpcnEtbG9hZC1iYWxhbmNpbmcuY3Jpby5pbyIsICJjcHUtYy1zdGF0ZXMuY3Jpby5pbyIsICJjcHUtZnJlcS1nb3Zlcm5vci5jcmlvLmlvIl0K + source: 
data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMi03IgoKCgoKIyBUaGUgQ1JJLU8gd2lsbCBjaGVjayB0aGUgYWxsb3dlZF9hbm5vdGF0aW9ucyB1bmRlciB0aGUgcnVudGltZSBoYW5kbGVyIGFuZCBhcHBseSBoaWdoLXBlcmZvcm1hbmNlIGhvb2tzIHdoZW4gb25lIG9mCiMgaGlnaC1wZXJmb3JtYW5jZSBhbm5vdGF0aW9ucyBwcmVzZW50cyB1bmRlciBpdC4KIyBXZSBzaG91bGQgcHJvdmlkZSB0aGUgcnVudGltZV9wYXRoIGJlY2F1c2Ugd2UgbmVlZCB0byBpbmZvcm0gdGhhdCB3ZSB3YW50IHRvIHJlLXVzZSBydW5jIGJpbmFyeSBhbmQgd2UKIyBkbyBub3QgaGF2ZSBoaWdoLXBlcmZvcm1hbmNlIGJpbmFyeSB1bmRlciB0aGUgJFBBVEggdGhhdCB3aWxsIHBvaW50IHRvIGl0LgpbY3Jpby5ydW50aW1lLnJ1bnRpbWVzLmhpZ2gtcGVyZm9ybWFuY2VdCmluaGVyaXRfZGVmYXVsdF9ydW50aW1lID0gdHJ1ZQpleGVjX2NwdV9hZmZpbml0eSA9ICJmaXJzdCIKYWxsb3dlZF9hbm5vdGF0aW9ucyA9IFsiY3B1LWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LXF1b3RhLmNyaW8uaW8iLCAiaXJxLWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LWMtc3RhdGVzLmNyaW8uaW8iLCAiY3B1LWZyZXEtZ292ZXJub3IuY3Jpby5pbyJdCg== verification: {} group: {} mode: 420 diff --git a/test/e2e/performanceprofile/testdata/render-expected-output/bootstrap/extra-mcp/openshift-bootstrap-worker_machineconfig.yaml b/test/e2e/performanceprofile/testdata/render-expected-output/bootstrap/extra-mcp/openshift-bootstrap-worker_machineconfig.yaml index 509cfc04f8..f7964d18de 100644 --- a/test/e2e/performanceprofile/testdata/render-expected-output/bootstrap/extra-mcp/openshift-bootstrap-worker_machineconfig.yaml +++ b/test/e2e/performanceprofile/testdata/render-expected-output/bootstrap/extra-mcp/openshift-bootstrap-worker_machineconfig.yaml @@ -43,7 +43,7 @@ spec: path: /usr/local/bin/clear-irqbalance-banned-cpus.sh user: {} - contents: - source: data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMi0zIgoKCgoKIyBUaGUgQ1JJLU8gd2lsbCBjaGVjayB0aGUgYWxsb3dlZF9hbm5vdGF0aW9ucyB1bmRlciB0aGUgcnVudGltZSBoYW5kbGVyIGFuZCBhcHBseSBoaWdoLXBlcmZvcm1hbmNlIGhvb2tzIHdoZW4gb25lIG9mCiMgaGlnaC1wZXJmb3JtYW5jZSBhbm5vdGF0aW9ucyBwcmVzZW50cyB1bmRlciBpdC4KIyBXZSBzaG91bGQgcHJvdmlkZSB0aGUgcnVudGltZV9wYXRoIGJlY2F1c2Ugd2UgbmVlZCB0byBpbmZvcm0gdGhhdCB3ZSB3YW50IHRvIHJlLXVzZSBydW5jIGJpbmFyeSBhbmQgd2UKIyBkbyBub3QgaGF2ZSBoaWdoLXBlcmZvcm1hbmNlIGJpbmFyeSB1bmRlciB0aGUgJFBBVEggdGhhdCB3aWxsIHBvaW50IHRvIGl0LgpbY3Jpby5ydW50aW1lLnJ1bnRpbWVzLmhpZ2gtcGVyZm9ybWFuY2VdCmluaGVyaXRfZGVmYXVsdF9ydW50aW1lID0gdHJ1ZQphbGxvd2VkX2Fubm90YXRpb25zID0gWyJjcHUtbG9hZC1iYWxhbmNpbmcuY3Jpby5pbyIsICJjcHUtcXVvdGEuY3Jpby5pbyIsICJpcnEtbG9hZC1iYWxhbmNpbmcuY3Jpby5pbyIsICJjcHUtYy1zdGF0ZXMuY3Jpby5pbyIsICJjcHUtZnJlcS1nb3Zlcm5vci5jcmlvLmlvIl0K + source: data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMi0zIgoKCgoKIyBUaGUgQ1JJLU8gd2lsbCBjaGVjayB0aGUgYWxsb3dlZF9hbm5vdGF0aW9ucyB1bmRlciB0aGUgcnVudGltZSBoYW5kbGVyIGFuZCBhcHBseSBoaWdoLXBlcmZvcm1hbmNlIGhvb2tzIHdoZW4gb25lIG9mCiMgaGlnaC1wZXJmb3JtYW5jZSBhbm5vdGF0aW9ucyBwcmVzZW50cyB1bmRlciBpdC4KIyBXZSBzaG91bGQgcHJvdmlkZSB0aGUgcnVudGltZV9wYXRoIGJlY2F1c2Ugd2UgbmVlZCB0byBpbmZvcm0gdGhhdCB3ZSB3YW50IHRvIHJlLXVzZSBydW5jIGJpbmFyeSBhbmQgd2UKIyBkbyBub3QgaGF2ZSBoaWdoLXBlcmZvcm1hbmNlIGJpbmFyeSB1bmRlciB0aGUgJFBBVEggdGhhdCB3aWxsIHBvaW50IHRvIGl0LgpbY3Jpby5ydW50aW1lLnJ1bnRpbWVzLmhpZ2gtcGVyZm9ybWFuY2VdCmluaGVyaXRfZGVmYXVsdF9ydW50aW1lID0gdHJ1ZQpleGVjX2NwdV9hZmZpbml0eSA9ICJmaXJzdCIKYWxsb3dlZF9hbm5vdGF0aW9ucyA9IFsiY3B1LWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LXF1b3RhLmNyaW8uaW8iLCAiaXJxLWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LWMtc3RhdGVzLmNyaW8uaW8iLCAiY3B1LWZyZXEtZ292ZXJub3IuY3Jpby5pbyJdCg== verification: {} group: {} mode: 420 diff --git 
a/test/e2e/performanceprofile/testdata/render-expected-output/bootstrap/no-mcp/openshift-bootstrap-master_machineconfig.yaml b/test/e2e/performanceprofile/testdata/render-expected-output/bootstrap/no-mcp/openshift-bootstrap-master_machineconfig.yaml index 9e03d4af61..5ea02d354f 100644 --- a/test/e2e/performanceprofile/testdata/render-expected-output/bootstrap/no-mcp/openshift-bootstrap-master_machineconfig.yaml +++ b/test/e2e/performanceprofile/testdata/render-expected-output/bootstrap/no-mcp/openshift-bootstrap-master_machineconfig.yaml @@ -43,7 +43,7 @@ spec: path: /usr/local/bin/clear-irqbalance-banned-cpus.sh user: {} - contents: - source: data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMi03IgoKCgoKIyBUaGUgQ1JJLU8gd2lsbCBjaGVjayB0aGUgYWxsb3dlZF9hbm5vdGF0aW9ucyB1bmRlciB0aGUgcnVudGltZSBoYW5kbGVyIGFuZCBhcHBseSBoaWdoLXBlcmZvcm1hbmNlIGhvb2tzIHdoZW4gb25lIG9mCiMgaGlnaC1wZXJmb3JtYW5jZSBhbm5vdGF0aW9ucyBwcmVzZW50cyB1bmRlciBpdC4KIyBXZSBzaG91bGQgcHJvdmlkZSB0aGUgcnVudGltZV9wYXRoIGJlY2F1c2Ugd2UgbmVlZCB0byBpbmZvcm0gdGhhdCB3ZSB3YW50IHRvIHJlLXVzZSBydW5jIGJpbmFyeSBhbmQgd2UKIyBkbyBub3QgaGF2ZSBoaWdoLXBlcmZvcm1hbmNlIGJpbmFyeSB1bmRlciB0aGUgJFBBVEggdGhhdCB3aWxsIHBvaW50IHRvIGl0LgpbY3Jpby5ydW50aW1lLnJ1bnRpbWVzLmhpZ2gtcGVyZm9ybWFuY2VdCmluaGVyaXRfZGVmYXVsdF9ydW50aW1lID0gdHJ1ZQphbGxvd2VkX2Fubm90YXRpb25zID0gWyJjcHUtbG9hZC1iYWxhbmNpbmcuY3Jpby5pbyIsICJjcHUtcXVvdGEuY3Jpby5pbyIsICJpcnEtbG9hZC1iYWxhbmNpbmcuY3Jpby5pbyIsICJjcHUtYy1zdGF0ZXMuY3Jpby5pbyIsICJjcHUtZnJlcS1nb3Zlcm5vci5jcmlvLmlvIl0K + source: data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMi03IgoKCgoKIyBUaGUgQ1JJLU8gd2lsbCBjaGVjayB0aGUgYWxsb3dlZF9hbm5vdGF0aW9ucyB1bmRlciB0aGUgcnVudGltZSBoYW5kbGVyIGFuZCBhcHBseSBoaWdoLXBlcmZvcm1hbmNlIGhvb2tzIHdoZW4gb25lIG9mCiMgaGlnaC1wZXJmb3JtYW5jZSBhbm5vdGF0aW9ucyBwcmVzZW50cyB1bmRlciBpdC4KIyBXZSBzaG91bGQgcHJvdmlkZSB0aGUgcnVudGltZV9wYXRoIGJlY2F1c2Ugd2UgbmVlZCB0byBpbmZvcm0gdGhhdCB3ZSB3YW50IHRvIHJlLXVzZSBydW5jIGJpbmFyeSBhbmQgd2UKIyBkbyBub3QgaGF2ZSBoaWdoLXBlcmZvcm1hbmNlIGJpbmFyeSB1bmRlciB0aGUgJFBBVEggdGhhdCB3aWxsIHBvaW50IHRvIGl0LgpbY3Jpby5ydW50aW1lLnJ1bnRpbWVzLmhpZ2gtcGVyZm9ybWFuY2VdCmluaGVyaXRfZGVmYXVsdF9ydW50aW1lID0gdHJ1ZQpleGVjX2NwdV9hZmZpbml0eSA9ICJmaXJzdCIKYWxsb3dlZF9hbm5vdGF0aW9ucyA9IFsiY3B1LWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LXF1b3RhLmNyaW8uaW8iLCAiaXJxLWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LWMtc3RhdGVzLmNyaW8uaW8iLCAiY3B1LWZyZXEtZ292ZXJub3IuY3Jpby5pbyJdCg== verification: {} group: {} mode: 420 diff --git a/test/e2e/performanceprofile/testdata/render-expected-output/bootstrap/no-mcp/openshift-bootstrap-worker_machineconfig.yaml b/test/e2e/performanceprofile/testdata/render-expected-output/bootstrap/no-mcp/openshift-bootstrap-worker_machineconfig.yaml index 509cfc04f8..f7964d18de 100644 --- a/test/e2e/performanceprofile/testdata/render-expected-output/bootstrap/no-mcp/openshift-bootstrap-worker_machineconfig.yaml +++ b/test/e2e/performanceprofile/testdata/render-expected-output/bootstrap/no-mcp/openshift-bootstrap-worker_machineconfig.yaml @@ -43,7 +43,7 @@ spec: path: /usr/local/bin/clear-irqbalance-banned-cpus.sh user: {} - contents: - source: 
data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMi0zIgoKCgoKIyBUaGUgQ1JJLU8gd2lsbCBjaGVjayB0aGUgYWxsb3dlZF9hbm5vdGF0aW9ucyB1bmRlciB0aGUgcnVudGltZSBoYW5kbGVyIGFuZCBhcHBseSBoaWdoLXBlcmZvcm1hbmNlIGhvb2tzIHdoZW4gb25lIG9mCiMgaGlnaC1wZXJmb3JtYW5jZSBhbm5vdGF0aW9ucyBwcmVzZW50cyB1bmRlciBpdC4KIyBXZSBzaG91bGQgcHJvdmlkZSB0aGUgcnVudGltZV9wYXRoIGJlY2F1c2Ugd2UgbmVlZCB0byBpbmZvcm0gdGhhdCB3ZSB3YW50IHRvIHJlLXVzZSBydW5jIGJpbmFyeSBhbmQgd2UKIyBkbyBub3QgaGF2ZSBoaWdoLXBlcmZvcm1hbmNlIGJpbmFyeSB1bmRlciB0aGUgJFBBVEggdGhhdCB3aWxsIHBvaW50IHRvIGl0LgpbY3Jpby5ydW50aW1lLnJ1bnRpbWVzLmhpZ2gtcGVyZm9ybWFuY2VdCmluaGVyaXRfZGVmYXVsdF9ydW50aW1lID0gdHJ1ZQphbGxvd2VkX2Fubm90YXRpb25zID0gWyJjcHUtbG9hZC1iYWxhbmNpbmcuY3Jpby5pbyIsICJjcHUtcXVvdGEuY3Jpby5pbyIsICJpcnEtbG9hZC1iYWxhbmNpbmcuY3Jpby5pbyIsICJjcHUtYy1zdGF0ZXMuY3Jpby5pbyIsICJjcHUtZnJlcS1nb3Zlcm5vci5jcmlvLmlvIl0K + source: data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMi0zIgoKCgoKIyBUaGUgQ1JJLU8gd2lsbCBjaGVjayB0aGUgYWxsb3dlZF9hbm5vdGF0aW9ucyB1bmRlciB0aGUgcnVudGltZSBoYW5kbGVyIGFuZCBhcHBseSBoaWdoLXBlcmZvcm1hbmNlIGhvb2tzIHdoZW4gb25lIG9mCiMgaGlnaC1wZXJmb3JtYW5jZSBhbm5vdGF0aW9ucyBwcmVzZW50cyB1bmRlciBpdC4KIyBXZSBzaG91bGQgcHJvdmlkZSB0aGUgcnVudGltZV9wYXRoIGJlY2F1c2Ugd2UgbmVlZCB0byBpbmZvcm0gdGhhdCB3ZSB3YW50IHRvIHJlLXVzZSBydW5jIGJpbmFyeSBhbmQgd2UKIyBkbyBub3QgaGF2ZSBoaWdoLXBlcmZvcm1hbmNlIGJpbmFyeSB1bmRlciB0aGUgJFBBVEggdGhhdCB3aWxsIHBvaW50IHRvIGl0LgpbY3Jpby5ydW50aW1lLnJ1bnRpbWVzLmhpZ2gtcGVyZm9ybWFuY2VdCmluaGVyaXRfZGVmYXVsdF9ydW50aW1lID0gdHJ1ZQpleGVjX2NwdV9hZmZpbml0eSA9ICJmaXJzdCIKYWxsb3dlZF9hbm5vdGF0aW9ucyA9IFsiY3B1LWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LXF1b3RhLmNyaW8uaW8iLCAiaXJxLWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LWMtc3RhdGVzLmNyaW8uaW8iLCAiY3B1LWZyZXEtZ292ZXJub3IuY3Jpby5pbyJdCg== verification: {} group: {} mode: 420 diff --git a/test/e2e/performanceprofile/testdata/render-expected-output/default/arm/manual_machineconfig.yaml b/test/e2e/performanceprofile/testdata/render-expected-output/default/arm/manual_machineconfig.yaml index 97f6c682ec..8f749a1eab 100644 --- a/test/e2e/performanceprofile/testdata/render-expected-output/default/arm/manual_machineconfig.yaml +++ b/test/e2e/performanceprofile/testdata/render-expected-output/default/arm/manual_machineconfig.yaml @@ -42,7 +42,7 @@ spec: path: /usr/local/bin/clear-irqbalance-banned-cpus.sh user: {} - contents: - source: data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMCIKCgoKCiMgVGhlIENSSS1PIHdpbGwgY2hlY2sgdGhlIGFsbG93ZWRfYW5ub3RhdGlvbnMgdW5kZXIgdGhlIHJ1bnRpbWUgaGFuZGxlciBhbmQgYXBwbHkgaGlnaC1wZXJmb3JtYW5jZSBob29rcyB3aGVuIG9uZSBvZgojIGhpZ2gtcGVyZm9ybWFuY2UgYW5ub3RhdGlvbnMgcHJlc2VudHMgdW5kZXIgaXQuCiMgV2Ugc2hvdWxkIHByb3ZpZGUgdGhlIHJ1bnRpbWVfcGF0aCBiZWNhdXNlIHdlIG5lZWQgdG8gaW5mb3JtIHRoYXQgd2Ugd2FudCB0byByZS11c2UgcnVuYyBiaW5hcnkgYW5kIHdlCiMgZG8gbm90IGhhdmUgaGlnaC1wZXJmb3JtYW5jZSBiaW5hcnkgdW5kZXIgdGhlICRQQVRIIHRoYXQgd2lsbCBwb2ludCB0byBpdC4KW2NyaW8ucnVudGltZS5ydW50aW1lcy5oaWdoLXBlcmZvcm1hbmNlXQppbmhlcml0X2RlZmF1bHRfcnVudGltZSA9IHRydWUKYWxsb3dlZF9hbm5vdGF0aW9ucyA9IFsiY3B1LWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LXF1b3RhLmNyaW8uaW8iLCAiaXJxLWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LWMtc3RhdGVzLmNyaW8uaW8iLCAiY3B1LWZyZXEtZ292ZXJub3IuY3Jpby5pbyJdCg== + source: 
data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMCIKCgoKCiMgVGhlIENSSS1PIHdpbGwgY2hlY2sgdGhlIGFsbG93ZWRfYW5ub3RhdGlvbnMgdW5kZXIgdGhlIHJ1bnRpbWUgaGFuZGxlciBhbmQgYXBwbHkgaGlnaC1wZXJmb3JtYW5jZSBob29rcyB3aGVuIG9uZSBvZgojIGhpZ2gtcGVyZm9ybWFuY2UgYW5ub3RhdGlvbnMgcHJlc2VudHMgdW5kZXIgaXQuCiMgV2Ugc2hvdWxkIHByb3ZpZGUgdGhlIHJ1bnRpbWVfcGF0aCBiZWNhdXNlIHdlIG5lZWQgdG8gaW5mb3JtIHRoYXQgd2Ugd2FudCB0byByZS11c2UgcnVuYyBiaW5hcnkgYW5kIHdlCiMgZG8gbm90IGhhdmUgaGlnaC1wZXJmb3JtYW5jZSBiaW5hcnkgdW5kZXIgdGhlICRQQVRIIHRoYXQgd2lsbCBwb2ludCB0byBpdC4KW2NyaW8ucnVudGltZS5ydW50aW1lcy5oaWdoLXBlcmZvcm1hbmNlXQppbmhlcml0X2RlZmF1bHRfcnVudGltZSA9IHRydWUKZXhlY19jcHVfYWZmaW5pdHkgPSAiZmlyc3QiCmFsbG93ZWRfYW5ub3RhdGlvbnMgPSBbImNwdS1sb2FkLWJhbGFuY2luZy5jcmlvLmlvIiwgImNwdS1xdW90YS5jcmlvLmlvIiwgImlycS1sb2FkLWJhbGFuY2luZy5jcmlvLmlvIiwgImNwdS1jLXN0YXRlcy5jcmlvLmlvIiwgImNwdS1mcmVxLWdvdmVybm9yLmNyaW8uaW8iXQo= verification: {} group: {} mode: 420 diff --git a/test/e2e/performanceprofile/testdata/render-expected-output/default/cpuFrequency/manual_machineconfig.yaml b/test/e2e/performanceprofile/testdata/render-expected-output/default/cpuFrequency/manual_machineconfig.yaml index 561902fb3d..b3c6203238 100644 --- a/test/e2e/performanceprofile/testdata/render-expected-output/default/cpuFrequency/manual_machineconfig.yaml +++ b/test/e2e/performanceprofile/testdata/render-expected-output/default/cpuFrequency/manual_machineconfig.yaml @@ -42,7 +42,7 @@ spec: path: /usr/local/bin/clear-irqbalance-banned-cpus.sh user: {} - contents: - source: data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMC0xIgoKCgoKIyBUaGUgQ1JJLU8gd2lsbCBjaGVjayB0aGUgYWxsb3dlZF9hbm5vdGF0aW9ucyB1bmRlciB0aGUgcnVudGltZSBoYW5kbGVyIGFuZCBhcHBseSBoaWdoLXBlcmZvcm1hbmNlIGhvb2tzIHdoZW4gb25lIG9mCiMgaGlnaC1wZXJmb3JtYW5jZSBhbm5vdGF0aW9ucyBwcmVzZW50cyB1bmRlciBpdC4KIyBXZSBzaG91bGQgcHJvdmlkZSB0aGUgcnVudGltZV9wYXRoIGJlY2F1c2Ugd2UgbmVlZCB0byBpbmZvcm0gdGhhdCB3ZSB3YW50IHRvIHJlLXVzZSBydW5jIGJpbmFyeSBhbmQgd2UKIyBkbyBub3QgaGF2ZSBoaWdoLXBlcmZvcm1hbmNlIGJpbmFyeSB1bmRlciB0aGUgJFBBVEggdGhhdCB3aWxsIHBvaW50IHRvIGl0LgpbY3Jpby5ydW50aW1lLnJ1bnRpbWVzLmhpZ2gtcGVyZm9ybWFuY2VdCmluaGVyaXRfZGVmYXVsdF9ydW50aW1lID0gdHJ1ZQphbGxvd2VkX2Fubm90YXRpb25zID0gWyJjcHUtbG9hZC1iYWxhbmNpbmcuY3Jpby5pbyIsICJjcHUtcXVvdGEuY3Jpby5pbyIsICJpcnEtbG9hZC1iYWxhbmNpbmcuY3Jpby5pbyIsICJjcHUtYy1zdGF0ZXMuY3Jpby5pbyIsICJjcHUtZnJlcS1nb3Zlcm5vci5jcmlvLmlvIl0K + source: data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMC0xIgoKCgoKIyBUaGUgQ1JJLU8gd2lsbCBjaGVjayB0aGUgYWxsb3dlZF9hbm5vdGF0aW9ucyB1bmRlciB0aGUgcnVudGltZSBoYW5kbGVyIGFuZCBhcHBseSBoaWdoLXBlcmZvcm1hbmNlIGhvb2tzIHdoZW4gb25lIG9mCiMgaGlnaC1wZXJmb3JtYW5jZSBhbm5vdGF0aW9ucyBwcmVzZW50cyB1bmRlciBpdC4KIyBXZSBzaG91bGQgcHJvdmlkZSB0aGUgcnVudGltZV9wYXRoIGJlY2F1c2Ugd2UgbmVlZCB0byBpbmZvcm0gdGhhdCB3ZSB3YW50IHRvIHJlLXVzZSBydW5jIGJpbmFyeSBhbmQgd2UKIyBkbyBub3QgaGF2ZSBoaWdoLXBlcmZvcm1hbmNlIGJpbmFyeSB1bmRlciB0aGUgJFBBVEggdGhhdCB3aWxsIHBvaW50IHRvIGl0LgpbY3Jpby5ydW50aW1lLnJ1bnRpbWVzLmhpZ2gtcGVyZm9ybWFuY2VdCmluaGVyaXRfZGVmYXVsdF9ydW50aW1lID0gdHJ1ZQpleGVjX2NwdV9hZmZpbml0eSA9ICJmaXJzdCIKYWxsb3dlZF9hbm5vdGF0aW9ucyA9IFsiY3B1LWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LXF1b3RhLmNyaW8uaW8iLCAiaXJxLWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LWMtc3RhdGVzLmNyaW8uaW8iLCAiY3B1LWZyZXEtZ292ZXJub3IuY3Jpby5pbyJdCg== verification: {} group: {} mode: 420 diff --git a/test/e2e/performanceprofile/testdata/render-expected-output/default/manual_machineconfig.yaml 
b/test/e2e/performanceprofile/testdata/render-expected-output/default/manual_machineconfig.yaml index 8a6ef4029d..c7d67f403a 100644 --- a/test/e2e/performanceprofile/testdata/render-expected-output/default/manual_machineconfig.yaml +++ b/test/e2e/performanceprofile/testdata/render-expected-output/default/manual_machineconfig.yaml @@ -43,7 +43,7 @@ spec: path: /usr/local/bin/clear-irqbalance-banned-cpus.sh user: {} - contents: - source: data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMCIKCgoKCiMgVGhlIENSSS1PIHdpbGwgY2hlY2sgdGhlIGFsbG93ZWRfYW5ub3RhdGlvbnMgdW5kZXIgdGhlIHJ1bnRpbWUgaGFuZGxlciBhbmQgYXBwbHkgaGlnaC1wZXJmb3JtYW5jZSBob29rcyB3aGVuIG9uZSBvZgojIGhpZ2gtcGVyZm9ybWFuY2UgYW5ub3RhdGlvbnMgcHJlc2VudHMgdW5kZXIgaXQuCiMgV2Ugc2hvdWxkIHByb3ZpZGUgdGhlIHJ1bnRpbWVfcGF0aCBiZWNhdXNlIHdlIG5lZWQgdG8gaW5mb3JtIHRoYXQgd2Ugd2FudCB0byByZS11c2UgcnVuYyBiaW5hcnkgYW5kIHdlCiMgZG8gbm90IGhhdmUgaGlnaC1wZXJmb3JtYW5jZSBiaW5hcnkgdW5kZXIgdGhlICRQQVRIIHRoYXQgd2lsbCBwb2ludCB0byBpdC4KW2NyaW8ucnVudGltZS5ydW50aW1lcy5oaWdoLXBlcmZvcm1hbmNlXQppbmhlcml0X2RlZmF1bHRfcnVudGltZSA9IHRydWUKYWxsb3dlZF9hbm5vdGF0aW9ucyA9IFsiY3B1LWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LXF1b3RhLmNyaW8uaW8iLCAiaXJxLWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LWMtc3RhdGVzLmNyaW8uaW8iLCAiY3B1LWZyZXEtZ292ZXJub3IuY3Jpby5pbyJdCg== + source: data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMCIKCgoKCiMgVGhlIENSSS1PIHdpbGwgY2hlY2sgdGhlIGFsbG93ZWRfYW5ub3RhdGlvbnMgdW5kZXIgdGhlIHJ1bnRpbWUgaGFuZGxlciBhbmQgYXBwbHkgaGlnaC1wZXJmb3JtYW5jZSBob29rcyB3aGVuIG9uZSBvZgojIGhpZ2gtcGVyZm9ybWFuY2UgYW5ub3RhdGlvbnMgcHJlc2VudHMgdW5kZXIgaXQuCiMgV2Ugc2hvdWxkIHByb3ZpZGUgdGhlIHJ1bnRpbWVfcGF0aCBiZWNhdXNlIHdlIG5lZWQgdG8gaW5mb3JtIHRoYXQgd2Ugd2FudCB0byByZS11c2UgcnVuYyBiaW5hcnkgYW5kIHdlCiMgZG8gbm90IGhhdmUgaGlnaC1wZXJmb3JtYW5jZSBiaW5hcnkgdW5kZXIgdGhlICRQQVRIIHRoYXQgd2lsbCBwb2ludCB0byBpdC4KW2NyaW8ucnVudGltZS5ydW50aW1lcy5oaWdoLXBlcmZvcm1hbmNlXQppbmhlcml0X2RlZmF1bHRfcnVudGltZSA9IHRydWUKZXhlY19jcHVfYWZmaW5pdHkgPSAiZmlyc3QiCmFsbG93ZWRfYW5ub3RhdGlvbnMgPSBbImNwdS1sb2FkLWJhbGFuY2luZy5jcmlvLmlvIiwgImNwdS1xdW90YS5jcmlvLmlvIiwgImlycS1sb2FkLWJhbGFuY2luZy5jcmlvLmlvIiwgImNwdS1jLXN0YXRlcy5jcmlvLmlvIiwgImNwdS1mcmVxLWdvdmVybm9yLmNyaW8uaW8iXQo= verification: {} group: {} mode: 420 diff --git a/test/e2e/performanceprofile/testdata/render-expected-output/default/pp-norps/manual_machineconfig.yaml b/test/e2e/performanceprofile/testdata/render-expected-output/default/pp-norps/manual_machineconfig.yaml index 8a6ef4029d..c7d67f403a 100644 --- a/test/e2e/performanceprofile/testdata/render-expected-output/default/pp-norps/manual_machineconfig.yaml +++ b/test/e2e/performanceprofile/testdata/render-expected-output/default/pp-norps/manual_machineconfig.yaml @@ -43,7 +43,7 @@ spec: path: /usr/local/bin/clear-irqbalance-banned-cpus.sh user: {} - contents: - source: 
data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMCIKCgoKCiMgVGhlIENSSS1PIHdpbGwgY2hlY2sgdGhlIGFsbG93ZWRfYW5ub3RhdGlvbnMgdW5kZXIgdGhlIHJ1bnRpbWUgaGFuZGxlciBhbmQgYXBwbHkgaGlnaC1wZXJmb3JtYW5jZSBob29rcyB3aGVuIG9uZSBvZgojIGhpZ2gtcGVyZm9ybWFuY2UgYW5ub3RhdGlvbnMgcHJlc2VudHMgdW5kZXIgaXQuCiMgV2Ugc2hvdWxkIHByb3ZpZGUgdGhlIHJ1bnRpbWVfcGF0aCBiZWNhdXNlIHdlIG5lZWQgdG8gaW5mb3JtIHRoYXQgd2Ugd2FudCB0byByZS11c2UgcnVuYyBiaW5hcnkgYW5kIHdlCiMgZG8gbm90IGhhdmUgaGlnaC1wZXJmb3JtYW5jZSBiaW5hcnkgdW5kZXIgdGhlICRQQVRIIHRoYXQgd2lsbCBwb2ludCB0byBpdC4KW2NyaW8ucnVudGltZS5ydW50aW1lcy5oaWdoLXBlcmZvcm1hbmNlXQppbmhlcml0X2RlZmF1bHRfcnVudGltZSA9IHRydWUKYWxsb3dlZF9hbm5vdGF0aW9ucyA9IFsiY3B1LWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LXF1b3RhLmNyaW8uaW8iLCAiaXJxLWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LWMtc3RhdGVzLmNyaW8uaW8iLCAiY3B1LWZyZXEtZ292ZXJub3IuY3Jpby5pbyJdCg== + source: data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMCIKCgoKCiMgVGhlIENSSS1PIHdpbGwgY2hlY2sgdGhlIGFsbG93ZWRfYW5ub3RhdGlvbnMgdW5kZXIgdGhlIHJ1bnRpbWUgaGFuZGxlciBhbmQgYXBwbHkgaGlnaC1wZXJmb3JtYW5jZSBob29rcyB3aGVuIG9uZSBvZgojIGhpZ2gtcGVyZm9ybWFuY2UgYW5ub3RhdGlvbnMgcHJlc2VudHMgdW5kZXIgaXQuCiMgV2Ugc2hvdWxkIHByb3ZpZGUgdGhlIHJ1bnRpbWVfcGF0aCBiZWNhdXNlIHdlIG5lZWQgdG8gaW5mb3JtIHRoYXQgd2Ugd2FudCB0byByZS11c2UgcnVuYyBiaW5hcnkgYW5kIHdlCiMgZG8gbm90IGhhdmUgaGlnaC1wZXJmb3JtYW5jZSBiaW5hcnkgdW5kZXIgdGhlICRQQVRIIHRoYXQgd2lsbCBwb2ludCB0byBpdC4KW2NyaW8ucnVudGltZS5ydW50aW1lcy5oaWdoLXBlcmZvcm1hbmNlXQppbmhlcml0X2RlZmF1bHRfcnVudGltZSA9IHRydWUKZXhlY19jcHVfYWZmaW5pdHkgPSAiZmlyc3QiCmFsbG93ZWRfYW5ub3RhdGlvbnMgPSBbImNwdS1sb2FkLWJhbGFuY2luZy5jcmlvLmlvIiwgImNwdS1xdW90YS5jcmlvLmlvIiwgImlycS1sb2FkLWJhbGFuY2luZy5jcmlvLmlvIiwgImNwdS1jLXN0YXRlcy5jcmlvLmlvIiwgImNwdS1mcmVxLWdvdmVybm9yLmNyaW8uaW8iXQo= verification: {} group: {} mode: 420 diff --git a/test/e2e/performanceprofile/testdata/render-expected-output/no-ref/manual_machineconfig.yaml b/test/e2e/performanceprofile/testdata/render-expected-output/no-ref/manual_machineconfig.yaml index a2cccd1aeb..ce3a294222 100644 --- a/test/e2e/performanceprofile/testdata/render-expected-output/no-ref/manual_machineconfig.yaml +++ b/test/e2e/performanceprofile/testdata/render-expected-output/no-ref/manual_machineconfig.yaml @@ -42,7 +42,7 @@ spec: path: /usr/local/bin/clear-irqbalance-banned-cpus.sh user: {} - contents: - source: data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMCIKCgoKCiMgVGhlIENSSS1PIHdpbGwgY2hlY2sgdGhlIGFsbG93ZWRfYW5ub3RhdGlvbnMgdW5kZXIgdGhlIHJ1bnRpbWUgaGFuZGxlciBhbmQgYXBwbHkgaGlnaC1wZXJmb3JtYW5jZSBob29rcyB3aGVuIG9uZSBvZgojIGhpZ2gtcGVyZm9ybWFuY2UgYW5ub3RhdGlvbnMgcHJlc2VudHMgdW5kZXIgaXQuCiMgV2Ugc2hvdWxkIHByb3ZpZGUgdGhlIHJ1bnRpbWVfcGF0aCBiZWNhdXNlIHdlIG5lZWQgdG8gaW5mb3JtIHRoYXQgd2Ugd2FudCB0byByZS11c2UgcnVuYyBiaW5hcnkgYW5kIHdlCiMgZG8gbm90IGhhdmUgaGlnaC1wZXJmb3JtYW5jZSBiaW5hcnkgdW5kZXIgdGhlICRQQVRIIHRoYXQgd2lsbCBwb2ludCB0byBpdC4KW2NyaW8ucnVudGltZS5ydW50aW1lcy5oaWdoLXBlcmZvcm1hbmNlXQppbmhlcml0X2RlZmF1bHRfcnVudGltZSA9IHRydWUKYWxsb3dlZF9hbm5vdGF0aW9ucyA9IFsiY3B1LWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LXF1b3RhLmNyaW8uaW8iLCAiaXJxLWxvYWQtYmFsYW5jaW5nLmNyaW8uaW8iLCAiY3B1LWMtc3RhdGVzLmNyaW8uaW8iLCAiY3B1LWZyZXEtZ292ZXJub3IuY3Jpby5pbyJdCg== + source: 
data:text/plain;charset=utf-8;base64,CltjcmlvLnJ1bnRpbWVdCmluZnJhX2N0cl9jcHVzZXQgPSAiMCIKCgoKCiMgVGhlIENSSS1PIHdpbGwgY2hlY2sgdGhlIGFsbG93ZWRfYW5ub3RhdGlvbnMgdW5kZXIgdGhlIHJ1bnRpbWUgaGFuZGxlciBhbmQgYXBwbHkgaGlnaC1wZXJmb3JtYW5jZSBob29rcyB3aGVuIG9uZSBvZgojIGhpZ2gtcGVyZm9ybWFuY2UgYW5ub3RhdGlvbnMgcHJlc2VudHMgdW5kZXIgaXQuCiMgV2Ugc2hvdWxkIHByb3ZpZGUgdGhlIHJ1bnRpbWVfcGF0aCBiZWNhdXNlIHdlIG5lZWQgdG8gaW5mb3JtIHRoYXQgd2Ugd2FudCB0byByZS11c2UgcnVuYyBiaW5hcnkgYW5kIHdlCiMgZG8gbm90IGhhdmUgaGlnaC1wZXJmb3JtYW5jZSBiaW5hcnkgdW5kZXIgdGhlICRQQVRIIHRoYXQgd2lsbCBwb2ludCB0byBpdC4KW2NyaW8ucnVudGltZS5ydW50aW1lcy5oaWdoLXBlcmZvcm1hbmNlXQppbmhlcml0X2RlZmF1bHRfcnVudGltZSA9IHRydWUKZXhlY19jcHVfYWZmaW5pdHkgPSAiZmlyc3QiCmFsbG93ZWRfYW5ub3RhdGlvbnMgPSBbImNwdS1sb2FkLWJhbGFuY2luZy5jcmlvLmlvIiwgImNwdS1xdW90YS5jcmlvLmlvIiwgImlycS1sb2FkLWJhbGFuY2luZy5jcmlvLmlvIiwgImNwdS1jLXN0YXRlcy5jcmlvLmlvIiwgImNwdS1mcmVxLWdvdmVybm9yLmNyaW8uaW8iXQo= verification: {} group: {} mode: 420
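
Reviewer note: the helpers exercised by TestTotalCPUsRounded and TestMaxCPURequestsRounded above are not shown in this diff. As a reading aid only, here is a minimal sketch consistent with the expectations the test cases encode; the package name is an assumption and this is not the actual implementation.

// Package resources is a hypothetical home for these helpers; the real
// implementations live elsewhere in the tree.
package resources

import (
	corev1 "k8s.io/api/core/v1"
)

// TotalCPUsRounded sums the CPU requests of all containers in milli-CPUs
// and rounds the total up to whole CPUs once at the end, so 1 + 2.3 + 0.5
// (3800m) becomes 4, while 4 x 250m (1000m) stays exactly 1.
func TotalCPUsRounded(containersResources []corev1.ResourceList) int {
	totalMilli := int64(0)
	for _, rl := range containersResources {
		if cpu, ok := rl[corev1.ResourceCPU]; ok {
			totalMilli += cpu.MilliValue()
		}
	}
	// Round the sum, not each container individually.
	return int((totalMilli + 999) / 1000)
}

// MaxCPURequestsRounded rounds each container's CPU request up to a whole
// CPU and returns the largest, so requests of 1, 2300m and 500m yield
// max(1, 3, 1) = 3. Containers without a CPU request contribute nothing.
func MaxCPURequestsRounded(containersResources []corev1.ResourceList) int {
	maxCPUs := 0
	for _, rl := range containersResources {
		if cpu, ok := rl[corev1.ResourceCPU]; ok {
			rounded := int((cpu.MilliValue() + 999) / 1000)
			if rounded > maxCPUs {
				maxCPUs = rounded
			}
		}
	}
	return maxCPUs
}

The behavioral distinction the tests pin down is exactly this: the total is rounded once after summing (3.8 rounds to 4, not 1 + 3 + 1 = 5), whereas the maximum is taken over per-container rounded values.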
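
The testdata churn above is mechanical: each expected machineconfig embeds the rendered 99-runtimes.conf drop-in as a base64 data URL, and every updated blob decodes to the same config with one added line, exec_cpu_affinity = "first", under the [crio.runtime.runtimes.high-performance] handler. A throwaway checker (hypothetical, not part of this change) can confirm that while reviewing:

// Pipe the payload after "base64," from any source line above into stdin.
package main

import (
	"encoding/base64"
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	raw, err := io.ReadAll(os.Stdin)
	if err != nil {
		panic(err)
	}
	decoded, err := base64.StdEncoding.DecodeString(strings.TrimSpace(string(raw)))
	if err != nil {
		panic(err)
	}
	// Updated blobs should report true; the pre-change blobs report false.
	fmt.Printf("has exec_cpu_affinity: %v\n",
		strings.Contains(string(decoded), `exec_cpu_affinity = "first"`))
}

Running it against each "+ source:" blob in the files touched here should print true, and false for the corresponding "- source:" blob, with no other difference in the decoded config.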