From 10c8c8957d12d8be3ab61a721098291ee6bf9737 Mon Sep 17 00:00:00 2001
From: Rodrigo Fior Kuntzer
Date: Tue, 13 May 2025 16:30:18 +0200
Subject: [PATCH] feat: add support for showing normalized node allocation
 based on the more heavily allocated of CPU and memory

Signed-off-by: Rodrigo Fior Kuntzer
---
 cmd/eks-node-viewer/flag.go |  23 ++++---
 cmd/eks-node-viewer/main.go |  13 +++-
 pkg/model/cluster.go        |  20 +++---
 pkg/model/cluster_test.go   |  12 ++--
 pkg/model/node.go           |  50 +++++++++++++++
 pkg/model/node_test.go      | 123 ++++++++++++++++++++++++++++++++++++
 pkg/model/uimodel.go        |  56 ++++++++--------
 7 files changed, 244 insertions(+), 53 deletions(-)

diff --git a/cmd/eks-node-viewer/flag.go b/cmd/eks-node-viewer/flag.go
index 4adf8ea..3f4181b 100644
--- a/cmd/eks-node-viewer/flag.go
+++ b/cmd/eks-node-viewer/flag.go
@@ -41,16 +41,17 @@ func init() {
 }
 
 type Flags struct {
-	Context         string
-	NodeSelector    string
-	ExtraLabels     string
-	NodeSort        string
-	Style           string
-	Kubeconfig      string
-	Resources       string
-	DisablePricing  bool
-	ShowAttribution bool
-	Version         bool
+	Context              string
+	NodeSelector         string
+	ExtraLabels          string
+	NodeSort             string
+	Style                string
+	Kubeconfig           string
+	Resources            string
+	DisablePricing       bool
+	ShowAttribution      bool
+	NormalizedAllocation bool
+	Version              bool
 }
 
 func ParseFlags() (Flags, error) {
@@ -92,6 +93,8 @@ func ParseFlags() (Flags, error) {
 
 	flagSet.BoolVar(&flags.ShowAttribution, "attribution", false, "Show the Open Source Attribution")
 
+	flagSet.BoolVar(&flags.NormalizedAllocation, "normalized-allocation", false, "Normalize the node allocation based on the more heavily allocated of CPU and memory")
+
 	if err := flagSet.Parse(os.Args[1:]); err != nil {
 		return Flags{}, err
 	}
diff --git a/cmd/eks-node-viewer/main.go b/cmd/eks-node-viewer/main.go
index 6092683..7c653e8 100644
--- a/cmd/eks-node-viewer/main.go
+++ b/cmd/eks-node-viewer/main.go
@@ -58,6 +58,15 @@ func main() {
 		os.Exit(0)
 	}
 
+	resources := strings.FieldsFunc(flags.Resources, func(r rune) bool { return r == ',' })
+	if flags.NormalizedAllocation {
+		for _, res := range resources {
+			if res != "cpu" && res != "memory" {
+				log.Fatalf("normalized allocation only supports cpu and memory, got %s", res)
+			}
+		}
+	}
+
 	cs, err := client.NewKubernetes(flags.Kubeconfig, flags.Context)
 	if err != nil {
 		log.Fatalf("creating client, %s", err)
@@ -73,9 +82,9 @@ func main() {
 	if err != nil {
 		log.Fatalf("creating style, %s", err)
 	}
-	m := model.NewUIModel(strings.Split(flags.ExtraLabels, ","), flags.NodeSort, style)
+	m := model.NewUIModel(strings.Split(flags.ExtraLabels, ","), flags.NodeSort, style, flags.NormalizedAllocation)
 	m.DisablePricing = flags.DisablePricing
-	m.SetResources(strings.FieldsFunc(flags.Resources, func(r rune) bool { return r == ',' }))
+	m.SetResources(resources)
 
 	var nodeSelector labels.Selector
 	if ns, err := labels.Parse(flags.NodeSelector); err != nil {
diff --git a/pkg/model/cluster.go b/pkg/model/cluster.go
index 3f6c7a3..1804f1a 100644
--- a/pkg/model/cluster.go
+++ b/pkg/model/cluster.go
@@ -21,17 +21,19 @@ import (
 )
 
 type Cluster struct {
-	mu        sync.RWMutex
-	nodes     map[string]*Node
-	pods      map[objectKey]*Pod
-	resources []v1.ResourceName
+	mu                   sync.RWMutex
+	nodes                map[string]*Node
+	pods                 map[objectKey]*Pod
+	resources            []v1.ResourceName
+	normalizedAllocation bool
 }
 
-func NewCluster() *Cluster {
+func NewCluster(normalizedAllocation bool) *Cluster {
 	return &Cluster{
-		nodes:     map[string]*Node{},
-		pods:      map[objectKey]*Pod{},
-		resources: []v1.ResourceName{v1.ResourceCPU},
+		nodes:                map[string]*Node{},
+		pods:                 map[objectKey]*Pod{},
+		resources:            []v1.ResourceName{v1.ResourceCPU},
+		normalizedAllocation: normalizedAllocation,
 	}
 }
 
 func (c *Cluster) AddNode(node *Node) *Node {
@@ -165,7 +167,7 @@ func (c *Cluster) Stats() Stats {
 		st.NumNodes++
 		st.Nodes = append(st.Nodes, n)
 		addResources(st.AllocatableResources, n.Allocatable())
-		addResources(st.UsedResources, n.Used())
+		addResources(st.UsedResources, n.UsedNormalized(c.normalizedAllocation))
 	}
 	return st
 }
diff --git a/pkg/model/cluster_test.go b/pkg/model/cluster_test.go
index d454681..9a0c5e0 100644
--- a/pkg/model/cluster_test.go
+++ b/pkg/model/cluster_test.go
@@ -23,7 +23,7 @@ import (
 )
 
 func TestClusterAddNode(t *testing.T) {
-	cluster := model.NewCluster()
+	cluster := model.NewCluster(false)
 
 	if got := len(cluster.Stats().Nodes); got != 0 {
 		t.Errorf("expected 0 nodes, got %d", got)
@@ -63,7 +63,7 @@ func TestClusterAddNode(t *testing.T) {
 	}
 }
 
 func TestClusterGetNodeByProviderID(t *testing.T) {
-	cluster := model.NewCluster()
+	cluster := model.NewCluster(false)
 
 	_, ok := cluster.GetNode("mynode-id")
 	if ok {
@@ -88,7 +88,7 @@ func TestClusterGetNodeByProviderID(t *testing.T) {
 	}
 }
 
 func TestClusterGetNodeByName(t *testing.T) {
-	cluster := model.NewCluster()
+	cluster := model.NewCluster(false)
 
 	_, ok := cluster.GetNodeByName("mynode")
 	if ok {
@@ -105,7 +105,7 @@ func TestClusterGetNodeByName(t *testing.T) {
 	}
 }
 
 func TestClusterUpdateNode(t *testing.T) {
-	cluster := model.NewCluster()
+	cluster := model.NewCluster(false)
 
 	n1 := testNode("mynode")
 	n1.Status.Allocatable = v1.ResourceList{
@@ -135,7 +135,7 @@ func TestClusterUpdateNode(t *testing.T) {
 	}
 }
 
 func TestClusterAddPod(t *testing.T) {
-	cluster := model.NewCluster()
+	cluster := model.NewCluster(false)
 
 	n := testNode("mynode")
 	n.Spec.ProviderID = "mynode-id"
@@ -175,7 +175,7 @@ func TestClusterAddPod(t *testing.T) {
 	}
 }
 
 func TestClusterDeleteNodeDeletesPods(t *testing.T) {
-	cluster := model.NewCluster()
+	cluster := model.NewCluster(false)
 
 	// add a node and pod bound to that node
 	n := testNode("mynode")
diff --git a/pkg/model/node.go b/pkg/model/node.go
index cc89734..0508c9f 100644
--- a/pkg/model/node.go
+++ b/pkg/model/node.go
@@ -16,6 +16,7 @@ package model
 
 import (
 	"fmt"
+	"k8s.io/apimachinery/pkg/api/resource"
 	"regexp"
 	"sync"
 	"time"
@@ -184,6 +185,55 @@ func (n *Node) Used() v1.ResourceList {
 	return used
 }
 
+func (n *Node) UsedNormalized(normalizedAllocation bool) v1.ResourceList {
+	used := n.Used()
+	if !normalizedAllocation {
+		return used
+	}
+	allocatable := n.Allocatable()
+	pctCpu := n.UsedPct(v1.ResourceCPU, false)
+	pctMem := n.UsedPct(v1.ResourceMemory, false)
+	if pctCpu > pctMem {
+		allocatableRes := allocatable[v1.ResourceMemory]
+		newMem := allocatableRes.AsApproximateFloat64() * pctCpu
+		used[v1.ResourceMemory] = resource.NewMilliQuantity(int64(newMem*1000), resource.DecimalSI).DeepCopy()
+	} else if pctMem > pctCpu {
+		allocatableRes := allocatable[v1.ResourceCPU]
+		newCpu := allocatableRes.AsApproximateFloat64() * pctMem
+		used[v1.ResourceCPU] = resource.NewMilliQuantity(int64(newCpu*1000), resource.DecimalSI).DeepCopy()
+	}
+	return used
+}
+
+func (n *Node) UsedPct(res v1.ResourceName, normalizedAllocation bool) float64 {
+	used := n.Used()
+	allocatable := n.Allocatable()
+
+	usedRes := used[res]
+	allocatableRes := allocatable[res]
+	pct := usedRes.AsApproximateFloat64() / allocatableRes.AsApproximateFloat64()
+	if allocatableRes.AsApproximateFloat64() == 0 {
+		pct = 0
+	} else if normalizedAllocation {
+		var resRev v1.ResourceName
+		switch res {
+		case v1.ResourceCPU:
+			resRev = v1.ResourceMemory
+		case v1.ResourceMemory:
+			resRev = v1.ResourceCPU
+		}
+		if resRev != "" {
+			pctRev := n.UsedPct(resRev, false)
+			if pctRev > pct {
+				newUsedRes := allocatableRes.AsApproximateFloat64() * pctRev
+				pct = newUsedRes / allocatableRes.AsApproximateFloat64()
+			}
+		}
+	}
+
+	return pct
+}
+
 func (n *Node) Cordoned() bool {
 	n.mu.RLock()
 	defer n.mu.RUnlock()
diff --git a/pkg/model/node_test.go b/pkg/model/node_test.go
index f4a05f7..43a3929 100644
--- a/pkg/model/node_test.go
+++ b/pkg/model/node_test.go
@@ -14,6 +14,8 @@ limitations under the License.
 package model_test
 
 import (
+	"k8s.io/apimachinery/pkg/api/resource"
+	"reflect"
 	"testing"
 	"time"
 
@@ -188,3 +190,124 @@ func TestNodeNotReadyNoCondition(t *testing.T) {
 		})
 	}
 }
+
+func TestNode_UsedPct(t *testing.T) {
+	type args struct {
+		res                  v1.ResourceName
+		normalizedAllocation bool
+	}
+	tests := []struct {
+		name string
+		args args
+		want float64
+	}{
+		{
+			name: "cpu used",
+			args: args{
+				res: v1.ResourceCPU,
+			},
+			want: 0.25,
+		},
+		{
+			name: "memory used",
+			args: args{
+				res: v1.ResourceMemory,
+			},
+			want: 0.50,
+		},
+		{
+			name: "cpu used normalized",
+			args: args{
+				res:                  v1.ResourceCPU,
+				normalizedAllocation: true,
+			},
+			want: 0.50,
+		},
+		{
+			name: "memory used normalized",
+			args: args{
+				res:                  v1.ResourceMemory,
+				normalizedAllocation: true,
+			},
+			want: 0.50,
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			n := testNode("mynode")
+			n.Status.Allocatable = v1.ResourceList{
+				v1.ResourceCPU:    resource.MustParse("8"),
+				v1.ResourceMemory: resource.MustParse("4Gi"),
+			}
+			node := model.NewNode(n)
+
+			p := testPod("default", "mypod")
+			p.Spec.NodeName = n.Name
+			pod := model.NewPod(p)
+			node.BindPod(pod)
+
+			if got := node.UsedPct(tt.args.res, tt.args.normalizedAllocation); got != tt.want {
+				t.Errorf("UsedPct() = %v, want %v", got, tt.want)
+			}
+		})
+	}
+}
+
+func TestNode_UsedNormalized(t *testing.T) {
+	type args struct {
+		normalizedAllocation bool
+	}
+	tests := []struct {
+		name string
+		args args
+		want v1.ResourceList
+	}{
+		{
+			name: "not normalized",
+			args: args{},
+			want: v1.ResourceList{
+				v1.ResourceCPU:    resource.MustParse("2"),
+				v1.ResourceMemory: resource.MustParse("2Gi"),
+				v1.ResourcePods:   resource.MustParse("1"),
+			},
+		},
+		{
+			name: "normalized",
+			args: args{
+				normalizedAllocation: true,
+			},
+			want: v1.ResourceList{
+				v1.ResourceCPU:    resource.MustParse("4000m"),
+				v1.ResourceMemory: resource.MustParse("2Gi"),
+				v1.ResourcePods:   resource.MustParse("1"),
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			n := testNode("mynode")
+			n.Status.Allocatable = v1.ResourceList{
+				v1.ResourceCPU:    resource.MustParse("8"),
+				v1.ResourceMemory: resource.MustParse("4Gi"),
+			}
+			node := model.NewNode(n)
+
+			p := testPod("default", "mypod")
+			p.Spec.NodeName = n.Name
+			pod := model.NewPod(p)
+			node.BindPod(pod)
+
+			// strip the canonical string notation from the expected quantities
+			// so reflect.DeepEqual can compare them
+			want := v1.ResourceList{}
+			for k, v := range tt.want {
+				v.Add(resource.MustParse("0"))
+				want[k] = v
+			}
+
+			if got := node.UsedNormalized(tt.args.normalizedAllocation); !reflect.DeepEqual(got, want) {
+				t.Errorf("UsedNormalized() = %v, want %v", got, want)
+			}
+		})
+	}
+}
diff --git a/pkg/model/uimodel.go b/pkg/model/uimodel.go
index 56044be..10e18a8 100644
--- a/pkg/model/uimodel.go
+++ b/pkg/model/uimodel.go
@@ -44,29 +44,31 @@ var (
 )
 
 type UIModel struct {
-	progress       progress.Model
-	cluster        *Cluster
-	extraLabels    []string
-	paginator      paginator.Model
-	height         int
-	nodeSorter     func(lhs, rhs *Node) bool
-	style          *Style
-	DisablePricing bool
+	progress             progress.Model
+	cluster              *Cluster
+	extraLabels          []string
+	paginator            paginator.Model
+	height               int
+	nodeSorter           func(lhs, rhs *Node) bool
+	style                *Style
+	DisablePricing       bool
+	normalizedAllocation bool
 }
 
-func NewUIModel(extraLabels []string, nodeSort string, style *Style) *UIModel {
+func NewUIModel(extraLabels []string, nodeSort string, style *Style, normalizedAllocation bool) *UIModel {
 	pager := paginator.New()
 	pager.Type = paginator.Dots
 	pager.ActiveDot = activeDot
 	pager.InactiveDot = inactiveDot
 	return &UIModel{
 		// red to green
-		progress:    progress.New(style.gradient),
-		cluster:     NewCluster(),
-		extraLabels: extraLabels,
-		paginator:   pager,
-		nodeSorter:  makeNodeSorter(nodeSort),
-		style:       style,
+		progress:             progress.New(style.gradient),
+		cluster:              NewCluster(normalizedAllocation),
+		extraLabels:          extraLabels,
+		paginator:            pager,
+		nodeSorter:           makeNodeSorter(nodeSort),
+		style:                style,
+		normalizedAllocation: normalizedAllocation,
 	}
 }
 
@@ -126,8 +128,6 @@ func (u *UIModel) View() string {
 }
 
 func (u *UIModel) writeNodeInfo(n *Node, w io.Writer, resources []v1.ResourceName) {
-	allocatable := n.Allocatable()
-	used := n.Used()
 	firstLine := true
 	resNameLen := 0
 	for _, res := range resources {
@@ -136,11 +136,11 @@ func (u *UIModel) writeNodeInfo(n *Node, w io.Writer, resources []v1.ResourceNam
 		}
 	}
 	for _, res := range resources {
-		usedRes := used[res]
-		allocatableRes := allocatable[res]
-		pct := usedRes.AsApproximateFloat64() / allocatableRes.AsApproximateFloat64()
-		if allocatableRes.AsApproximateFloat64() == 0 {
-			pct = 0
+		pct := n.UsedPct(res, u.normalizedAllocation)
+
+		resStr := res
+		if u.normalizedAllocation {
+			resStr = res + " (normalized)"
 		}
 
 		if firstLine {
@@ -148,7 +148,7 @@ func (u *UIModel) writeNodeInfo(n *Node, w io.Writer, resources []v1.ResourceNam
 			if !n.HasPrice() || u.DisablePricing {
 				priceLabel = ""
 			}
-			fmt.Fprintf(w, "%s\t%s\t%s\t(%d pods)\t%s%s", n.Name(), res, u.progress.ViewAs(pct), n.NumPods(), n.InstanceType(), priceLabel)
+			fmt.Fprintf(w, "%s\t%s\t%s\t(%d pods)\t%s%s", n.Name(), resStr, u.progress.ViewAs(pct), n.NumPods(), n.InstanceType(), priceLabel)
 
 			// node compute type
 			if n.IsOnDemand() {
@@ -193,7 +193,7 @@ func (u *UIModel) writeNodeInfo(n *Node, w io.Writer, resources []v1.ResourceNam
 			}
 
 		} else {
-			fmt.Fprintf(w, " \t%s\t%s\t\t\t\t\t", res, u.progress.ViewAs(pct))
+			fmt.Fprintf(w, " \t%s\t%s\t\t\t\t\t", resStr, u.progress.ViewAs(pct))
 			for range u.extraLabels {
 				fmt.Fprintf(w, "\t")
 			}
@@ -221,6 +221,10 @@ func (u *UIModel) writeClusterSummary(resources []v1.ResourceName, stats Stats,
 		} else {
 			pctUsedStr = u.style.red(pctUsedStr)
 		}
+		resStr := res
+		if u.normalizedAllocation {
+			resStr = res + " (normalized)"
+		}
 
 		u.progress.ShowPercentage = false
 		monthlyPrice := stats.TotalPrice * (365 * 24) / 12 // average hours per month
@@ -232,10 +236,10 @@ func (u *UIModel) writeClusterSummary(resources []v1.ResourceName, stats Stats,
 		}
 		if firstLine {
 			enPrinter.Fprintf(w, "%d nodes\t(%10s/%s)\t%s\t%s\t%s\t%s\n",
-				stats.NumNodes, used.String(), allocatable.String(), pctUsedStr, res, u.progress.ViewAs(pctUsed/100.0), clusterPrice)
+				stats.NumNodes, used.String(), allocatable.String(), pctUsedStr, resStr, u.progress.ViewAs(pctUsed/100.0), clusterPrice)
 		} else {
 			enPrinter.Fprintf(w, " \t%s/%s\t%s\t%s\t%s\t\n",
-				used.String(), allocatable.String(), pctUsedStr, res, u.progress.ViewAs(pctUsed/100.0))
+				used.String(), allocatable.String(), pctUsedStr, resStr, u.progress.ViewAs(pctUsed/100.0))
 		}
 		firstLine = false
 	}
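
Reviewer note (illustrative, not part of the patch): the normalization added in UsedNormalized/UsedPct scales the less-utilized of CPU and memory up to the utilization fraction of the more-utilized one, so both bars render at the same fill level. Below is a minimal, self-contained Go sketch of that arithmetic using the values from the new tests (8 CPU / 4Gi allocatable, 2 CPU / 2Gi requested); the variable names are illustrative only.

	package main

	import "fmt"

	func main() {
		allocCPU, allocMem := 8.0, 4.0 // cores and GiB allocatable, as in the tests
		usedCPU, usedMem := 2.0, 2.0   // cores and GiB requested by the bound pod

		pctCPU := usedCPU / allocCPU // 0.25
		pctMem := usedMem / allocMem // 0.50

		// Scale the lower-utilized dimension up to the higher utilization,
		// mirroring the rule implemented by UsedNormalized/UsedPct.
		if pctMem > pctCPU {
			usedCPU = allocCPU * pctMem // 4.0 cores, reported as 4000m
			pctCPU = pctMem
		} else if pctCPU > pctMem {
			usedMem = allocMem * pctCPU
			pctMem = pctCPU
		}

		fmt.Printf("cpu %.0f%% (%.0fm), memory %.0f%%\n", pctCPU*100, usedCPU*1000, pctMem*100)
		// prints: cpu 50% (4000m), memory 50%
	}

With the patch applied, the behavior is switched on via the new --normalized-allocation flag, and only cpu and memory are accepted as resources in that mode (see the validation added to main.go).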