From f3e410f4f56d8df9ac169c57d93f7062287eb5bc Mon Sep 17 00:00:00 2001 From: Ingo Oppermann Date: Thu, 11 May 2023 21:58:55 +0200 Subject: [PATCH] Change resources to absolute values --- cluster/leader.go | 42 +++------- cluster/leader_test.go | 164 ++++------------------------------------ cluster/proxy/node.go | 45 +++++------ cluster/proxy/proxy.go | 4 +- http/api/cluster.go | 4 +- restream/app/process.go | 2 +- 6 files changed, 50 insertions(+), 211 deletions(-) diff --git a/cluster/leader.go b/cluster/leader.go index 2696b994..4d746540 100644 --- a/cluster/leader.go +++ b/cluster/leader.go @@ -480,26 +480,21 @@ func (c *cluster) doRebalance() { } // normalizeProcessesAndResources normalizes the CPU and memory consumption of the processes and resources in-place. +// +// Deprecated: all values are absolute or already normed to 0-100*ncpu percent func normalizeProcessesAndResources(processes []proxy.ProcessConfig, resources map[string]proxy.NodeResources) { maxNCPU := .0 - maxMemTotal := .0 for _, r := range resources { if r.NCPU > maxNCPU { maxNCPU = r.NCPU } - if r.MemTotal > maxMemTotal { - maxMemTotal = r.MemTotal - } } for id, r := range resources { factor := maxNCPU / r.NCPU r.CPU = 100 - (100-r.CPU)/factor - factor = maxMemTotal / r.MemTotal - r.Mem = 100 - (100-r.Mem)/factor - resources[id] = r } @@ -513,15 +508,11 @@ func normalizeProcessesAndResources(processes []proxy.ProcessConfig, resources m factor := maxNCPU / r.NCPU p.CPU = 100 - (100-p.CPU)/factor - factor = maxMemTotal / r.MemTotal - p.Mem = 100 - (100-p.Mem)/factor - processes[i] = p } for id, r := range resources { r.NCPU = maxNCPU - r.MemTotal = maxMemTotal resources[id] = r } @@ -530,8 +521,6 @@ func normalizeProcessesAndResources(processes []proxy.ProcessConfig, resources m // synchronize returns a list of operations in order to adjust the "have" list to the "want" list // with taking the available resources on each node into account. 
func synchronize(want []app.Config, have []proxy.ProcessConfig, resources map[string]proxy.NodeResources) []interface{} { - normalizeProcessesAndResources(have, resources) - // A map from the process ID to the process config of the processes // we want to be running on the nodes. wantMap := map[string]*app.Config{} @@ -578,8 +567,6 @@ func synchronize(want []app.Config, have []proxy.ProcessConfig, resources map[st have = haveAfterRemove - createReferenceAffinityMap(have) - // A map from the process reference to the node it is running on haveReferenceAffinityMap := createReferenceAffinityMap(have) @@ -605,8 +592,8 @@ func synchronize(want []app.Config, have []proxy.ProcessConfig, resources map[st if len(config.Reference) != 0 { for _, count := range haveReferenceAffinityMap[config.Reference] { r := resources[count.nodeid] - cpu := config.LimitCPU / r.NCPU - mem := float64(config.LimitMemory) / r.MemTotal * 100 + cpu := config.LimitCPU * r.NCPU // TODO: in the vod branch this changed if system-wide limits are given + mem := config.LimitMemory if r.CPU+cpu < r.CPULimit && r.Mem+mem < r.MemLimit { nodeid = count.nodeid @@ -618,8 +605,8 @@ func synchronize(want []app.Config, have []proxy.ProcessConfig, resources map[st // Find the node with the most resources available if len(nodeid) == 0 { for id, r := range resources { - cpu := config.LimitCPU / r.NCPU - mem := float64(config.LimitMemory) / float64(r.MemTotal) * 100 + cpu := config.LimitCPU * r.NCPU // TODO: in the vod branch this changed if system-wide limits are given + mem := config.LimitMemory if len(nodeid) == 0 { if r.CPU+cpu < r.CPULimit && r.Mem+mem < r.MemLimit { @@ -629,18 +616,9 @@ func synchronize(want []app.Config, have []proxy.ProcessConfig, resources map[st continue } - if r.CPU+r.Mem < resources[nodeid].CPU+resources[nodeid].Mem { + if r.CPU < resources[nodeid].CPU && r.Mem <= resources[nodeid].Mem { nodeid = id } - /* - if r.CPU < resources[nodeid].CPU && r.Mem < resources[nodeid].Mem { - nodeid = id 
- } else if r.Mem < resources[nodeid].Mem { - nodeid = id - } else if r.CPU < resources[nodeid].CPU { - nodeid = id - } - */ } } @@ -653,8 +631,8 @@ func synchronize(want []app.Config, have []proxy.ProcessConfig, resources map[st // Adjust the resources r, ok := resources[nodeid] if ok { - r.CPU += config.LimitCPU / r.NCPU - r.Mem += float64(config.LimitMemory) / float64(r.MemTotal) * 100 + r.CPU += config.LimitCPU * r.NCPU // TODO: in the vod branch this changed if system-wide limits are given + r.Mem += config.LimitMemory resources[nodeid] = r } } else { @@ -719,8 +697,6 @@ func createReferenceAffinityMap(processes []proxy.ProcessConfig) map[string][]re // rebalance returns a list of operations that will move running processes away from nodes // that are overloaded. func rebalance(have []proxy.ProcessConfig, resources map[string]proxy.NodeResources) []interface{} { - normalizeProcessesAndResources(have, resources) - // Group the processes by node processNodeMap := map[string][]proxy.ProcessConfig{} diff --git a/cluster/leader_test.go b/cluster/leader_test.go index 7add6ae8..d3516a77 100644 --- a/cluster/leader_test.go +++ b/cluster/leader_test.go @@ -9,106 +9,12 @@ import ( "github.com/stretchr/testify/require" ) -func TestNormalize(t *testing.T) { - have := []proxy.ProcessConfig{ - { - NodeID: "node2", - Order: "start", - State: "running", - CPU: 12, - Mem: 5, - Runtime: 42, - Config: &app.Config{ - ID: "foobar", - }, - }, - } - - resources := map[string]proxy.NodeResources{ - "node1": { - NCPU: 2, - CPU: 7, - Mem: 35, - MemTotal: 2 * 1024 * 1024 * 1024, // 2GB - }, - "node2": { - NCPU: 1, - CPU: 75, - Mem: 11, - MemTotal: 4 * 1024 * 1024 * 1024, // 4GB - }, - } - - normalizeProcessesAndResources(have, resources) - - require.Equal(t, []proxy.ProcessConfig{ - { - NodeID: "node2", - Order: "start", - State: "running", - CPU: 56, - Mem: 5, - Runtime: 42, - Config: &app.Config{ - ID: "foobar", - }, - }, - }, have) - - require.Equal(t, map[string]proxy.NodeResources{ 
- "node1": { - NCPU: 2, - CPU: 7, - Mem: 67.5, - MemTotal: 4 * 1024 * 1024 * 1024, - }, - "node2": { - NCPU: 2, - CPU: 87.5, - Mem: 11, - MemTotal: 4 * 1024 * 1024 * 1024, - }, - }, resources) - - // test idempotency - normalizeProcessesAndResources(have, resources) - - require.Equal(t, []proxy.ProcessConfig{ - { - NodeID: "node2", - Order: "start", - State: "running", - CPU: 56, - Mem: 5, - Runtime: 42, - Config: &app.Config{ - ID: "foobar", - }, - }, - }, have) - - require.Equal(t, map[string]proxy.NodeResources{ - "node1": { - NCPU: 2, - CPU: 7, - Mem: 67.5, - MemTotal: 4 * 1024 * 1024 * 1024, - }, - "node2": { - NCPU: 2, - CPU: 87.5, - Mem: 11, - MemTotal: 4 * 1024 * 1024 * 1024, - }, - }, resources) -} - func TestSynchronizeAdd(t *testing.T) { want := []app.Config{ { ID: "foobar", LimitCPU: 10, - LimitMemory: 50 * 1024 * 1024, + LimitMemory: 50, }, } @@ -118,8 +24,7 @@ func TestSynchronizeAdd(t *testing.T) { "node1": { NCPU: 1, CPU: 7, - Mem: 65, - MemTotal: 4 * 1024 * 1024 * 1024, + Mem: 35, CPULimit: 90, MemLimit: 90, }, @@ -127,7 +32,6 @@ func TestSynchronizeAdd(t *testing.T) { NCPU: 1, CPU: 85, Mem: 11, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -141,7 +45,7 @@ func TestSynchronizeAdd(t *testing.T) { config: &app.Config{ ID: "foobar", LimitCPU: 10, - LimitMemory: 50 * 1024 * 1024, + LimitMemory: 50, }, }, }, stack) @@ -150,8 +54,7 @@ func TestSynchronizeAdd(t *testing.T) { "node1": { NCPU: 1, CPU: 17, - Mem: 65 + (50. / (4. 
* 1024) * 100), - MemTotal: 4 * 1024 * 1024 * 1024, + Mem: 85, CPULimit: 90, MemLimit: 90, }, @@ -159,7 +62,6 @@ func TestSynchronizeAdd(t *testing.T) { NCPU: 1, CPU: 85, Mem: 11, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -172,13 +74,13 @@ func TestSynchronizeAddReferenceAffinity(t *testing.T) { ID: "foobar", Reference: "barfoo", LimitCPU: 10, - LimitMemory: 50 * 1024 * 1024, + LimitMemory: 20, }, { ID: "foobar2", Reference: "barfoo", LimitCPU: 10, - LimitMemory: 50 * 1024 * 1024, + LimitMemory: 30, }, } @@ -202,7 +104,6 @@ func TestSynchronizeAddReferenceAffinity(t *testing.T) { NCPU: 1, CPU: 1, Mem: 1, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -210,7 +111,6 @@ func TestSynchronizeAddReferenceAffinity(t *testing.T) { NCPU: 1, CPU: 1, Mem: 1, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -225,7 +125,7 @@ func TestSynchronizeAddReferenceAffinity(t *testing.T) { ID: "foobar2", Reference: "barfoo", LimitCPU: 10, - LimitMemory: 50 * 1024 * 1024, + LimitMemory: 30, }, }, }, stack) @@ -236,7 +136,7 @@ func TestSynchronizeAddLimit(t *testing.T) { { ID: "foobar", LimitCPU: 10, - LimitMemory: 50 * 1024 * 1024, + LimitMemory: 5, }, } @@ -247,7 +147,6 @@ func TestSynchronizeAddLimit(t *testing.T) { NCPU: 1, CPU: 81, Mem: 72, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -255,7 +154,6 @@ func TestSynchronizeAddLimit(t *testing.T) { NCPU: 1, CPU: 79, Mem: 72, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -269,7 +167,7 @@ func TestSynchronizeAddLimit(t *testing.T) { config: &app.Config{ ID: "foobar", LimitCPU: 10, - LimitMemory: 50 * 1024 * 1024, + LimitMemory: 5, }, }, }, stack) @@ -279,15 +177,13 @@ func TestSynchronizeAddLimit(t *testing.T) { NCPU: 1, CPU: 81, Mem: 72, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, "node2": { NCPU: 1, CPU: 89, - Mem: 72 + (50. / (4. 
* 1024) * 100), - MemTotal: 4 * 1024 * 1024 * 1024, + Mem: 77, CPULimit: 90, MemLimit: 90, }, @@ -299,7 +195,7 @@ func TestSynchronizeAddNoResourcesCPU(t *testing.T) { { ID: "foobar", LimitCPU: 30, - LimitMemory: 50 * 1024 * 1024, + LimitMemory: 5, }, } @@ -310,7 +206,6 @@ func TestSynchronizeAddNoResourcesCPU(t *testing.T) { NCPU: 1, CPU: 81, Mem: 72, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -318,7 +213,6 @@ func TestSynchronizeAddNoResourcesCPU(t *testing.T) { NCPU: 1, CPU: 79, Mem: 72, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -339,7 +233,7 @@ func TestSynchronizeAddNoResourcesMemory(t *testing.T) { { ID: "foobar", LimitCPU: 1, - LimitMemory: 2 * 1024 * 1024 * 1024, + LimitMemory: 50, }, } @@ -350,7 +244,6 @@ func TestSynchronizeAddNoResourcesMemory(t *testing.T) { NCPU: 1, CPU: 81, Mem: 72, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -358,7 +251,6 @@ func TestSynchronizeAddNoResourcesMemory(t *testing.T) { NCPU: 1, CPU: 79, Mem: 72, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -388,7 +280,6 @@ func TestSynchronizeAddNoLimits(t *testing.T) { NCPU: 1, CPU: 81, Mem: 72, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -396,7 +287,6 @@ func TestSynchronizeAddNoLimits(t *testing.T) { NCPU: 1, CPU: 79, Mem: 72, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -434,7 +324,6 @@ func TestSynchronizeRemove(t *testing.T) { NCPU: 1, CPU: 7, Mem: 65, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -442,7 +331,6 @@ func TestSynchronizeRemove(t *testing.T) { NCPU: 1, CPU: 85, Mem: 11, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -462,7 +350,6 @@ func TestSynchronizeRemove(t *testing.T) { NCPU: 1, CPU: 7, Mem: 65, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -470,7 +357,6 @@ func TestSynchronizeRemove(t *testing.T) { NCPU: 1, CPU: 73, Mem: 6, - MemTotal: 4 * 
1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -482,7 +368,7 @@ func TestSynchronizeAddRemove(t *testing.T) { { ID: "foobar1", LimitCPU: 10, - LimitMemory: 50 * 1024 * 1024, + LimitMemory: 5, }, } @@ -505,7 +391,6 @@ func TestSynchronizeAddRemove(t *testing.T) { NCPU: 1, CPU: 7, Mem: 65, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -513,7 +398,6 @@ func TestSynchronizeAddRemove(t *testing.T) { NCPU: 1, CPU: 85, Mem: 11, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -531,7 +415,7 @@ func TestSynchronizeAddRemove(t *testing.T) { config: &app.Config{ ID: "foobar1", LimitCPU: 10, - LimitMemory: 50 * 1024 * 1024, + LimitMemory: 5, }, }, }, stack) @@ -540,8 +424,7 @@ func TestSynchronizeAddRemove(t *testing.T) { "node1": { NCPU: 1, CPU: 17, - Mem: 65 + (50. / (4. * 1024) * 100), - MemTotal: 4 * 1024 * 1024 * 1024, + Mem: 70, CPULimit: 90, MemLimit: 90, }, @@ -549,7 +432,6 @@ func TestSynchronizeAddRemove(t *testing.T) { NCPU: 1, CPU: 73, Mem: 6, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -587,7 +469,6 @@ func TestRebalanceNothingToDo(t *testing.T) { NCPU: 1, CPU: 42, Mem: 35, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -595,7 +476,6 @@ func TestRebalanceNothingToDo(t *testing.T) { NCPU: 1, CPU: 37, Mem: 11, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -648,7 +528,6 @@ func TestRebalanceOverload(t *testing.T) { NCPU: 1, CPU: 91, Mem: 35, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -656,7 +535,6 @@ func TestRebalanceOverload(t *testing.T) { NCPU: 1, CPU: 15, Mem: 11, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -681,7 +559,6 @@ func TestRebalanceOverload(t *testing.T) { NCPU: 1, CPU: 74, Mem: 4, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -689,7 +566,6 @@ func TestRebalanceOverload(t *testing.T) { NCPU: 1, CPU: 32, Mem: 42, - MemTotal: 4 * 1024 * 1024 * 1024, 
CPULimit: 90, MemLimit: 90, }, @@ -738,7 +614,6 @@ func TestRebalanceSkip(t *testing.T) { NCPU: 1, CPU: 91, Mem: 35, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -746,7 +621,6 @@ func TestRebalanceSkip(t *testing.T) { NCPU: 1, CPU: 15, Mem: 92, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -779,7 +653,6 @@ func TestRebalanceSkip(t *testing.T) { NCPU: 1, CPU: 91, Mem: 35, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -787,7 +660,6 @@ func TestRebalanceSkip(t *testing.T) { NCPU: 1, CPU: 15, Mem: 92, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -862,7 +734,6 @@ func TestRebalanceReferenceAffinity(t *testing.T) { NCPU: 1, CPU: 90, Mem: 90, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -870,7 +741,6 @@ func TestRebalanceReferenceAffinity(t *testing.T) { NCPU: 1, CPU: 1, Mem: 1, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -878,7 +748,6 @@ func TestRebalanceReferenceAffinity(t *testing.T) { NCPU: 1, CPU: 1, Mem: 1, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -904,7 +773,6 @@ func TestRebalanceReferenceAffinity(t *testing.T) { NCPU: 1, CPU: 89, Mem: 89, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -912,7 +780,6 @@ func TestRebalanceReferenceAffinity(t *testing.T) { NCPU: 1, CPU: 1, Mem: 1, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, @@ -920,7 +787,6 @@ func TestRebalanceReferenceAffinity(t *testing.T) { NCPU: 1, CPU: 2, Mem: 2, - MemTotal: 4 * 1024 * 1024 * 1024, CPULimit: 90, MemLimit: 90, }, diff --git a/cluster/proxy/node.go b/cluster/proxy/node.go index b0993428..6fea2539 100644 --- a/cluster/proxy/node.go +++ b/cluster/proxy/node.go @@ -51,12 +51,11 @@ type NodeFiles struct { } type NodeResources struct { - NCPU float64 - CPU float64 - CPULimit float64 - Mem float64 - MemTotal float64 - MemLimit float64 + NCPU float64 // Number of CPU on this node + 
CPU float64 // Current CPU load, 0-100*ncpu + CPULimit float64 // Defined CPU load limit, 0-100*ncpu + Mem uint64 // Currently used memory in bytes + MemLimit uint64 // Defined memory limit in bytes } type NodeState struct { @@ -91,8 +90,8 @@ type node struct { resources struct { ncpu float64 cpu float64 - mem float64 - memTotal float64 + mem uint64 + memTotal uint64 } state nodeState @@ -262,15 +261,14 @@ func (n *node) Connect() error { n.stateLock.Lock() n.resources.cpu = 100 n.resources.ncpu = 1 - n.resources.mem = 100 - n.resources.memTotal = 1 + n.resources.mem = 0 n.stateLock.Unlock() } cpu_ncpu := .0 cpu_idle := .0 - mem_total := .0 - mem_free := .0 + mem_total := uint64(0) + mem_free := uint64(0) for _, x := range metrics.Metrics { if x.Name == "cpu_idle" { @@ -278,21 +276,21 @@ func (n *node) Connect() error { } else if x.Name == "cpu_ncpu" { cpu_ncpu = x.Values[0].Value } else if x.Name == "mem_total" { - mem_total = x.Values[0].Value + mem_total = uint64(x.Values[0].Value) } else if x.Name == "mem_free" { - mem_free = x.Values[0].Value + mem_free = uint64(x.Values[0].Value) } } n.stateLock.Lock() n.resources.ncpu = cpu_ncpu - n.resources.cpu = 100 - cpu_idle + n.resources.cpu = (100 - cpu_idle) * cpu_ncpu if mem_total != 0 { - n.resources.mem = (mem_total - mem_free) / mem_total * 100 + n.resources.mem = mem_total - mem_free n.resources.memTotal = mem_total } else { - n.resources.mem = 100 - n.resources.memTotal = 1 + n.resources.mem = 0 + n.resources.memTotal = 0 } n.lastContact = time.Now() n.stateLock.Unlock() @@ -414,10 +412,9 @@ func (n *node) State() NodeState { Resources: NodeResources{ NCPU: n.resources.ncpu, CPU: n.resources.cpu, - CPULimit: 90, + CPULimit: 90 * n.resources.ncpu, Mem: n.resources.mem, - MemTotal: n.resources.memTotal, - MemLimit: 90, + MemLimit: uint64(float64(n.resources.memTotal) * 0.9), }, } @@ -629,15 +626,15 @@ func (n *node) ProcessList() ([]ProcessConfig, error) { NodeID: n.ID(), Order: p.State.Order, State: 
p.State.State, - Mem: float64(p.State.Memory) / float64(n.resources.memTotal), + Mem: p.State.Memory, Runtime: time.Duration(p.State.Runtime) * time.Second, Config: p.Config.Marshal(), } if x, err := p.State.CPU.Float64(); err == nil { - process.CPU = x / n.resources.ncpu + process.CPU = x * n.resources.ncpu } else { - process.CPU = 100 + process.CPU = 100 * n.resources.ncpu } processes = append(processes, process) diff --git a/cluster/proxy/proxy.go b/cluster/proxy/proxy.go index 55f5f15e..52a41ea2 100644 --- a/cluster/proxy/proxy.go +++ b/cluster/proxy/proxy.go @@ -453,8 +453,8 @@ type ProcessConfig struct { NodeID string Order string State string - CPU float64 - Mem float64 + CPU float64 // Current CPU load of this process, 0-100*ncpu + Mem uint64 // Currently consumed memory of this process in bytes Runtime time.Duration Config *app.Config } diff --git a/http/api/cluster.go b/http/api/cluster.go index 513ce242..807394b5 100644 --- a/http/api/cluster.go +++ b/http/api/cluster.go @@ -12,8 +12,8 @@ type ClusterNode struct { LastContact int64 `json:"last_contact"` // unix timestamp Latency float64 `json:"latency_ms"` // milliseconds State string `json:"state"` - CPU float64 `json:"cpu_used"` // percent - Mem float64 `json:"mem_used"` // percent + CPU float64 `json:"cpu_used"` // percent 0-100*ncpu + Mem uint64 `json:"mem_used" format:"uint64"` // bytes } type ClusterNodeFiles struct { diff --git a/restream/app/process.go b/restream/app/process.go index 5e301c39..ab78d0f5 100644 --- a/restream/app/process.go +++ b/restream/app/process.go @@ -44,7 +44,7 @@ type Config struct { ReconnectDelay uint64 `json:"reconnect_delay_seconds"` // seconds Autostart bool `json:"autostart"` StaleTimeout uint64 `json:"stale_timeout_seconds"` // seconds - LimitCPU float64 `json:"limit_cpu_usage"` // percent + LimitCPU float64 `json:"limit_cpu_usage"` // percent 0-100 LimitMemory uint64 `json:"limit_memory_bytes"` // bytes LimitWaitFor uint64 `json:"limit_waitfor_seconds"` // seconds }