Commit 457af26

Remove memory statistics
1 parent acf4b23 commit 457af26

3 files changed: 1 addition, 48 deletions

core/memallocator.go

Lines changed: 0 additions & 15 deletions

@@ -2,7 +2,6 @@ package core
 
 import (
     "errors"
-    "sync/atomic"
 )
 
 // Define a memory allocator
@@ -11,20 +10,6 @@ type MemAllocator interface {
     Inner(buf []byte) []byte
     Protect(buf []byte, readonly bool) error
     Free(buf []byte) error
-    Stats() *MemStats
-}
-
-// AllocatorStatistics statistics about memory allocations and errors
-type MemStats struct {
-    PageAllocs        atomic.Uint64
-    PageAllocErrors   atomic.Uint64
-    PageFrees         atomic.Uint64
-    PageFreeErrors    atomic.Uint64
-    ObjectAllocs      atomic.Uint64
-    ObjectAllocErrors atomic.Uint64
-    ObjectFrees       atomic.Uint64
-    ObjectFreeErrors  atomic.Uint64
-    Slabs             atomic.Uint64
 }
 
 var (
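
For reference, a minimal sketch of the MemAllocator interface as it reads after this commit, reconstructed from the hunk above. Alloc is not visible in the diff context and is assumed from the pageAllocator and slabAllocator implementations in the other two files; the error variables behind var ( are omitted.

// Sketch reconstructed from the diff above; Alloc's presence on the interface is an assumption.
type MemAllocator interface {
    Alloc(size int) ([]byte, error)
    Inner(buf []byte) []byte
    Protect(buf []byte, readonly bool) error
    Free(buf []byte) error
    // Stats() *MemStats and the MemStats type no longer exist after this commit.
}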

core/memallocator_page.go

Lines changed: 1 addition & 16 deletions

@@ -8,27 +8,23 @@ import (
 )
 
 type pageAllocator struct {
-    stats   *MemStats
     objects map[int]*pageObject
     sync.Mutex
 }
 
 func NewPageAllocator() MemAllocator {
     a := &pageAllocator{
         objects: make(map[int]*pageObject),
-        stats:   &MemStats{},
     }
     return a
 }
 
 func (a *pageAllocator) Alloc(size int) ([]byte, error) {
-    a.stats.ObjectAllocs.Add(1)
     if size < 1 {
         return nil, ErrNullAlloc
     }
     o, err := a.newPageObject(size)
     if err != nil {
-        a.stats.ObjectAllocErrors.Add(1)
         return nil, err
     }
 
@@ -79,12 +75,9 @@ func (a *pageAllocator) Inner(buf []byte) []byte {
 }
 
 func (a *pageAllocator) Free(buf []byte) error {
-    a.stats.ObjectFrees.Add(1)
-
     // Determine the address of the buffer we should free
     o, found := a.pop(buf)
     if !found {
-        a.stats.ObjectFreeErrors.Add(1)
         return ErrBufferNotOwnedByAllocator
     }
 
@@ -94,19 +87,13 @@
     }
 
     // Free the related memory
-    a.stats.PageFrees.Add(uint64(len(o.memory) / pageSize))
     if err := memcall.Free(o.memory); err != nil {
-        a.stats.PageFreeErrors.Add(1)
         return err
     }
 
     return nil
 }
 
-func (a *pageAllocator) Stats() *MemStats {
-    return a.stats
-}
-
 // *** INTERNAL FUNCTIONS *** //
 func (a *pageAllocator) lookup(buf []byte) (*pageObject, bool) {
     if len(buf) == 0 {
@@ -158,10 +145,8 @@ func (a *pageAllocator) newPageObject(size int) (*pageObject, error) {
     innerLen := roundToPageSize(size)
 
     // Allocate the total needed memory
-    a.stats.PageAllocs.Add(uint64(2 + innerLen/pageSize))
-    memory, err := memcall.Alloc((2 * pageSize) + innerLen)
+    memory, err := memcall.Alloc(2*pageSize + innerLen)
     if err != nil {
-        a.stats.PageAllocErrors.Add(1)
         return nil, err
     }
 
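The only added line in this commit is the reformatted memcall.Alloc call in the last hunk; the allocated size itself is unchanged. A minimal sketch of how that size appears to be derived, assuming roundToPageSize rounds the request up to a whole page and the two extra pages serve as guard pages around the data region (the helper name totalMappingSize and the guard-page interpretation are illustrative assumptions, not code from this repository):

// Hypothetical helper for illustration only.
func totalMappingSize(size, pageSize int) int {
    // round the requested size up to a multiple of pageSize (as roundToPageSize presumably does)
    innerLen := ((size + pageSize - 1) / pageSize) * pageSize
    // plus one page before and one page after the usable region
    return 2*pageSize + innerLen
}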

core/memallocator_slab.go

Lines changed: 0 additions & 17 deletions

@@ -38,7 +38,6 @@ func WithMinCanarySize(size int) SlabOption {
 // Memory allocator implementation
 type slabAllocator struct {
     maxSlabSize int
-    stats       *MemStats
     cfg         *SlabAllocatorConfig
     allocator   *pageAllocator
     slabs       []*slab
@@ -61,18 +60,15 @@ func NewSlabAllocator(options ...SlabOption) MemAllocator {
     // Setup the allocator and initialize the slabs
     a := &slabAllocator{
         maxSlabSize: cfg.Sizes[len(cfg.Sizes)-1],
-        stats:       &MemStats{},
         cfg:         cfg,
         slabs:       make([]*slab, 0, len(cfg.Sizes)),
         allocator: &pageAllocator{
             objects: make(map[int]*pageObject),
-            stats:   &MemStats{},
         },
     }
     for _, size := range cfg.Sizes {
         s := &slab{
             objSize:   size,
-            stats:     a.stats,
             allocator: a.allocator,
         }
         a.slabs = append(a.slabs, s)
@@ -177,10 +173,6 @@ func (a *slabAllocator) Free(buf []byte) error {
     return s.free(buf)
 }
 
-func (a *slabAllocator) Stats() *MemStats {
-    return a.stats
-}
-
 // *** INTERNAL FUNCTIONS *** //
 
 // Page implementation
@@ -232,7 +224,6 @@ func newPage(page []byte, size int) *slabPage {
 // Slab is a container for all Pages serving the same size
 type slab struct {
     objSize   int
-    stats     *MemStats
     allocator *pageAllocator
     pages     []*slabPage
     sync.Mutex
@@ -255,10 +246,8 @@ func (s *slab) alloc(size int) ([]byte, error) {
         // Use the page allocator to get a new guarded memory page
         page, err := s.allocator.Alloc(pageSize - s.objSize)
         if err != nil {
-            s.stats.PageAllocErrors.Add(1)
             return nil, err
         }
-        s.stats.PageAllocs.Store(s.allocator.stats.PageAllocs.Load())
         c = newPage(page, s.objSize)
         s.pages = append(s.pages, c)
     }
@@ -268,7 +257,6 @@ func (s *slab) alloc(size int) ([]byte, error) {
     c.head = c.head.next
     c.used++
 
-    s.stats.ObjectAllocs.Add(1)
     data := getBufferPart(c.buffer, obj.offset, size)
     canary := getBufferPart(c.buffer, obj.offset+size, s.objSize-size)
 
@@ -309,11 +297,8 @@ func (s *slab) free(buf []byte) error {
         return ErrBufferNotOwnedByAllocator
     }
 
-    s.stats.ObjectFrees.Add(1)
-
     // Wipe the buffer including the canary check
     if err := s.wipe(c, offset, len(buf)); err != nil {
-        s.stats.ObjectFreeErrors.Add(1)
         return err
     }
     obj := &slabObject{
@@ -327,9 +312,7 @@ func (s *slab) free(buf []byte) error {
     // free the underlying memory
     if c.used == 0 {
         err := s.allocator.Free(c.buffer)
-        s.stats.PageFrees.Store(s.allocator.stats.PageFrees.Load())
         if err != nil {
-            s.stats.PageFreeErrors.Add(1)
             return err
         }
 
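With MemStats and the Stats() methods removed, callers that still want allocation counters can keep them outside the allocator. A minimal sketch of such a wrapper, assuming the MemAllocator interface reconstructed above; countingAllocator and its fields are hypothetical and not part of this change:

package core

import "sync/atomic"

// Hypothetical wrapper (not in this repository): counts successful allocations
// and frees around any MemAllocator, now that the allocators no longer do so.
type countingAllocator struct {
    inner  MemAllocator
    allocs atomic.Uint64
    frees  atomic.Uint64
}

func (c *countingAllocator) Alloc(size int) ([]byte, error) {
    buf, err := c.inner.Alloc(size)
    if err == nil {
        c.allocs.Add(1)
    }
    return buf, err
}

func (c *countingAllocator) Free(buf []byte) error {
    err := c.inner.Free(buf)
    if err == nil {
        c.frees.Add(1)
    }
    return err
}

func (c *countingAllocator) Inner(buf []byte) []byte { return c.inner.Inner(buf) }

func (c *countingAllocator) Protect(buf []byte, readonly bool) error {
    return c.inner.Protect(buf, readonly)
}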