Skip to content

Commit 147dfe3

Browse files
refactor: rename CGroup to cgroup (#23)
Rename all occurrences but one to `Cgroup` instead of `CGroup`. `WithISCGroupV2` is an exported function (although it appears I must've removed usage of it in a recent commit), so we cannot rename/remove this without bumping the major version.
1 parent 50cfe85 commit 147dfe3

File tree

7 files changed

+32
-32
lines changed

7 files changed

+32
-32
lines changed

cgroup.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -35,7 +35,7 @@ type cgroupStatter interface {
3535
memory(p Prefix) (*Result, error)
3636
}
3737

38-
func (s *Statter) getCGroupStatter() (cgroupStatter, error) {
38+
func (s *Statter) getCgroupStatter() (cgroupStatter, error) {
3939
isContainerized, err := s.IsContainerized()
4040
if err != nil {
4141
return nil, err
@@ -44,7 +44,7 @@ func (s *Statter) getCGroupStatter() (cgroupStatter, error) {
4444
return nil, errNotContainerized
4545
}
4646

47-
if s.isCGroupV2() {
47+
if s.isCgroupV2() {
4848
cgroupPath, err := currentProcCgroup(s.fs)
4949
if err != nil {
5050
return nil, xerrors.Errorf("get current cgroup: %w", err)
@@ -105,7 +105,7 @@ func (s *Statter) ContainerCPU() (*Result, error) {
105105
return r, nil
106106
}
107107

108-
func (s *Statter) isCGroupV2() bool {
108+
func (s *Statter) isCgroupV2() bool {
109109
return s.cgroupV2Detector(s.fs)
110110
}
111111

cgroup_linux.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ import (
44
"syscall"
55
)
66

7-
func isCGroupV2(path string) bool {
7+
func isCgroupV2(path string) bool {
88
var stat syscall.Statfs_t
99
if err := syscall.Statfs(path, &stat); err != nil {
1010
return false

cgroup_other.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,6 +2,6 @@
22

33
package clistat
44

5-
func isCGroupV2(path string) bool {
5+
func isCgroupV2(_ string) bool {
66
return false
77
}

cgroupv1.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ import (
1111
"tailscale.com/types/ptr"
1212
)
1313

14-
// Paths for CGroupV1.
14+
// Paths for CgroupV1.
1515
// Ref: https://www.kernel.org/doc/Documentation/cgroup-v1/cpuacct.txt
1616
const (
1717
// CPU usage of all tasks in cgroup in nanoseconds.

cgroupv2.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@ import (
1111
"tailscale.com/types/ptr"
1212
)
1313

14-
// Paths for CGroupV2.
14+
// Paths for CgroupV2.
1515
// Ref: https://docs.kernel.org/admin-guide/cgroup-v2.html
1616
const (
1717
// Contains quota and period in microseconds separated by a space.

stat.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -193,7 +193,7 @@ func New(opts ...Option) (*Statter, error) {
193193
<-time.After(d)
194194
},
195195
cgroupV2Detector: func(_ afero.Fs) bool {
196-
return isCGroupV2(cgroupRootPath)
196+
return isCgroupV2(cgroupRootPath)
197197
},
198198
}
199199
for _, opt := range opts {
@@ -202,7 +202,7 @@ func New(opts ...Option) (*Statter, error) {
202202

203203
s.nproc = s.numCPU()
204204

205-
statter, err := s.getCGroupStatter()
205+
statter, err := s.getCgroupStatter()
206206
if err != nil && !errors.Is(err, errNotContainerized) {
207207
return nil, xerrors.Errorf("get cgroup statter: %v", err)
208208
}

stat_internal_test.go

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,7 @@ func TestStatter(t *testing.T) {
132132
}
133133
}
134134

135-
withIsCGroupV2 := func(state bool) Option {
135+
withIsCgroupV2 := func(state bool) Option {
136136
return func(s *Statter) {
137137
s.cgroupV2Detector = func(_ afero.Fs) bool {
138138
return state
@@ -143,7 +143,7 @@ func TestStatter(t *testing.T) {
143143
// For container-specific measurements, everything we need
144144
// can be read from the filesystem. We control the FS, so
145145
// we control the data.
146-
t.Run("CGroupV1", func(t *testing.T) {
146+
t.Run("CgroupV1", func(t *testing.T) {
147147
t.Parallel()
148148

149149
t.Run("ContainerCPU/Limit", func(t *testing.T) {
@@ -154,7 +154,7 @@ func TestStatter(t *testing.T) {
154154
// Fake 1 second in ns of usage
155155
mungeFS(t, fs, cgroupV1CPUAcctUsage, "100000000")
156156
}
157-
s, err := New(WithFS(fs), withWait(fakeWait), withIsCGroupV2(false))
157+
s, err := New(WithFS(fs), withWait(fakeWait), withIsCgroupV2(false))
158158
require.NoError(t, err)
159159

160160
cpu, err := s.ContainerCPU()
@@ -175,7 +175,7 @@ func TestStatter(t *testing.T) {
175175
// Fake 1 second in ns of usage
176176
mungeFS(t, fs, cgroupV1CPUAcctUsage, "100000000")
177177
}
178-
s, err := New(WithFS(fs), withNproc(2), withWait(fakeWait), withIsCGroupV2(false))
178+
s, err := New(WithFS(fs), withNproc(2), withWait(fakeWait), withIsCgroupV2(false))
179179
require.NoError(t, err)
180180

181181
cpu, err := s.ContainerCPU()
@@ -195,7 +195,7 @@ func TestStatter(t *testing.T) {
195195
// Fake 1 second in ns of usage
196196
mungeFS(t, fs, "/sys/fs/cgroup/cpuacct/cpuacct.usage", "100000000")
197197
}
198-
s, err := New(WithFS(fs), withNproc(2), withWait(fakeWait), withIsCGroupV2(false))
198+
s, err := New(WithFS(fs), withNproc(2), withWait(fakeWait), withIsCgroupV2(false))
199199
require.NoError(t, err)
200200

201201
cpu, err := s.ContainerCPU()
@@ -212,7 +212,7 @@ func TestStatter(t *testing.T) {
212212
t.Parallel()
213213

214214
fs := initFS(t, fsContainerCgroupV1)
215-
s, err := New(WithFS(fs), withNoWait, withIsCGroupV2(false))
215+
s, err := New(WithFS(fs), withNoWait, withIsCgroupV2(false))
216216
require.NoError(t, err)
217217

218218
mem, err := s.ContainerMemory(PrefixDefault)
@@ -229,7 +229,7 @@ func TestStatter(t *testing.T) {
229229
t.Parallel()
230230

231231
fs := initFS(t, fsContainerCgroupV1NoLimit)
232-
s, err := New(WithFS(fs), withNoWait, withIsCGroupV2(false))
232+
s, err := New(WithFS(fs), withNoWait, withIsCgroupV2(false))
233233
require.NoError(t, err)
234234

235235
mem, err := s.ContainerMemory(PrefixDefault)
@@ -245,7 +245,7 @@ func TestStatter(t *testing.T) {
245245
t.Parallel()
246246

247247
fs := initFS(t, fsContainerCgroupV1DockerNoMemoryLimit)
248-
s, err := New(WithFS(fs), withNoWait, withIsCGroupV2(false))
248+
s, err := New(WithFS(fs), withNoWait, withIsCgroupV2(false))
249249
require.NoError(t, err)
250250

251251
mem, err := s.ContainerMemory(PrefixDefault)
@@ -258,7 +258,7 @@ func TestStatter(t *testing.T) {
258258
})
259259
})
260260

261-
t.Run("CGroupV2", func(t *testing.T) {
261+
t.Run("CgroupV2", func(t *testing.T) {
262262
t.Parallel()
263263

264264
t.Run("ContainerCPU/Limit", func(t *testing.T) {
@@ -268,7 +268,7 @@ func TestStatter(t *testing.T) {
268268
fakeWait := func(time.Duration) {
269269
mungeFS(t, fs, filepath.Join(cgroupRootPath, cgroupV2Path, cgroupV2CPUStat), "usage_usec 100000")
270270
}
271-
s, err := New(WithFS(fs), withWait(fakeWait), withIsCGroupV2(true))
271+
s, err := New(WithFS(fs), withWait(fakeWait), withIsCgroupV2(true))
272272

273273
require.NoError(t, err)
274274
cpu, err := s.ContainerCPU()
@@ -288,7 +288,7 @@ func TestStatter(t *testing.T) {
288288
fakeWait := func(time.Duration) {
289289
mungeFS(t, fs, filepath.Join(cgroupRootPath, cgroupV2Path, cgroupV2CPUStat), "usage_usec 100000")
290290
}
291-
s, err := New(WithFS(fs), withNproc(2), withWait(fakeWait), withIsCGroupV2(true))
291+
s, err := New(WithFS(fs), withNproc(2), withWait(fakeWait), withIsCgroupV2(true))
292292
require.NoError(t, err)
293293

294294
cpu, err := s.ContainerCPU()
@@ -304,7 +304,7 @@ func TestStatter(t *testing.T) {
304304
t.Parallel()
305305

306306
fs := initFS(t, fsContainerCgroupV2)
307-
s, err := New(WithFS(fs), withNoWait, withIsCGroupV2(true))
307+
s, err := New(WithFS(fs), withNoWait, withIsCgroupV2(true))
308308
require.NoError(t, err)
309309

310310
mem, err := s.ContainerMemory(PrefixDefault)
@@ -321,7 +321,7 @@ func TestStatter(t *testing.T) {
321321
t.Parallel()
322322

323323
fs := initFS(t, fsContainerCgroupV2NoLimit)
324-
s, err := New(WithFS(fs), withNoWait, withIsCGroupV2(true))
324+
s, err := New(WithFS(fs), withNoWait, withIsCgroupV2(true))
325325
require.NoError(t, err)
326326

327327
mem, err := s.ContainerMemory(PrefixDefault)
@@ -343,7 +343,7 @@ func TestStatter(t *testing.T) {
343343
fakeWait := func(time.Duration) {
344344
mungeFS(t, fs, filepath.Join(cgroupRootPath, fsContainerCgroupV2KubernetesPath, cgroupV2CPUStat), "usage_usec 100000")
345345
}
346-
s, err := New(WithFS(fs), withWait(fakeWait), withIsCGroupV2(true))
346+
s, err := New(WithFS(fs), withWait(fakeWait), withIsCgroupV2(true))
347347
require.NoError(t, err)
348348

349349
cpu, err := s.ContainerCPU()
@@ -364,7 +364,7 @@ func TestStatter(t *testing.T) {
364364
fakeWait := func(time.Duration) {
365365
mungeFS(t, fs, filepath.Join(cgroupRootPath, fsContainerCgroupV2KubernetesPath, cgroupV2CPUStat), "usage_usec 100000")
366366
}
367-
s, err := New(WithFS(fs), withWait(fakeWait), withIsCGroupV2(true))
367+
s, err := New(WithFS(fs), withWait(fakeWait), withIsCgroupV2(true))
368368
require.NoError(t, err)
369369

370370
cpu, err := s.ContainerCPU()
@@ -384,7 +384,7 @@ func TestStatter(t *testing.T) {
384384
fakeWait := func(time.Duration) {
385385
mungeFS(t, fs, filepath.Join(cgroupRootPath, fsContainerCgroupV2KubernetesPath, cgroupV2CPUStat), "usage_usec 100000")
386386
}
387-
s, err := New(WithFS(fs), withWait(fakeWait), withIsCGroupV2(true))
387+
s, err := New(WithFS(fs), withWait(fakeWait), withIsCgroupV2(true))
388388
require.NoError(t, err)
389389

390390
cpu, err := s.ContainerCPU()
@@ -400,7 +400,7 @@ func TestStatter(t *testing.T) {
400400
t.Parallel()
401401

402402
fs := initFS(t, fsContainerCgroupV2KubernetesWithLimits)
403-
s, err := New(WithFS(fs), withNoWait, withIsCGroupV2(true))
403+
s, err := New(WithFS(fs), withNoWait, withIsCgroupV2(true))
404404
require.NoError(t, err)
405405

406406
mem, err := s.ContainerMemory(PrefixDefault)
@@ -419,7 +419,7 @@ func TestStatter(t *testing.T) {
419419
fs := initFS(t, fsContainerCgroupV2Kubernetes)
420420
mungeFS(t, fs, filepath.Join(cgroupRootPath, cgroupV2MemoryMaxBytes), "1073741824")
421421

422-
s, err := New(WithFS(fs), withNoWait, withIsCGroupV2(true))
422+
s, err := New(WithFS(fs), withNoWait, withIsCgroupV2(true))
423423
require.NoError(t, err)
424424

425425
mem, err := s.ContainerMemory(PrefixDefault)
@@ -436,7 +436,7 @@ func TestStatter(t *testing.T) {
436436
t.Parallel()
437437

438438
fs := initFS(t, fsContainerCgroupV2Kubernetes)
439-
s, err := New(WithFS(fs), withNoWait, withIsCGroupV2(true))
439+
s, err := New(WithFS(fs), withNoWait, withIsCgroupV2(true))
440440
require.NoError(t, err)
441441

442442
mem, err := s.ContainerMemory(PrefixDefault)
@@ -451,10 +451,10 @@ func TestStatter(t *testing.T) {
451451
})
452452
}
453453

454-
func TestCGroupV2Detection(t *testing.T) {
454+
func TestCgroupV2Detection(t *testing.T) {
455455
t.Parallel()
456456

457-
hostISCGroupV2 := os.Getenv("CLISTAT_IS_CGROUPV2") == "yes"
457+
hostIsCgroupV2 := os.Getenv("CLISTAT_IS_CGROUPV2") == "yes"
458458

459459
tests := []struct {
460460
name string
@@ -477,8 +477,8 @@ func TestCGroupV2Detection(t *testing.T) {
477477
s, err := New(WithFS(tt.fs))
478478
require.NoError(t, err)
479479

480-
isCGroupV2 := s.cgroupV2Detector(s.fs)
481-
assert.Equal(t, hostISCGroupV2, isCGroupV2)
480+
isCgroupV2 := s.cgroupV2Detector(s.fs)
481+
assert.Equal(t, hostIsCgroupV2, isCgroupV2)
482482
})
483483
}
484484
}

0 commit comments

Comments
 (0)