chore: enable thelper linter
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
mmorel-35 committed Jan 25, 2025
1 parent 8731c31 commit 693c68d
Showing 21 changed files with 175 additions and 179 deletions.
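
For context, the thelper linter (as typically configured) flags test helpers whose testing.TB parameter is not named tb, and can also require helpers to call Helper() so failures are reported at the caller's line; that is why this commit renames t testing.TB to tb testing.TB across the files below. A minimal sketch of the pattern, using a hypothetical helper that is not part of this commit:

package example

import "testing"

// newTempValue is a hypothetical test helper. With thelper enabled, a
// helper that accepts testing.TB should name the parameter tb; calling
// tb.Helper() attributes failures to the calling test rather than to
// this function.
func newTempValue(tb testing.TB) string {
    tb.Helper()
    v := "value"
    if v == "" {
        tb.Fatal("empty value")
    }
    return v
}

func TestUsesHelper(t *testing.T) {
    if got := newTempValue(t); got != "value" {
        t.Fatalf("unexpected value: %q", got)
    }
}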
4 changes: 2 additions & 2 deletions server/etcdserver/api/membership/cluster_test.go
@@ -652,8 +652,8 @@ func TestNodeToMember(t *testing.T) {
     }
 }
 
-func newTestCluster(t testing.TB, membs []*Member) *RaftCluster {
-    c := &RaftCluster{lg: zaptest.NewLogger(t), members: make(map[types.ID]*Member), removed: make(map[types.ID]bool)}
+func newTestCluster(tb testing.TB, membs []*Member) *RaftCluster {
+    c := &RaftCluster{lg: zaptest.NewLogger(tb), members: make(map[types.ID]*Member), removed: make(map[types.ID]bool)}
     for _, m := range membs {
         c.members[m.ID] = m
     }
8 changes: 4 additions & 4 deletions server/etcdserver/server_test.go
@@ -1513,12 +1513,12 @@ func (n *nodeConfChangeCommitterRecorder) ApplyConfChange(conf raftpb.ConfChange
     return &raftpb.ConfState{}
 }
 
-func newTestCluster(t testing.TB) *membership.RaftCluster {
-    return membership.NewCluster(zaptest.NewLogger(t))
+func newTestCluster(tb testing.TB) *membership.RaftCluster {
+    return membership.NewCluster(zaptest.NewLogger(tb))
 }
 
-func newTestClusterWithBackend(t testing.TB, membs []*membership.Member, be backend.Backend) *membership.RaftCluster {
-    lg := zaptest.NewLogger(t)
+func newTestClusterWithBackend(tb testing.TB, membs []*membership.Member, be backend.Backend) *membership.RaftCluster {
+    lg := zaptest.NewLogger(tb)
     c := membership.NewCluster(lg)
     c.SetBackend(schema.NewMembershipBackend(lg, be))
     for _, m := range membs {
4 changes: 2 additions & 2 deletions server/lease/lessor_bench_test.go
@@ -60,9 +60,9 @@ func demote(le *lessor) {
 }
 
 // return new lessor and tearDown to release resource
-func setUp(t testing.TB) (le *lessor, tearDown func()) {
+func setUp(tb testing.TB) (le *lessor, tearDown func()) {
     lg := zap.NewNop()
-    be, _ := betesting.NewDefaultTmpBackend(t)
+    be, _ := betesting.NewDefaultTmpBackend(tb)
     // MinLeaseTTL is negative, so we can grant expired lease in benchmark.
     // ExpiredLeasesRetryInterval should small, so benchmark of findExpired will recheck expired lease.
     le = newLessor(lg, be, nil, LessorConfig{MinLeaseTTL: -1000, ExpiredLeasesRetryInterval: 10 * time.Microsecond})
6 changes: 3 additions & 3 deletions server/storage/backend/batch_tx_test.go
@@ -393,9 +393,9 @@ func checkUnsafeForEach(t *testing.T, tx backend.UnsafeReader, expectedKeys, exp
 
 // runWriteback is used test the txWriteBuffer.writeback function, which is called inside tx.Unlock().
 // The parameters are chosen based on defaultBatchLimit = 10000
-func runWriteback(t testing.TB, kss, vss [][]string, isSeq bool) {
-    b, _ := betesting.NewTmpBackend(t, time.Hour, 10000)
-    defer betesting.Close(t, b)
+func runWriteback(tb testing.TB, kss, vss [][]string, isSeq bool) {
+    b, _ := betesting.NewTmpBackend(tb, time.Hour, 10000)
+    defer betesting.Close(tb, b)
 
     tx := b.BatchTx()
 
22 changes: 11 additions & 11 deletions server/storage/backend/hooks_test.go
@@ -94,11 +94,11 @@ func TestBackendAutoCommitBatchIntervalHook(t *testing.T) {
     waitUntil(ctx, t, func() bool { return getCommitsKey(t, be) == ">ccc" })
 }
 
-func waitUntil(ctx context.Context, t testing.TB, f func() bool) {
+func waitUntil(ctx context.Context, tb testing.TB, f func() bool) {
     for !f() {
         select {
         case <-ctx.Done():
-            t.Fatalf("Context cancelled/timedout without condition met: %v", ctx.Err())
+            tb.Fatalf("Context cancelled/timedout without condition met: %v", ctx.Err())
         default:
         }
         time.Sleep(10 * time.Millisecond)
@@ -112,28 +112,28 @@ func prepareBuckenAndKey(tx backend.BatchTx) {
     tx.UnsafePut(bucket, key, []byte(">"))
 }
 
-func newTestHooksBackend(t testing.TB, baseConfig backend.BackendConfig) backend.Backend {
+func newTestHooksBackend(tb testing.TB, baseConfig backend.BackendConfig) backend.Backend {
     cfg := baseConfig
     cfg.Hooks = backend.NewHooks(func(tx backend.UnsafeReadWriter) {
         k, v := tx.UnsafeRange(bucket, key, nil, 1)
-        t.Logf("OnPreCommit executed: %v %v", string(k[0]), string(v[0]))
-        assert.Len(t, k, 1)
-        assert.Len(t, v, 1)
+        tb.Logf("OnPreCommit executed: %v %v", string(k[0]), string(v[0]))
+        assert.Len(tb, k, 1)
+        assert.Len(tb, v, 1)
         tx.UnsafePut(bucket, key, append(v[0], byte('c')))
     })
 
-    be, _ := betesting.NewTmpBackendFromCfg(t, cfg)
-    t.Cleanup(func() {
-        betesting.Close(t, be)
+    be, _ := betesting.NewTmpBackendFromCfg(tb, cfg)
+    tb.Cleanup(func() {
+        betesting.Close(tb, be)
     })
     return be
 }
 
-func getCommitsKey(t testing.TB, be backend.Backend) string {
+func getCommitsKey(tb testing.TB, be backend.Backend) string {
     rtx := be.BatchTx()
     rtx.Lock()
     defer rtx.Unlock()
     _, v := rtx.UnsafeRange(bucket, key, nil, 1)
-    assert.Len(t, v, 1)
+    assert.Len(tb, v, 1)
     return string(v[0])
 }
20 changes: 10 additions & 10 deletions server/storage/backend/testing/betesting.go
@@ -26,28 +26,28 @@ import (
     "go.etcd.io/etcd/server/v3/storage/backend"
 )
 
-func NewTmpBackendFromCfg(t testing.TB, bcfg backend.BackendConfig) (backend.Backend, string) {
-    dir, err := os.MkdirTemp(t.TempDir(), "etcd_backend_test")
+func NewTmpBackendFromCfg(tb testing.TB, bcfg backend.BackendConfig) (backend.Backend, string) {
+    dir, err := os.MkdirTemp(tb.TempDir(), "etcd_backend_test")
     if err != nil {
         panic(err)
     }
     tmpPath := filepath.Join(dir, "database")
     bcfg.Path = tmpPath
-    bcfg.Logger = zaptest.NewLogger(t)
+    bcfg.Logger = zaptest.NewLogger(tb)
     return backend.New(bcfg), tmpPath
 }
 
 // NewTmpBackend creates a backend implementation for testing.
-func NewTmpBackend(t testing.TB, batchInterval time.Duration, batchLimit int) (backend.Backend, string) {
-    bcfg := backend.DefaultBackendConfig(zaptest.NewLogger(t))
+func NewTmpBackend(tb testing.TB, batchInterval time.Duration, batchLimit int) (backend.Backend, string) {
+    bcfg := backend.DefaultBackendConfig(zaptest.NewLogger(tb))
     bcfg.BatchInterval, bcfg.BatchLimit = batchInterval, batchLimit
-    return NewTmpBackendFromCfg(t, bcfg)
+    return NewTmpBackendFromCfg(tb, bcfg)
 }
 
-func NewDefaultTmpBackend(t testing.TB) (backend.Backend, string) {
-    return NewTmpBackendFromCfg(t, backend.DefaultBackendConfig(zaptest.NewLogger(t)))
+func NewDefaultTmpBackend(tb testing.TB) (backend.Backend, string) {
+    return NewTmpBackendFromCfg(tb, backend.DefaultBackendConfig(zaptest.NewLogger(tb)))
 }
 
-func Close(t testing.TB, b backend.Backend) {
-    assert.NoError(t, b.Close())
+func Close(tb testing.TB, b backend.Backend) {
+    assert.NoError(tb, b.Close())
 }
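
As a usage sketch (assuming the import path go.etcd.io/etcd/server/v3/storage/backend/testing for this package and the signatures shown above), callers are unaffected by the rename: a *testing.T still satisfies testing.TB, so tests pass their t exactly as before.

package example

import (
    "testing"

    betesting "go.etcd.io/etcd/server/v3/storage/backend/testing"
)

func TestBackendLifecycle(t *testing.T) {
    // *testing.T implements testing.TB, so the renamed helpers are called
    // exactly as before; only the parameter name inside them changed.
    be, _ := betesting.NewDefaultTmpBackend(t)
    defer betesting.Close(t, be)

    // ... exercise the backend here ...
}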
30 changes: 15 additions & 15 deletions server/storage/wal/testing/waltesting.go
@@ -28,32 +28,32 @@ import (
     "go.etcd.io/raft/v3/raftpb"
 )
 
-func NewTmpWAL(t testing.TB, reqs []etcdserverpb.InternalRaftRequest) (*wal.WAL, string) {
-    t.Helper()
-    dir, err := os.MkdirTemp(t.TempDir(), "etcd_wal_test")
+func NewTmpWAL(tb testing.TB, reqs []etcdserverpb.InternalRaftRequest) (*wal.WAL, string) {
+    tb.Helper()
+    dir, err := os.MkdirTemp(tb.TempDir(), "etcd_wal_test")
     if err != nil {
         panic(err)
     }
     tmpPath := filepath.Join(dir, "wal")
-    lg := zaptest.NewLogger(t)
+    lg := zaptest.NewLogger(tb)
     w, err := wal.Create(lg, tmpPath, nil)
     if err != nil {
-        t.Fatalf("Failed to create WAL: %v", err)
+        tb.Fatalf("Failed to create WAL: %v", err)
     }
     err = w.Close()
     if err != nil {
-        t.Fatalf("Failed to close WAL: %v", err)
+        tb.Fatalf("Failed to close WAL: %v", err)
     }
     if len(reqs) != 0 {
         w, err = wal.Open(lg, tmpPath, walpb.Snapshot{})
         if err != nil {
-            t.Fatalf("Failed to open WAL: %v", err)
+            tb.Fatalf("Failed to open WAL: %v", err)
         }
 
         var state raftpb.HardState
         _, state, _, err = w.ReadAll()
         if err != nil {
-            t.Fatalf("Failed to read WAL: %v", err)
+            tb.Fatalf("Failed to read WAL: %v", err)
         }
         var entries []raftpb.Entry
         for _, req := range reqs {
@@ -66,27 +66,27 @@ func NewTmpWAL(t testing.TB, reqs []etcdserverpb.InternalRaftRequest) (*wal.WAL, string) {
         }
         err = w.Save(state, entries)
         if err != nil {
-            t.Fatalf("Failed to save WAL: %v", err)
+            tb.Fatalf("Failed to save WAL: %v", err)
         }
         err = w.Close()
         if err != nil {
-            t.Fatalf("Failed to close WAL: %v", err)
+            tb.Fatalf("Failed to close WAL: %v", err)
         }
     }
 
     w, err = wal.OpenForRead(lg, tmpPath, walpb.Snapshot{})
     if err != nil {
-        t.Fatalf("Failed to open WAL: %v", err)
+        tb.Fatalf("Failed to open WAL: %v", err)
     }
     return w, tmpPath
 }
 
-func Reopen(t testing.TB, walPath string) *wal.WAL {
-    t.Helper()
-    lg := zaptest.NewLogger(t)
+func Reopen(tb testing.TB, walPath string) *wal.WAL {
+    tb.Helper()
+    lg := zaptest.NewLogger(tb)
     w, err := wal.OpenForRead(lg, walPath, walpb.Snapshot{})
     if err != nil {
-        t.Fatalf("Failed to open WAL: %v", err)
+        tb.Fatalf("Failed to open WAL: %v", err)
     }
     return w
 }

Codecov / codecov/patch flagged the added tb.Fatalf lines (lines 41, 45, 50, 56, 69, 73, 79 and 89 of server/storage/wal/testing/waltesting.go) as not covered by tests.
4 changes: 2 additions & 2 deletions tests/e2e/utils.go
@@ -69,13 +69,13 @@ func newClient(t *testing.T, entpoints []string, cfg e2e.ClientConfig) *clientv3
 }
 
 // tlsInfo follows the Client-to-server communication in https://etcd.io/docs/v3.6/op-guide/security/#basic-setup
-func tlsInfo(t testing.TB, cfg e2e.ClientConfig) (*transport.TLSInfo, error) {
+func tlsInfo(tb testing.TB, cfg e2e.ClientConfig) (*transport.TLSInfo, error) {
     switch cfg.ConnectionType {
     case e2e.ClientNonTLS, e2e.ClientTLSAndNonTLS:
         return nil, nil
     case e2e.ClientTLS:
         if cfg.AutoTLS {
-            tls, err := transport.SelfCert(zap.NewNop(), t.TempDir(), []string{"localhost"}, 1)
+            tls, err := transport.SelfCert(zap.NewNop(), tb.TempDir(), []string{"localhost"}, 1)
             if err != nil {
                 return nil, fmt.Errorf("failed to generate cert: %w", err)
             }
56 changes: 28 additions & 28 deletions tests/e2e/v2store_deprecation_test.go
@@ -39,13 +39,13 @@ import (
     "go.etcd.io/etcd/tests/v3/framework/e2e"
 )
 
-func writeCustomV2Data(t testing.TB, epc *e2e.EtcdProcessCluster, count int) {
+func writeCustomV2Data(tb testing.TB, epc *e2e.EtcdProcessCluster, count int) {
     for i := 0; i < count; i++ {
         if err := e2e.CURLPut(epc, e2e.CURLReq{
             Endpoint: "/v2/keys/foo", Value: "bar" + fmt.Sprint(i),
             Expected: expect.ExpectedResponse{Value: `{"action":"set","node":{"key":"/foo","value":"bar` + fmt.Sprint(i)},
         }); err != nil {
-            t.Fatalf("failed put with curl (%v)", err)
+            tb.Fatalf("failed put with curl (%v)", err)
         }
     }
 }
@@ -191,47 +191,47 @@ func TestV2DeprecationSnapshotRecover(t *testing.T) {
     assert.NoError(t, epc.Close())
 }
 
-func runEtcdAndCreateSnapshot(t testing.TB, serverVersion e2e.ClusterVersion, dataDir string, snapshotCount uint64) *e2e.EtcdProcessCluster {
+func runEtcdAndCreateSnapshot(tb testing.TB, serverVersion e2e.ClusterVersion, dataDir string, snapshotCount uint64) *e2e.EtcdProcessCluster {
     cfg := e2e.ConfigStandalone(*e2e.NewConfig(
         e2e.WithVersion(serverVersion),
         e2e.WithDataDirPath(dataDir),
        e2e.WithSnapshotCount(snapshotCount),
         e2e.WithKeepDataDir(true),
     ))
-    epc, err := e2e.NewEtcdProcessCluster(context.TODO(), t, e2e.WithConfig(cfg))
-    assert.NoError(t, err)
+    epc, err := e2e.NewEtcdProcessCluster(context.TODO(), tb, e2e.WithConfig(cfg))
+    assert.NoError(tb, err)
     return epc
 }
 
-func addAndRemoveKeysAndMembers(ctx context.Context, t testing.TB, cc *e2e.EtcdctlV3, snapshotCount uint64) (members []uint64) {
+func addAndRemoveKeysAndMembers(ctx context.Context, tb testing.TB, cc *e2e.EtcdctlV3, snapshotCount uint64) (members []uint64) {
     // Execute some non-trivial key&member operation
     var i uint64
     for i = 0; i < snapshotCount*3; i++ {
         err := cc.Put(ctx, fmt.Sprintf("%d", i), "1", config.PutOptions{})
-        require.NoError(t, err)
+        require.NoError(tb, err)
     }
     member1, err := cc.MemberAddAsLearner(ctx, "member1", []string{"http://127.0.0.1:2000"})
-    require.NoError(t, err)
+    require.NoError(tb, err)
     members = append(members, member1.Member.ID)
 
     for i = 0; i < snapshotCount*2; i++ {
         _, err = cc.Delete(ctx, fmt.Sprintf("%d", i), config.DeleteOptions{})
-        require.NoError(t, err)
+        require.NoError(tb, err)
     }
     _, err = cc.MemberRemove(ctx, member1.Member.ID)
-    require.NoError(t, err)
+    require.NoError(tb, err)
 
     for i = 0; i < snapshotCount; i++ {
         err = cc.Put(ctx, fmt.Sprintf("%d", i), "2", config.PutOptions{})
-        require.NoError(t, err)
+        require.NoError(tb, err)
     }
     member2, err := cc.MemberAddAsLearner(ctx, "member2", []string{"http://127.0.0.1:2001"})
-    require.NoError(t, err)
+    require.NoError(tb, err)
     members = append(members, member2.Member.ID)
 
     for i = 0; i < snapshotCount/2; i++ {
         err = cc.Put(ctx, fmt.Sprintf("%d", i), "3", config.PutOptions{})
-        assert.NoError(t, err)
+        assert.NoError(tb, err)
     }
     return members
 }
@@ -240,39 +240,39 @@ func filterSnapshotFiles(path string) bool {
     return strings.HasSuffix(path, ".snap")
 }
 
-func assertSnapshotsMatch(t testing.TB, firstDataDir, secondDataDir string, patch func([]byte) []byte) {
-    lg := zaptest.NewLogger(t)
+func assertSnapshotsMatch(tb testing.TB, firstDataDir, secondDataDir string, patch func([]byte) []byte) {
+    lg := zaptest.NewLogger(tb)
     firstFiles, err := fileutil.ListFiles(firstDataDir, filterSnapshotFiles)
-    require.NoError(t, err)
+    require.NoError(tb, err)
     secondFiles, err := fileutil.ListFiles(secondDataDir, filterSnapshotFiles)
-    require.NoError(t, err)
-    assert.NotEmpty(t, firstFiles)
-    assert.NotEmpty(t, secondFiles)
-    assert.Equal(t, len(firstFiles), len(secondFiles))
+    require.NoError(tb, err)
+    assert.NotEmpty(tb, firstFiles)
+    assert.NotEmpty(tb, secondFiles)
+    assert.Equal(tb, len(firstFiles), len(secondFiles))
     sort.Strings(firstFiles)
     sort.Strings(secondFiles)
     for i := 0; i < len(firstFiles); i++ {
         firstSnapshot, err := snap.Read(lg, firstFiles[i])
-        require.NoError(t, err)
+        require.NoError(tb, err)
         secondSnapshot, err := snap.Read(lg, secondFiles[i])
-        require.NoError(t, err)
-        assertMembershipEqual(t, openSnap(patch(firstSnapshot.Data)), openSnap(patch(secondSnapshot.Data)))
+        require.NoError(tb, err)
+        assertMembershipEqual(tb, openSnap(patch(firstSnapshot.Data)), openSnap(patch(secondSnapshot.Data)))
     }
 }
 
-func assertMembershipEqual(t testing.TB, firstStore v2store.Store, secondStore v2store.Store) {
-    rc1 := membership.NewCluster(zaptest.NewLogger(t))
+func assertMembershipEqual(tb testing.TB, firstStore v2store.Store, secondStore v2store.Store) {
+    rc1 := membership.NewCluster(zaptest.NewLogger(tb))
     rc1.SetStore(firstStore)
     rc1.Recover(func(lg *zap.Logger, v *semver.Version) {})
 
-    rc2 := membership.NewCluster(zaptest.NewLogger(t))
+    rc2 := membership.NewCluster(zaptest.NewLogger(tb))
     rc2.SetStore(secondStore)
     rc2.Recover(func(lg *zap.Logger, v *semver.Version) {})
 
     // membership should match
     if !reflect.DeepEqual(rc1.Members(), rc2.Members()) {
-        t.Logf("memberids_from_last_version = %+v, member_ids_from_current_version = %+v", rc1.MemberIDs(), rc2.MemberIDs())
-        t.Errorf("members_from_last_version_snapshot = %+v, members_from_current_version_snapshot %+v", rc1.Members(), rc2.Members())
+        tb.Logf("memberids_from_last_version = %+v, member_ids_from_current_version = %+v", rc1.MemberIDs(), rc2.MemberIDs())
+        tb.Errorf("members_from_last_version_snapshot = %+v, members_from_current_version_snapshot %+v", rc1.Members(), rc2.Members())
     }
 }
 
(Diffs for the remaining 12 changed files are not shown here.)