From f4390a34b09558a5990a1af0e44ba2aa690164de Mon Sep 17 00:00:00 2001 From: Quang-Minh Nguyen Date: Thu, 18 Jul 2024 13:38:51 +0700 Subject: [PATCH 01/17] raft: Add replication factor to Raft config This commit adds the replication factor to the Raft config. This config defines the number of nodes where the data of a storage are replicated. It includes the main storage. For example, ReplicationFactor=3 means 1 main storage + 2 replicated storages. The replication factor is set at the storage level. All repositories of a storage share the same behavior. Although our Raft architecture makes it feasible to set the replication factor at the repository level, we don't need that fine-grained level of control at the moment. This decision is subject to change in the future. This commit also adds a field to cluster.proto. At this point, cluster-related protobufs are not used anywhere except in tests. So, it's okay to change protobuf field number, for the sake of simplicity. --- internal/gitaly/config/config.go | 10 ++ internal/gitaly/config/config_test.go | 126 +++++++++++++++++--------- proto/cluster.proto | 4 +- proto/go/gitalypb/cluster.pb.go | 26 ++++-- 4 files changed, 114 insertions(+), 52 deletions(-) diff --git a/internal/gitaly/config/config.go b/internal/gitaly/config/config.go index 4f45873838..ee7db84d80 100644 --- a/internal/gitaly/config/config.go +++ b/internal/gitaly/config/config.go @@ -1227,6 +1227,9 @@ type Raft struct { // InitialMembers contains the list of initial members of the cluster. It's a map of NodeID to // RaftAddr. Due to limitations of the TOML format, it's not possible to set the map key as a uint64. InitialMembers map[string]string `toml:"initial_members" json:"initial_members"` + // ReplicationFactor defines the number of nodes where data of this storage are replicated, + // including the original storage. 
+ ReplicationFactor uint64 `toml:"replication_factor" json:"replication_factor"` // RTTMilliseconds is the maximum round trip between two nodes in the cluster. It's used to // calculate multiple types of timeouts of Raft protocol. RTTMilliseconds uint64 `toml:"rtt_milliseconds" json:"rtt_milliseconds"` @@ -1246,6 +1249,9 @@ const ( // RaftDefaultHeartbeatTicks is the default heartbeat RTT for the Raft cluster. The estimated election // timeout is DefaultRTT * DefaultHeartbeatTicks. RaftDefaultHeartbeatTicks = 2 + // RaftDefaultReplicationFactor is the default number of nodes where data of this storage are + // replicated. By default, the factor is 3, which means 1 main storage + 2 replicated storages. + RaftDefaultReplicationFactor = 3 ) func (r Raft) fulfillDefaults() Raft { @@ -1258,6 +1264,9 @@ func (r Raft) fulfillDefaults() Raft { if r.HeartbeatTicks == 0 { r.HeartbeatTicks = RaftDefaultHeartbeatTicks } + if r.ReplicationFactor == 0 { + r.ReplicationFactor = RaftDefaultReplicationFactor + } return r } @@ -1280,6 +1289,7 @@ func (r Raft) Validate(transactions Transactions) error { Append(cfgerror.NotEmpty(r.ClusterID), "cluster_id"). Append(cfgerror.Comparable(r.NodeID).GreaterThan(0), "node_id"). Append(cfgerror.NotEmpty(r.RaftAddr), "raft_addr"). + Append(cfgerror.Comparable(r.ReplicationFactor).GreaterThan(0), "replication_factor"). Append(cfgerror.Comparable(r.RTTMilliseconds).GreaterThan(0), "rtt_millisecond"). Append(cfgerror.Comparable(r.ElectionTicks).GreaterThan(0), "election_rtt"). 
Append(cfgerror.Comparable(r.HeartbeatTicks).GreaterThan(0), "heartbeat_rtt") diff --git a/internal/gitaly/config/config_test.go b/internal/gitaly/config/config_test.go index 7fdaedc511..1f5f5caa28 100644 --- a/internal/gitaly/config/config_test.go +++ b/internal/gitaly/config/config_test.go @@ -2667,9 +2667,10 @@ func TestRaftConfig_Validate(t *testing.T) { "2": "localhost:3002", "3": "localhost:3003", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 2, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, }, @@ -2685,9 +2686,10 @@ func TestRaftConfig_Validate(t *testing.T) { "2": "localhost:3002", "3": "localhost:3003", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 2, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2709,9 +2711,10 @@ func TestRaftConfig_Validate(t *testing.T) { "2": "localhost:3002", "3": "localhost:3003", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 2, + ReplicationFactor: 3, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2733,9 +2736,10 @@ func TestRaftConfig_Validate(t *testing.T) { "2": "localhost:3002", "3": "localhost:3003", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 2, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2757,9 +2761,10 @@ func TestRaftConfig_Validate(t *testing.T) { "2": "localhost:3002", "3": "localhost:3003", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 2, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: 
Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2772,14 +2777,15 @@ func TestRaftConfig_Validate(t *testing.T) { { name: "empty initial members", cfgRaft: Raft{ - Enabled: true, - ClusterID: "4f04a0e2-0db8-4bfa-b846-01b5b4a093fb", - NodeID: 1, - RaftAddr: "localhost:3001", - InitialMembers: map[string]string{}, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 2, + Enabled: true, + ClusterID: "4f04a0e2-0db8-4bfa-b846-01b5b4a093fb", + NodeID: 1, + RaftAddr: "localhost:3001", + InitialMembers: map[string]string{}, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2802,9 +2808,10 @@ func TestRaftConfig_Validate(t *testing.T) { "3": "localhost:3003", "4": "", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 2, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2827,9 +2834,10 @@ func TestRaftConfig_Validate(t *testing.T) { "3": "localhost:3003", "4": "1:2:3", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 2, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2851,9 +2859,10 @@ func TestRaftConfig_Validate(t *testing.T) { "2": "localhost:3002", "3": "localhost:3003", }, - RTTMilliseconds: 0, - ElectionTicks: 20, - HeartbeatTicks: 2, + ReplicationFactor: 5, + RTTMilliseconds: 0, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2875,9 +2884,10 @@ func TestRaftConfig_Validate(t *testing.T) { "2": "localhost:3002", "3": "localhost:3003", }, - RTTMilliseconds: 200, - ElectionTicks: 0, - HeartbeatTicks: 2, + ReplicationFactor: 5, 
+ RTTMilliseconds: 200, + ElectionTicks: 0, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2899,9 +2909,10 @@ func TestRaftConfig_Validate(t *testing.T) { "2": "localhost:3002", "3": "localhost:3003", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 0, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 0, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2923,9 +2934,10 @@ func TestRaftConfig_Validate(t *testing.T) { "2": "localhost:3002", "3": "localhost:3003", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 2, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: false}, expectedErr: cfgerror.ValidationErrors{ @@ -2935,6 +2947,31 @@ func TestRaftConfig_Validate(t *testing.T) { ), }, }, + { + name: "invalid replication factor", + cfgRaft: Raft{ + Enabled: true, + ClusterID: "4f04a0e2-0db8-4bfa-b846-01b5b4a093fb", + NodeID: 1, + RaftAddr: "localhost:3001", + InitialMembers: map[string]string{ + "1": "localhost:3001", + "2": "localhost:3002", + "3": "localhost:3003", + }, + ReplicationFactor: 0, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, + }, + cfgTransactions: Transactions{Enabled: true}, + expectedErr: cfgerror.ValidationErrors{ + cfgerror.NewValidationError( + fmt.Errorf("%w: 0 is not greater than 0", cfgerror.ErrNotInRange), + "replication_factor", + ), + }, + }, } { t.Run(tc.name, func(t *testing.T) { err := tc.cfgRaft.Validate(tc.cfgTransactions) @@ -2965,9 +3002,10 @@ initial_members = {1 = "localhost:4001", 2 = "localhost:4002", 3 = "localhost:40 "2": "localhost:4002", "3": "localhost:4003", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 0, + ReplicationFactor: 3, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 0, }, } require.NoError(t, 
expectedCfg.Sanitize()) diff --git a/proto/cluster.proto b/proto/cluster.proto index fa954a1f42..69f2a9cb7a 100644 --- a/proto/cluster.proto +++ b/proto/cluster.proto @@ -23,8 +23,10 @@ message Storage { uint64 storage_id = 1; // name is the human-readable name of the storage. string name = 2; + // replication_factor defines the number of nodes where data of this storage are replicated. + uint64 replication_factor = 3; // replica_groups is a list of identifiers for the replica groups associated with this storage. - repeated uint64 replica_groups = 3; + repeated uint64 replica_groups = 4; } // LeaderState represents the current leader state of a Raft group. diff --git a/proto/go/gitalypb/cluster.pb.go b/proto/go/gitalypb/cluster.pb.go index b29434b7be..aae6d33122 100644 --- a/proto/go/gitalypb/cluster.pb.go +++ b/proto/go/gitalypb/cluster.pb.go @@ -100,8 +100,10 @@ type Storage struct { StorageId uint64 `protobuf:"varint,1,opt,name=storage_id,json=storageId,proto3" json:"storage_id,omitempty"` // name is the human-readable name of the storage. Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // replication_factor defines the number of nodes where data of this storage are replicated. + ReplicationFactor uint64 `protobuf:"varint,3,opt,name=replication_factor,json=replicationFactor,proto3" json:"replication_factor,omitempty"` // replica_groups is a list of identifiers for the replica groups associated with this storage. 
- ReplicaGroups []uint64 `protobuf:"varint,3,rep,packed,name=replica_groups,json=replicaGroups,proto3" json:"replica_groups,omitempty"` + ReplicaGroups []uint64 `protobuf:"varint,4,rep,packed,name=replica_groups,json=replicaGroups,proto3" json:"replica_groups,omitempty"` } func (x *Storage) Reset() { @@ -150,6 +152,13 @@ func (x *Storage) GetName() string { return "" } +func (x *Storage) GetReplicationFactor() uint64 { + if x != nil { + return x.ReplicationFactor + } + return 0 +} + func (x *Storage) GetReplicaGroups() []uint64 { if x != nil { return x.ReplicaGroups @@ -537,12 +546,15 @@ var file_cluster_proto_rawDesc = []byte{ 0x01, 0x28, 0x04, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x63, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x1d, - 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0d, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x02, 0x38, 0x01, 0x22, 0x92, 0x01, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, + 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 
0x6f, + 0x6e, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x63, 0x74, 0x6f, + 0x72, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0d, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x22, 0x6f, 0x0a, 0x0b, 0x4c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, -- GitLab From 63f0943846bcb3e2544ebeafdbc9bd10d52ee718 Mon Sep 17 00:00:00 2001 From: Quang-Minh Nguyen Date: Fri, 12 Jul 2024 17:06:30 +0700 Subject: [PATCH 02/17] raft: Attach more storage info to storage registration A prior commit added replication factor to Raft config. This commit attaches ReplicationFactor and NodeID to the storage registration request. The metadata Raft group persists that config to each member node. It will be used for replica placement in some later commits. 
--- internal/gitaly/storage/raft/manager_test.go | 14 +-- .../gitaly/storage/raft/metadata_group.go | 10 +- .../storage/raft/metadata_group_test.go | 8 +- .../storage/raft/metadata_statemachine.go | 6 +- .../raft/metadata_statemachine_test.go | 54 +++++---- .../gitaly/storage/raft/testhelper_test.go | 17 +-- proto/cluster.proto | 8 +- proto/go/gitalypb/cluster.pb.go | 109 ++++++++++++------ 8 files changed, 142 insertions(+), 84 deletions(-) diff --git a/internal/gitaly/storage/raft/manager_test.go b/internal/gitaly/storage/raft/manager_test.go index a7c9aade50..be16bed11d 100644 --- a/internal/gitaly/storage/raft/manager_test.go +++ b/internal/gitaly/storage/raft/manager_test.go @@ -72,7 +72,7 @@ func TestManager_Start(t *testing.T) { ClusterId: cluster.clusterID, NextStorageId: 2, Storages: map[uint64]*gitalypb.Storage{ - 1: {StorageId: 1, Name: "storage-1"}, + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1}, }, }, clusterInfo) }) @@ -118,7 +118,7 @@ func TestManager_Start(t *testing.T) { require.Equal(t, cluster.clusterID, clusterInfo.ClusterId) require.Equal(t, uint64(numNode+1), clusterInfo.NextStorageId) require.Equal(t, &gitalypb.Storage{ - StorageId: storage.id.ToUint64(), Name: storage.name, + StorageId: storage.id.ToUint64(), Name: storage.name, ReplicationFactor: 3, NodeId: node.ToUint64(), }, clusterInfo.Storages[storage.id.ToUint64()]) }) }) @@ -155,7 +155,7 @@ func TestManager_Start(t *testing.T) { require.Equal(t, cluster.clusterID, clusterInfo.ClusterId) require.Equal(t, uint64(3), clusterInfo.NextStorageId) require.Equal(t, &gitalypb.Storage{ - StorageId: storage.id.ToUint64(), Name: storage.name, + StorageId: storage.id.ToUint64(), Name: storage.name, ReplicationFactor: 3, NodeId: node.ToUint64(), }, clusterInfo.Storages[storage.id.ToUint64()]) }) @@ -173,7 +173,7 @@ func TestManager_Start(t *testing.T) { require.Equal(t, cluster.clusterID, clusterInfo.ClusterId) require.Equal(t, uint64(4), clusterInfo.NextStorageId) 
require.Equal(t, &gitalypb.Storage{ - StorageId: storage.id.ToUint64(), Name: storage.name, + StorageId: storage.id.ToUint64(), Name: storage.name, ReplicationFactor: 3, NodeId: node.ToUint64(), }, clusterInfo.Storages[storage.id.ToUint64()]) }) }) @@ -254,7 +254,7 @@ func TestManager_Start(t *testing.T) { require.Equal(t, uint64(3), clusterInfo.NextStorageId) require.Equal(t, &gitalypb.Storage{ - StorageId: storage.id.ToUint64(), Name: storage.name, + StorageId: storage.id.ToUint64(), Name: storage.name, ReplicationFactor: 3, }, clusterInfo.Storages[storage.id.ToUint64()]) } }) @@ -287,7 +287,7 @@ func TestManager_Start(t *testing.T) { require.Equal(t, uint64(4), clusterInfo.NextStorageId) require.Equal(t, &gitalypb.Storage{ - StorageId: mgr.firstStorage.id.ToUint64(), Name: mgr.firstStorage.name, + StorageId: mgr.firstStorage.id.ToUint64(), Name: mgr.firstStorage.name, ReplicationFactor: 3, }, clusterInfo.Storages[mgr.firstStorage.id.ToUint64()]) }) }) @@ -320,7 +320,7 @@ func TestManager_Start(t *testing.T) { require.Equal(t, uint64(4), clusterInfo.NextStorageId) require.Equal(t, &gitalypb.Storage{ - StorageId: mgr.firstStorage.id.ToUint64(), Name: mgr.firstStorage.name, + StorageId: mgr.firstStorage.id.ToUint64(), Name: mgr.firstStorage.name, ReplicationFactor: 3, }, clusterInfo.Storages[mgr.firstStorage.id.ToUint64()]) }) }) diff --git a/internal/gitaly/storage/raft/metadata_group.go b/internal/gitaly/storage/raft/metadata_group.go index 405a37aaf8..e229f15f41 100644 --- a/internal/gitaly/storage/raft/metadata_group.go +++ b/internal/gitaly/storage/raft/metadata_group.go @@ -168,7 +168,7 @@ func (g *metadataRaftGroup) RegisterStorage(storageName string) (raftID, error) return 0, fmt.Errorf("storage %q already registered", storageName) } } - result, response, err := g.requestRegisterStorage(storageName) + result, response, err := g.requestRegisterStorage(storageName, g.clusterConfig) if err != nil { return 0, fmt.Errorf("registering storage: %w", err) } @@ 
-236,7 +236,7 @@ func (g *metadataRaftGroup) requestBootstrapCluster() (updateResult, *gitalypb.B return requester.SyncWrite(g.ctx, &gitalypb.BootstrapClusterRequest{ClusterId: g.clusterConfig.ClusterID}) } -func (g *metadataRaftGroup) requestRegisterStorage(storageName string) (updateResult, *gitalypb.RegisterStorageResponse, error) { +func (g *metadataRaftGroup) requestRegisterStorage(storageName string, clusterCfg config.Raft) (updateResult, *gitalypb.RegisterStorageResponse, error) { requester := NewRequester[*gitalypb.RegisterStorageRequest, *gitalypb.RegisterStorageResponse]( g.nodeHost, g.groupID, g.logger, requestOption{ retry: defaultRetry, @@ -244,7 +244,11 @@ func (g *metadataRaftGroup) requestRegisterStorage(storageName string) (updateRe exponential: g.backoffProfile, }, ) - return requester.SyncWrite(g.ctx, &gitalypb.RegisterStorageRequest{StorageName: storageName}) + return requester.SyncWrite(g.ctx, &gitalypb.RegisterStorageRequest{ + StorageName: storageName, + ReplicationFactor: clusterCfg.ReplicationFactor, + NodeId: clusterCfg.NodeID, + }) } func (g *metadataRaftGroup) getLeaderState() (*gitalypb.LeaderState, error) { diff --git a/internal/gitaly/storage/raft/metadata_group_test.go b/internal/gitaly/storage/raft/metadata_group_test.go index 266983104f..2f7a8ed9c2 100644 --- a/internal/gitaly/storage/raft/metadata_group_test.go +++ b/internal/gitaly/storage/raft/metadata_group_test.go @@ -292,9 +292,9 @@ func TestMetadataGroup_RegisterStorage(t *testing.T) { ClusterId: cluster.clusterID, NextStorageId: 4, Storages: map[uint64]*gitalypb.Storage{ - 1: {StorageId: 1, Name: "storage-2"}, - 2: {StorageId: 2, Name: "storage-4"}, - 3: {StorageId: 3, Name: "storage-6"}, + 1: {StorageId: 1, Name: "storage-2", ReplicationFactor: 3, NodeId: 1}, + 2: {StorageId: 2, Name: "storage-4", ReplicationFactor: 3, NodeId: 2}, + 3: {StorageId: 3, Name: "storage-6", ReplicationFactor: 3, NodeId: 3}, }, }, clusterInfo) } @@ -327,7 +327,7 @@ func 
TestMetadataGroup_RegisterStorage(t *testing.T) { ClusterId: cluster.clusterID, NextStorageId: 2, Storages: map[uint64]*gitalypb.Storage{ - 1: {StorageId: 1, Name: "storage-1"}, + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1}, }, }, clusterInfo) } diff --git a/internal/gitaly/storage/raft/metadata_statemachine.go b/internal/gitaly/storage/raft/metadata_statemachine.go index 2c826e3c90..327a6c45d9 100644 --- a/internal/gitaly/storage/raft/metadata_statemachine.go +++ b/internal/gitaly/storage/raft/metadata_statemachine.go @@ -210,8 +210,10 @@ func (s *metadataStateMachine) handleRegisterStorageRequest(req *gitalypb.Regist } newStorage := &gitalypb.Storage{ - StorageId: cluster.NextStorageId, - Name: req.StorageName, + StorageId: cluster.NextStorageId, + Name: req.GetStorageName(), + ReplicationFactor: req.GetReplicationFactor(), + NodeId: req.GetNodeId(), } cluster.Storages[cluster.NextStorageId] = newStorage cluster.NextStorageId++ diff --git a/internal/gitaly/storage/raft/metadata_statemachine_test.go b/internal/gitaly/storage/raft/metadata_statemachine_test.go index 26eaf2a3f0..4871ccf3cc 100644 --- a/internal/gitaly/storage/raft/metadata_statemachine_test.go +++ b/internal/gitaly/storage/raft/metadata_statemachine_test.go @@ -228,8 +228,8 @@ func TestMetadataStateMachine_Update(t *testing.T) { requireLastApplied(t, sm, 1) result, err := sm.Update([]statemachine.Entry{ - {Index: 2, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name})}, - {Index: 3, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[1].Name})}, + {Index: 2, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name, ReplicationFactor: 3, NodeId: 1})}, + {Index: 3, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[1].Name, ReplicationFactor: 5, NodeId: 2})}, }) require.NoError(t, err) require.Equal(t, []statemachine.Entry{ @@ -237,8 +237,10 @@ func 
TestMetadataStateMachine_Update(t *testing.T) { Value: uint64(resultRegisterStorageSuccessful), Data: wrapSMMessage(t, &gitalypb.RegisterStorageResponse{ Storage: &gitalypb.Storage{ - StorageId: 1, - Name: cfg.Storages[0].Name, + StorageId: 1, + Name: cfg.Storages[0].Name, + ReplicationFactor: 3, + NodeId: 1, }, }), }}, @@ -246,8 +248,10 @@ func TestMetadataStateMachine_Update(t *testing.T) { Value: uint64(resultRegisterStorageSuccessful), Data: wrapSMMessage(t, &gitalypb.RegisterStorageResponse{ Storage: &gitalypb.Storage{ - StorageId: 2, - Name: cfg.Storages[1].Name, + StorageId: 2, + Name: cfg.Storages[1].Name, + ReplicationFactor: 5, + NodeId: 2, }, }), }}, @@ -259,12 +263,16 @@ func TestMetadataStateMachine_Update(t *testing.T) { NextStorageId: 3, Storages: map[uint64]*gitalypb.Storage{ 1: { - StorageId: 1, - Name: cfg.Storages[0].Name, + StorageId: 1, + Name: cfg.Storages[0].Name, + ReplicationFactor: 3, + NodeId: 1, }, 2: { - StorageId: 2, - Name: cfg.Storages[1].Name, + StorageId: 2, + Name: cfg.Storages[1].Name, + ReplicationFactor: 5, + NodeId: 2, }, }, }) @@ -285,8 +293,8 @@ func TestMetadataStateMachine_Update(t *testing.T) { requireLastApplied(t, sm, 1) result, err := sm.Update([]statemachine.Entry{ - {Index: 2, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name})}, - {Index: 3, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name})}, + {Index: 2, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name, ReplicationFactor: 3, NodeId: 1})}, + {Index: 3, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name, ReplicationFactor: 5, NodeId: 2})}, }) require.NoError(t, err) require.Equal(t, []statemachine.Entry{ @@ -294,8 +302,10 @@ func TestMetadataStateMachine_Update(t *testing.T) { Value: uint64(resultRegisterStorageSuccessful), Data: wrapSMMessage(t, &gitalypb.RegisterStorageResponse{ Storage: &gitalypb.Storage{ - 
StorageId: 1, - Name: cfg.Storages[0].Name, + StorageId: 1, + Name: cfg.Storages[0].Name, + ReplicationFactor: 3, + NodeId: 1, }, }), }}, @@ -310,8 +320,10 @@ func TestMetadataStateMachine_Update(t *testing.T) { NextStorageId: 2, Storages: map[uint64]*gitalypb.Storage{ 1: { - StorageId: 1, - Name: cfg.Storages[0].Name, + StorageId: 1, + Name: cfg.Storages[0].Name, + ReplicationFactor: 3, + NodeId: 1, }, }, }) @@ -329,7 +341,7 @@ func TestMetadataStateMachine_Update(t *testing.T) { require.NoError(t, err) result, err := sm.Update([]statemachine.Entry{ - {Index: 1, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name})}, + {Index: 1, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name, ReplicationFactor: 3, NodeId: 1})}, }) require.NoError(t, err) require.Equal(t, []statemachine.Entry{ @@ -459,8 +471,8 @@ func TestMetadataStateMachine_Lookup(t *testing.T) { bootstrapCluster(t, sm) _, err = sm.Update([]statemachine.Entry{ - {Index: 2, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name})}, - {Index: 3, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[1].Name})}, + {Index: 2, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name, ReplicationFactor: 3, NodeId: 1})}, + {Index: 3, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[1].Name, ReplicationFactor: 5, NodeId: 2})}, }) require.NoError(t, err) @@ -471,8 +483,8 @@ func TestMetadataStateMachine_Lookup(t *testing.T) { ClusterId: "1234", NextStorageId: 3, Storages: map[uint64]*gitalypb.Storage{ - 1: {StorageId: 1, Name: cfg.Storages[0].Name}, - 2: {StorageId: 2, Name: cfg.Storages[1].Name}, + 1: {StorageId: 1, Name: cfg.Storages[0].Name, ReplicationFactor: 3, NodeId: 1}, + 2: {StorageId: 2, Name: cfg.Storages[1].Name, ReplicationFactor: 5, NodeId: 2}, }, }}, response) }) diff --git 
a/internal/gitaly/storage/raft/testhelper_test.go b/internal/gitaly/storage/raft/testhelper_test.go index 36b559a7c9..455d2bb5d5 100644 --- a/internal/gitaly/storage/raft/testhelper_test.go +++ b/internal/gitaly/storage/raft/testhelper_test.go @@ -218,14 +218,15 @@ func (c *testRaftCluster) createRaftConfig(node raftID) config.Raft { initialMembers[fmt.Sprintf("%d", node)] = addr } return config.Raft{ - Enabled: true, - ClusterID: c.clusterID, - NodeID: node.ToUint64(), - RaftAddr: c.initialMembers[node.ToUint64()], - InitialMembers: initialMembers, - RTTMilliseconds: config.RaftDefaultRTT, - ElectionTicks: config.RaftDefaultElectionTicks, - HeartbeatTicks: config.RaftDefaultHeartbeatTicks, + Enabled: true, + ClusterID: c.clusterID, + NodeID: node.ToUint64(), + RaftAddr: c.initialMembers[node.ToUint64()], + InitialMembers: initialMembers, + ReplicationFactor: 3, + RTTMilliseconds: config.RaftDefaultRTT, + ElectionTicks: config.RaftDefaultElectionTicks, + HeartbeatTicks: config.RaftDefaultHeartbeatTicks, } } diff --git a/proto/cluster.proto b/proto/cluster.proto index 69f2a9cb7a..0a820b4817 100644 --- a/proto/cluster.proto +++ b/proto/cluster.proto @@ -25,8 +25,10 @@ message Storage { string name = 2; // replication_factor defines the number of nodes where data of this storage are replicated. uint64 replication_factor = 3; + // node_id is the current residential node of the storage. + uint64 node_id = 4; // replica_groups is a list of identifiers for the replica groups associated with this storage. - repeated uint64 replica_groups = 4; + repeated uint64 replica_groups = 5; } // LeaderState represents the current leader state of a Raft group. @@ -69,6 +71,10 @@ message GetClusterResponse{ message RegisterStorageRequest { // storage_name is the human-readable name of the new storage. string storage_name = 1; + // node_id is the inital residential node of the storage. + uint64 node_id = 2; + // replication_factor contains the replication factor of this storage. 
+ uint64 replication_factor = 3; } // RegisterStorageResponse is the response message for registering a new storage in a cluster. diff --git a/proto/go/gitalypb/cluster.pb.go b/proto/go/gitalypb/cluster.pb.go index aae6d33122..49352a0e50 100644 --- a/proto/go/gitalypb/cluster.pb.go +++ b/proto/go/gitalypb/cluster.pb.go @@ -102,8 +102,10 @@ type Storage struct { Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // replication_factor defines the number of nodes where data of this storage are replicated. ReplicationFactor uint64 `protobuf:"varint,3,opt,name=replication_factor,json=replicationFactor,proto3" json:"replication_factor,omitempty"` + // node_id is the current residential node of the storage. + NodeId uint64 `protobuf:"varint,4,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` // replica_groups is a list of identifiers for the replica groups associated with this storage. - ReplicaGroups []uint64 `protobuf:"varint,4,rep,packed,name=replica_groups,json=replicaGroups,proto3" json:"replica_groups,omitempty"` + ReplicaGroups []uint64 `protobuf:"varint,5,rep,packed,name=replica_groups,json=replicaGroups,proto3" json:"replica_groups,omitempty"` } func (x *Storage) Reset() { @@ -159,6 +161,13 @@ func (x *Storage) GetReplicationFactor() uint64 { return 0 } +func (x *Storage) GetNodeId() uint64 { + if x != nil { + return x.NodeId + } + return 0 +} + func (x *Storage) GetReplicaGroups() []uint64 { if x != nil { return x.ReplicaGroups @@ -438,6 +447,10 @@ type RegisterStorageRequest struct { // storage_name is the human-readable name of the new storage. StorageName string `protobuf:"bytes,1,opt,name=storage_name,json=storageName,proto3" json:"storage_name,omitempty"` + // node_id is the inital residential node of the storage. + NodeId uint64 `protobuf:"varint,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` + // replication_factor contains the replication factor of this storage. 
+ ReplicationFactor uint64 `protobuf:"varint,3,opt,name=replication_factor,json=replicationFactor,proto3" json:"replication_factor,omitempty"` } func (x *RegisterStorageRequest) Reset() { @@ -479,6 +492,20 @@ func (x *RegisterStorageRequest) GetStorageName() string { return "" } +func (x *RegisterStorageRequest) GetNodeId() uint64 { + if x != nil { + return x.NodeId + } + return 0 +} + +func (x *RegisterStorageRequest) GetReplicationFactor() uint64 { + if x != nil { + return x.ReplicationFactor + } + return 0 +} + // RegisterStorageResponse is the response message for registering a new storage in a cluster. type RegisterStorageResponse struct { state protoimpl.MessageState @@ -546,49 +573,55 @@ var file_cluster_proto_rawDesc = []byte{ 0x01, 0x28, 0x04, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x92, 0x01, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, + 0x02, 0x38, 0x01, 0x22, 0xab, 0x01, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x63, 0x74, 0x6f, - 0x72, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0d, 0x72, 0x65, 0x70, 0x6c, 
0x69, - 0x63, 0x61, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x22, 0x6f, 0x0a, 0x0b, 0x4c, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, - 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, - 0x12, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, - 0x65, 0x72, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x22, 0x38, 0x0a, 0x17, 0x42, 0x6f, 0x6f, - 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x49, 0x64, 0x22, 0x45, 0x0a, 0x18, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x29, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x13, 0x0a, 0x11, 0x47, 0x65, - 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, - 0x43, 0x6c, 0x75, 0x73, 0x74, 
0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x22, 0x3b, 0x0a, 0x16, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x44, 0x0a, - 0x17, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x67, 0x69, 0x74, 0x61, - 0x6c, 0x79, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x07, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x6c, 0x61, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x67, 0x69, 0x74, 0x6c, 0x61, 0x62, 0x2d, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x69, 0x74, - 0x61, 0x6c, 0x79, 0x2f, 0x76, 0x31, 0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, - 0x2f, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x72, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x04, 0x52, 0x0d, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x73, 0x22, 0x6f, 0x0a, 0x0b, 0x4c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6c, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 
0x20, 0x01, 0x28, 0x04, 0x52, 0x08, + 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x22, 0x38, 0x0a, 0x17, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, + 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x22, 0x45, 0x0a, 0x18, + 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x67, 0x69, 0x74, 0x61, + 0x6c, 0x79, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x22, 0x13, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, + 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x83, 0x01, 0x0a, 0x16, 0x52, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x74, 0x6f, 0x72, + 
0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, + 0x12, 0x2d, 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x22, + 0x44, 0x0a, 0x17, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x67, 0x69, + 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x07, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x6c, 0x61, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x69, 0x74, 0x6c, 0x61, 0x62, 0x2d, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2f, 0x76, 0x31, 0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x2f, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( -- GitLab From 60042183b51d7b6c094e79dcca88d497c12e6a3a Mon Sep 17 00:00:00 2001 From: Quang-Minh Nguyen Date: Tue, 16 Jul 2024 14:04:03 +0700 Subject: [PATCH 03/17] raft: Persist full storage info after registration Recently, after registration, only the storage ID is persisted in the storage's metadata DB. The metadata Raft group stores more information than just the storage ID such as replication_factor. As a result, we need to persist full storage information in the storage's metadata DB. This persisted info will be used to compare with the upstream info so that the storage is able to apply any changes. 
--- internal/gitaly/storage/raft/manager.go | 16 +++-- internal/gitaly/storage/raft/manager_test.go | 68 ++++++++++++++----- .../gitaly/storage/raft/metadata_group.go | 16 ++--- .../storage/raft/metadata_group_test.go | 18 +++-- internal/gitaly/storage/raft/storage.go | 48 ++++++++----- 5 files changed, 116 insertions(+), 50 deletions(-) diff --git a/internal/gitaly/storage/raft/manager.go b/internal/gitaly/storage/raft/manager.go index fff76b445e..10513d2348 100644 --- a/internal/gitaly/storage/raft/manager.go +++ b/internal/gitaly/storage/raft/manager.go @@ -245,19 +245,23 @@ func (m *Manager) Start() (returnedErr error) { // Register storage ID if not exist. Similarly, this operation is handled by the metadata group. // It will be handled by the metadata authority in the future. for storageName, storageMgr := range m.storageManagers { - if err := storageMgr.loadStorageID(m.ctx); err != nil { - return fmt.Errorf("loading storage ID: %w", err) + if err := storageMgr.loadStorageInfo(m.ctx); err != nil { + return fmt.Errorf("loading persisted storage info: %w", err) } - if storageMgr.id == 0 { - id, err := m.metadataGroup.RegisterStorage(storageName) + if storageMgr.persistedInfo == nil || storageMgr.persistedInfo.GetStorageId() == 0 { + storageInfo, err := m.metadataGroup.RegisterStorage(storageName) if err != nil { return fmt.Errorf("registering storage ID: %w", err) } - if err := storageMgr.saveStorageID(m.ctx, id); err != nil { + if err := storageMgr.saveStorageInfo(m.ctx, storageInfo); err != nil { return fmt.Errorf("saving storage ID: %w", err) } } - m.logger.WithFields(log.Fields{"storage_name": storageName, "storage_id": storageMgr.id}).Info("storage joined the cluster") + m.logger.WithFields(log.Fields{ + "storage_name": storageName, + "storage_id": storageMgr.persistedInfo.GetStorageId(), + "replication_factor": storageMgr.persistedInfo.GetReplicationFactor(), + }).Info("storage joined the cluster") } m.logger.Info("Raft cluster has started") diff --git 
a/internal/gitaly/storage/raft/manager_test.go b/internal/gitaly/storage/raft/manager_test.go index be16bed11d..42c0e3cc4e 100644 --- a/internal/gitaly/storage/raft/manager_test.go +++ b/internal/gitaly/storage/raft/manager_test.go @@ -46,7 +46,7 @@ func TestManager_Start(t *testing.T) { resetManager := func(t *testing.T, m *Manager) { m.metadataGroup = nil for _, storageMgr := range m.storageManagers { - storageMgr.clearStorageID() + storageMgr.clearStorageInfo() storageMgr.nodeHost.Close() nodeHost, err := dragonboat.NewNodeHost(storageMgr.nodeHost.NodeHostConfig()) require.NoError(t, err) @@ -99,6 +99,12 @@ func TestManager_Start(t *testing.T) { fanOut(numNode, func(node raftID) { require.NoError(t, cluster.nodes[node].manager.Start()) + + storage := cluster.nodes[node].manager.firstStorage + require.Equal(t, storage.id.ToUint64(), storage.persistedInfo.StorageId) + require.Equal(t, storage.name, storage.persistedInfo.Name) + require.Equal(t, uint64(3), storage.persistedInfo.ReplicationFactor) + require.Equal(t, node.ToUint64(), storage.persistedInfo.NodeId) }) var expectedIDs, allocatedIDs []raftID @@ -117,9 +123,10 @@ func TestManager_Start(t *testing.T) { require.Equal(t, cluster.clusterID, clusterInfo.ClusterId) require.Equal(t, uint64(numNode+1), clusterInfo.NextStorageId) - require.Equal(t, &gitalypb.Storage{ + expectedInfo := &gitalypb.Storage{ StorageId: storage.id.ToUint64(), Name: storage.name, ReplicationFactor: 3, NodeId: node.ToUint64(), - }, clusterInfo.Storages[storage.id.ToUint64()]) + } + testhelper.ProtoEqual(t, expectedInfo, clusterInfo.Storages[storage.id.ToUint64()]) }) }) }(numNode) @@ -137,6 +144,12 @@ func TestManager_Start(t *testing.T) { fanOut(2, func(node raftID) { require.NoError(t, cluster.nodes[node].manager.Start()) require.Equal(t, true, cluster.nodes[node].manager.Ready()) + + storage := cluster.nodes[node].manager.firstStorage + require.Equal(t, storage.id.ToUint64(), storage.persistedInfo.StorageId) + require.Equal(t, 
storage.name, storage.persistedInfo.Name) + require.Equal(t, uint64(3), storage.persistedInfo.ReplicationFactor) + require.Equal(t, node.ToUint64(), storage.persistedInfo.NodeId) }) // The quorum is reached @@ -154,9 +167,10 @@ func TestManager_Start(t *testing.T) { require.Equal(t, cluster.clusterID, clusterInfo.ClusterId) require.Equal(t, uint64(3), clusterInfo.NextStorageId) - require.Equal(t, &gitalypb.Storage{ + expectedInfo := &gitalypb.Storage{ StorageId: storage.id.ToUint64(), Name: storage.name, ReplicationFactor: 3, NodeId: node.ToUint64(), - }, clusterInfo.Storages[storage.id.ToUint64()]) + } + testhelper.ProtoEqual(t, expectedInfo, clusterInfo.Storages[storage.id.ToUint64()]) }) // Now the third node joins. It does not matter whether the third node bootstraps the cluster. @@ -172,9 +186,10 @@ func TestManager_Start(t *testing.T) { require.Equal(t, cluster.clusterID, clusterInfo.ClusterId) require.Equal(t, uint64(4), clusterInfo.NextStorageId) - require.Equal(t, &gitalypb.Storage{ + expectedInfo := &gitalypb.Storage{ StorageId: storage.id.ToUint64(), Name: storage.name, ReplicationFactor: 3, NodeId: node.ToUint64(), - }, clusterInfo.Storages[storage.id.ToUint64()]) + } + testhelper.ProtoEqual(t, expectedInfo, clusterInfo.Storages[storage.id.ToUint64()]) }) }) }(bootstrap) @@ -226,6 +241,12 @@ func TestManager_Start(t *testing.T) { require.EqualError(t, cluster.nodes[node].manager.Start(), "registering storage ID: storage \"storage-2\" already registered") } else { require.NoError(t, cluster.nodes[node].manager.Start()) + + storage := cluster.nodes[node].manager.firstStorage + require.Equal(t, storage.id.ToUint64(), storage.persistedInfo.StorageId) + require.Equal(t, storage.name, storage.persistedInfo.Name) + require.Equal(t, uint64(3), storage.persistedInfo.ReplicationFactor) + require.Equal(t, node.ToUint64(), storage.persistedInfo.NodeId) } if node != duplicatedNode { @@ -253,9 +274,10 @@ func TestManager_Start(t *testing.T) { require.Equal(t, 
cluster.clusterID, clusterInfo.ClusterId) require.Equal(t, uint64(3), clusterInfo.NextStorageId) - require.Equal(t, &gitalypb.Storage{ - StorageId: storage.id.ToUint64(), Name: storage.name, ReplicationFactor: 3, - }, clusterInfo.Storages[storage.id.ToUint64()]) + expectedInfo := &gitalypb.Storage{ + StorageId: storage.id.ToUint64(), Name: storage.name, ReplicationFactor: 3, NodeId: node.ToUint64(), + } + testhelper.ProtoEqual(t, expectedInfo, clusterInfo.Storages[storage.id.ToUint64()]) } }) }) @@ -268,6 +290,12 @@ func TestManager_Start(t *testing.T) { fanOut(3, func(node raftID) { require.NoError(t, cluster.nodes[node].manager.Start()) + + storage := cluster.nodes[node].manager.firstStorage + require.Equal(t, storage.id.ToUint64(), storage.persistedInfo.StorageId) + require.Equal(t, storage.name, storage.persistedInfo.Name) + require.Equal(t, uint64(3), storage.persistedInfo.ReplicationFactor) + require.Equal(t, node.ToUint64(), storage.persistedInfo.NodeId) }) for _, node := range cluster.nodes { @@ -286,9 +314,10 @@ func TestManager_Start(t *testing.T) { require.Equal(t, cluster.clusterID, clusterInfo.ClusterId) require.Equal(t, uint64(4), clusterInfo.NextStorageId) - require.Equal(t, &gitalypb.Storage{ - StorageId: mgr.firstStorage.id.ToUint64(), Name: mgr.firstStorage.name, ReplicationFactor: 3, - }, clusterInfo.Storages[mgr.firstStorage.id.ToUint64()]) + expectedInfo := &gitalypb.Storage{ + StorageId: mgr.firstStorage.id.ToUint64(), Name: mgr.firstStorage.name, ReplicationFactor: 3, NodeId: node.ToUint64(), + } + testhelper.ProtoEqual(t, expectedInfo, clusterInfo.Storages[mgr.firstStorage.id.ToUint64()]) }) }) @@ -300,6 +329,12 @@ func TestManager_Start(t *testing.T) { fanOut(3, func(node raftID) { require.NoError(t, cluster.nodes[node].manager.Start()) + + storage := cluster.nodes[node].manager.firstStorage + require.Equal(t, storage.id.ToUint64(), storage.persistedInfo.StorageId) + require.Equal(t, storage.name, storage.persistedInfo.Name) + 
require.Equal(t, uint64(3), storage.persistedInfo.ReplicationFactor) + require.Equal(t, node.ToUint64(), storage.persistedInfo.NodeId) }) for _, node := range cluster.nodes { @@ -319,9 +354,10 @@ func TestManager_Start(t *testing.T) { require.Equal(t, cluster.clusterID, clusterInfo.ClusterId) require.Equal(t, uint64(4), clusterInfo.NextStorageId) - require.Equal(t, &gitalypb.Storage{ - StorageId: mgr.firstStorage.id.ToUint64(), Name: mgr.firstStorage.name, ReplicationFactor: 3, - }, clusterInfo.Storages[mgr.firstStorage.id.ToUint64()]) + expectedInfo := &gitalypb.Storage{ + StorageId: mgr.firstStorage.id.ToUint64(), Name: mgr.firstStorage.name, ReplicationFactor: 3, NodeId: node.ToUint64(), + } + testhelper.ProtoEqual(t, expectedInfo, clusterInfo.Storages[mgr.firstStorage.id.ToUint64()]) }) }) diff --git a/internal/gitaly/storage/raft/metadata_group.go b/internal/gitaly/storage/raft/metadata_group.go index e229f15f41..d0395667ae 100644 --- a/internal/gitaly/storage/raft/metadata_group.go +++ b/internal/gitaly/storage/raft/metadata_group.go @@ -157,35 +157,35 @@ func (g *metadataRaftGroup) tryBootstrap() (*gitalypb.Cluster, error) { // RegisterStorage requests the metadata group to allocate a unique ID for a new storage. The caller // is expected to persist the newly allocated ID. This ID is used for future interactions with the // Raft cluster. The storage name must be unique cluster-wide. 
-func (g *metadataRaftGroup) RegisterStorage(storageName string) (raftID, error) { +func (g *metadataRaftGroup) RegisterStorage(storageName string) (*gitalypb.Storage, error) { storageName = strings.TrimSpace(storageName) cluster, err := g.ClusterInfo() if err != nil { - return 0, err + return nil, err } for _, storage := range cluster.Storages { if storage.GetName() == storageName { - return 0, fmt.Errorf("storage %q already registered", storageName) + return nil, fmt.Errorf("storage %q already registered", storageName) } } result, response, err := g.requestRegisterStorage(storageName, g.clusterConfig) if err != nil { - return 0, fmt.Errorf("registering storage: %w", err) + return nil, fmt.Errorf("registering storage: %w", err) } switch result { case resultRegisterStorageSuccessful: - return raftID(response.GetStorage().GetStorageId()), nil + return response.GetStorage(), nil case resultStorageAlreadyRegistered: // There's a chance that storage is registered by another node while firing this request. We // have no choice but reject this request. - return 0, fmt.Errorf("storage %q already registered", storageName) + return nil, fmt.Errorf("storage %q already registered", storageName) case resultRegisterStorageClusterNotBootstrappedYet: // Extremely rare occasion. This case occurs when the cluster information is wiped out of // the metadata group when the register storage request is in-flight. 
- return 0, fmt.Errorf("cluster has not been bootstrapped") + return nil, fmt.Errorf("cluster has not been bootstrapped") default: - return 0, fmt.Errorf("unsupported update result: %d", result) + return nil, fmt.Errorf("unsupported update result: %d", result) } } diff --git a/internal/gitaly/storage/raft/metadata_group_test.go b/internal/gitaly/storage/raft/metadata_group_test.go index 2f7a8ed9c2..100d185a52 100644 --- a/internal/gitaly/storage/raft/metadata_group_test.go +++ b/internal/gitaly/storage/raft/metadata_group_test.go @@ -280,9 +280,14 @@ func TestMetadataGroup_RegisterStorage(t *testing.T) { groups := bootstrapCluster(t, cluster, ptnMgr) for i := raftID(1); i <= 3; i++ { - id, err := groups[i].RegisterStorage(fmt.Sprintf("storage-%d", 2*i)) + info, err := groups[i].RegisterStorage(fmt.Sprintf("storage-%d", 2*i)) require.NoError(t, err) - require.Equal(t, i, id) + require.Equal(t, &gitalypb.Storage{ + StorageId: uint64(i), + Name: fmt.Sprintf("storage-%d", 2*i), + ReplicationFactor: 3, + NodeId: i.ToUint64(), + }, info) } for i := raftID(1); i <= 3; i++ { @@ -310,9 +315,14 @@ func TestMetadataGroup_RegisterStorage(t *testing.T) { ptnMgr := setupTestPartitionManager(t, cfg) groups := bootstrapCluster(t, cluster, ptnMgr) - id, err := groups[1].RegisterStorage("storage-1") + info, err := groups[1].RegisterStorage("storage-1") require.NoError(t, err) - require.Equal(t, raftID(1), id) + require.Equal(t, &gitalypb.Storage{ + StorageId: 1, + Name: "storage-1", + ReplicationFactor: 3, + NodeId: 1, + }, info) _, err = groups[2].RegisterStorage("storage-1") require.EqualError(t, err, "storage \"storage-1\" already registered") diff --git a/internal/gitaly/storage/raft/storage.go b/internal/gitaly/storage/raft/storage.go index e6acefdb15..ed0f1f1a79 100644 --- a/internal/gitaly/storage/raft/storage.go +++ b/internal/gitaly/storage/raft/storage.go @@ -9,6 +9,8 @@ import ( "github.com/lni/dragonboat/v4" 
"gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage/keyvalue" "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage/storagemgr" + "gitlab.com/gitlab-org/gitaly/v16/proto/go/gitalypb" + "google.golang.org/protobuf/proto" ) type dbAccessor func(context.Context, bool, func(keyvalue.ReadWriter) error) error @@ -17,10 +19,11 @@ type dbAccessor func(context.Context, bool, func(keyvalue.ReadWriter) error) err // keyvalue.Transactioner for each Raft group, allowing the Raft groups to store their data in the // underlying keyvalue store. type storageManager struct { - id raftID - name string - ptnMgr *storagemgr.PartitionManager - nodeHost *dragonboat.NodeHost + id raftID + name string + ptnMgr *storagemgr.PartitionManager + nodeHost *dragonboat.NodeHost + persistedInfo *gitalypb.Storage } // newStorageManager returns an instance of storage manager. @@ -35,10 +38,10 @@ func newStorageManager(name string, ptnMgr *storagemgr.PartitionManager, nodeHos // Close closes the storage manager. func (m *storageManager) Close() { m.nodeHost.Close() } -func (m *storageManager) loadStorageID(ctx context.Context) error { +func (m *storageManager) loadStorageInfo(ctx context.Context) error { db := m.dbForStorage() - return db(ctx, true, func(txn keyvalue.ReadWriter) error { - item, err := txn.Get([]byte("storage_id")) + return db(ctx, false, func(txn keyvalue.ReadWriter) error { + item, err := txn.Get([]byte("storage")) if err != nil { if errors.Is(err, badger.ErrKeyNotFound) { return nil @@ -46,32 +49,45 @@ func (m *storageManager) loadStorageID(ctx context.Context) error { return err } return item.Value(func(value []byte) error { - m.id.UnmarshalBinary(value) + var persistedInfo gitalypb.Storage + if err := proto.Unmarshal(value, &persistedInfo); err != nil { + return err + } + m.persistedInfo = &persistedInfo + m.id = raftID(m.persistedInfo.StorageId) return nil }) }) } -func (m *storageManager) saveStorageID(ctx context.Context, id raftID) error { +func (m *storageManager) 
saveStorageInfo(ctx context.Context, storage *gitalypb.Storage) error { db := m.dbForStorage() return db(ctx, false, func(txn keyvalue.ReadWriter) error { - _, err := txn.Get([]byte("storage_id")) + _, err := txn.Get([]byte("storage")) if err == nil { - return fmt.Errorf("storage ID already exists") + return fmt.Errorf("storage already exists") } else if !errors.Is(err, badger.ErrKeyNotFound) { return err } - if err := txn.Set([]byte("storage_id"), id.MarshalBinary()); err != nil { + marshaled, err := proto.Marshal(storage) + if err != nil { + return err + } + if err := txn.Set([]byte("storage"), marshaled); err != nil { return err } - m.id = id + m.persistedInfo = storage + m.id = raftID(m.persistedInfo.StorageId) return nil }) } -// clearStorageID clears the storage ID inside the in-memory storage of the storage manager. It does -// not clean the underlying storage ID. -func (m *storageManager) clearStorageID() { m.id = 0 } +// clearStorageInfo clears the storage info inside the in-memory storage of the storage manager. It +// does not clean the persisted info the DB. +func (m *storageManager) clearStorageInfo() { + m.id = 0 + m.persistedInfo = nil +} func (m *storageManager) dbForStorage() dbAccessor { return func(ctx context.Context, readOnly bool, fn func(keyvalue.ReadWriter) error) error { -- GitLab From d7e3ff94db521d16728119f47e7f2cfb6c087dc2 Mon Sep 17 00:00:00 2001 From: Quang-Minh Nguyen Date: Sat, 13 Jul 2024 17:28:07 +0700 Subject: [PATCH 04/17] raft: Implement a deterministic ring-based replica placement strategy This commit implements a deterministic replica placement. This strategy appoints a set of replica groups for a particular storage. Data of a storage are replicated to some certain amount of storages (defined by its replication factor) next to it in the ring. The order of the storage is determined by the storage ID, allocated by the metadata Raft group. 
For example: - A cluster of 3 nodes has the following replica placement: { storage-1 => [storage-2, storage-3], storage-2 => [storage-3, storage-1], storage-3 => [storage-1, storage-2], } - A cluster of 5 nodes has the following replica placement: { storage-1 => [storage-2, storage-3], storage-2 => [storage-3, storage-4], storage-3 => [storage-4, storage-5], storage-4 => [storage-5, storage-1], storage-5 => [storage-1, storage-2], } - If storage-4 is decommissioned: { storage-1 => [storage-2, storage-3], storage-2 => [storage-3, storage-5], storage-3 => [storage-5, storage-1], storage-5 => [storage-1, storage-2], } The strategy takes the physical storage residence into account. It means two or more storages of a same node don't replicate to each other. Although we limit one storage per node at this point, this coverage saves us some headaches later. --- .../gitaly/storage/raft/replica_placement.go | 73 ++++++ .../storage/raft/replica_placement_test.go | 233 ++++++++++++++++++ 2 files changed, 306 insertions(+) create mode 100644 internal/gitaly/storage/raft/replica_placement.go create mode 100644 internal/gitaly/storage/raft/replica_placement_test.go diff --git a/internal/gitaly/storage/raft/replica_placement.go b/internal/gitaly/storage/raft/replica_placement.go new file mode 100644 index 0000000000..38daf57e4d --- /dev/null +++ b/internal/gitaly/storage/raft/replica_placement.go @@ -0,0 +1,73 @@ +package raft + +import ( + "slices" + + "gitlab.com/gitlab-org/gitaly/v16/proto/go/gitalypb" +) + +// replicaPlacement is an interface for a function that appoint replicas for +// all storages of a Raft cluster. +type replicaPlacement interface { + apply(map[uint64]*gitalypb.Storage) +} + +// simpleRingReplicaPlacement implements a deterministic replica placement. It implements a simple +// ring-based placement strategy where each storage is assigned replicas based on the next storage +// nodes in the ring, wrapping around if necessary. 
This approach ensures a balanced distribution of +// replicas across the available storage nodes. Storages are allowed to have different replication +// factors, the default value is 3. +// +// For example, consider a scenario with 5 storages: 1, 2, 3, 4, 5. The replica placement will be: +// - Storage 1 (authority), replicas on storage 2 and storage 3 +// - Storage 2 (authority), replicas on storage 3 and storage 4 +// - Storage 3 (authority): replicas on storage 4 and storage 5 +// - Storage 4 (authority): replicas on storage 5 and storage 1 +// - Storage 5 (authority): replicas on storage 1 and storage 2 +// +// If the replication factor is more than the number of nodes, the strategy does its best to fill in +// the gaps. For example, with a replication factor of 5 and there are 3 storages, the replica +// placement will be: +// - Storage 1 (authority): replicas on storage 2 and storage 3 +// - Storage 2 (authority): replicas on storage 3 and storage 1 +// - Storage 3 (authority): replicas on storage 1 and storage 2 +// +// This strategy also takes storage residence into account. It means storages residing on the same +// nodes don't replicate to each other. +// +// The storages are not necessarily contiguous. Replication factor of 0 or 1 means that the storage +// does not replicate. +type simpleRingReplicaPlacement struct{} + +func (*simpleRingReplicaPlacement) apply(storages map[uint64]*gitalypb.Storage) { + var ids []uint64 + for id := range storages { + ids = append(ids, id) + } + slices.Sort(ids) + + for i := range ids { + // Reset replica groups. + storage := storages[ids[i]] + storage.ReplicaGroups = []uint64{} + j := i + for k := storage.ReplicationFactor - 1; k >= 1; k-- { + for { + j = (j + 1) % len(ids) + // Ensure the other storage is not on the same node. + if j == i || storages[ids[j]].GetNodeId() != storage.GetNodeId() { + break + } + } + // Reach the examining storage. It means there are less eligible nodes than needed. 
+ if j == i { + break + } + storage.ReplicaGroups = append(storage.ReplicaGroups, ids[j]) + } + } +} + +func newSimpleRingReplicaPlacement() replicaPlacement { + return &simpleRingReplicaPlacement{} +} diff --git a/internal/gitaly/storage/raft/replica_placement_test.go b/internal/gitaly/storage/raft/replica_placement_test.go new file mode 100644 index 0000000000..c510f3d51b --- /dev/null +++ b/internal/gitaly/storage/raft/replica_placement_test.go @@ -0,0 +1,233 @@ +package raft + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + "gitlab.com/gitlab-org/gitaly/v16/proto/go/gitalypb" +) + +func TestSimpleRingReplicaPlacement(t *testing.T) { + t.Parallel() + + t.Run("0 storage", func(t *testing.T) { + t.Parallel() + + storages := map[uint64]*gitalypb.Storage{} + newSimpleRingReplicaPlacement().apply(storages) + require.Equal(t, map[uint64]*gitalypb.Storage{}, storages) + }) + + t.Run("1 storage", func(t *testing.T) { + t.Parallel() + + storages := map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1}, + } + newSimpleRingReplicaPlacement().apply(storages) + require.Equal(t, map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{}}, + }, storages) + }) + + t.Run("2 storages", func(t *testing.T) { + t.Parallel() + + storages := map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2}, + } + newSimpleRingReplicaPlacement().apply(storages) + require.Equal(t, map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2}}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{1}}, + }, storages) + }) + + t.Run("3 storages", func(t *testing.T) { + t.Parallel() + + storages := map[uint64]*gitalypb.Storage{ + 1: 
{StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2}, + 3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3}, + } + newSimpleRingReplicaPlacement().apply(storages) + require.Equal(t, map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{3, 1}}, + 3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{1, 2}}, + }, storages) + }) + + t.Run("more than 3 storages", func(t *testing.T) { + t.Parallel() + + storages := map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2}, + 3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3}, + 4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 4}, + 5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 5}, + } + newSimpleRingReplicaPlacement().apply(storages) + require.Equal(t, map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{3, 4}}, + 3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{4, 5}}, + 4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 4, ReplicaGroups: []uint64{5, 1}}, + 5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 5, ReplicaGroups: []uint64{1, 2}}, + }, storages) + }) + + t.Run("more than 3 storages in random order", func(t *testing.T) { + t.Parallel() + + storages := map[uint64]*gitalypb.Storage{ + 4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 4}, + 1: {StorageId: 1, 
Name: "storage-1", ReplicationFactor: 3, NodeId: 1}, + 5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 5}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2}, + 3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3}, + } + newSimpleRingReplicaPlacement().apply(storages) + require.Equal(t, map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{3, 4}}, + 3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{4, 5}}, + 4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 4, ReplicaGroups: []uint64{5, 1}}, + 5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 5, ReplicaGroups: []uint64{1, 2}}, + }, storages) + }) + + t.Run("discontinued eligible storages", func(t *testing.T) { + t.Parallel() + + storages := map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2}, + 3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3}, + 5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 5}, + 6: {StorageId: 6, Name: "storage-6", ReplicationFactor: 3, NodeId: 6}, + } + newSimpleRingReplicaPlacement().apply(storages) + require.Equal(t, map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{3, 5}}, + 3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{5, 6}}, + 5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 5, ReplicaGroups: []uint64{6, 1}}, + 6: {StorageId: 6, Name: "storage-6", ReplicationFactor: 3, NodeId: 6, 
ReplicaGroups: []uint64{1, 2}}, + }, storages) + }) + t.Run("storages residing on the same node", func(t *testing.T) { + t.Parallel() + + storages := map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 1}, + 3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 2}, + 4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 2}, + 5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 3}, + } + newSimpleRingReplicaPlacement().apply(storages) + require.Equal(t, map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{3, 4}}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{3, 4}}, + 3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{5, 1}}, + 4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{5, 1}}, + 5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{1, 2}}, + }, storages) + }) + + t.Run("storages have different replication factors", func(t *testing.T) { + t.Parallel() + + storages := map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 5, NodeId: 2}, + 3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3}, + 4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 4}, + 5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 1, NodeId: 5}, + 6: {StorageId: 6, Name: "storage-6", ReplicationFactor: 1, NodeId: 6}, + 7: {StorageId: 7, Name: "storage-7", ReplicationFactor: 3, NodeId: 7}, + } + newSimpleRingReplicaPlacement().apply(storages) + require.Equal(t, map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 
3, NodeId: 1, ReplicaGroups: []uint64{2, 3}}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 5, NodeId: 2, ReplicaGroups: []uint64{3, 4, 5, 6}}, + 3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{4, 5}}, + 4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 4, ReplicaGroups: []uint64{5, 6}}, + 5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 1, NodeId: 5, ReplicaGroups: []uint64{}}, + 6: {StorageId: 6, Name: "storage-6", ReplicationFactor: 1, NodeId: 6, ReplicaGroups: []uint64{}}, + 7: {StorageId: 7, Name: "storage-7", ReplicationFactor: 3, NodeId: 7, ReplicaGroups: []uint64{1, 2}}, + }, storages) + }) + + t.Run("fixup existing replica groups", func(t *testing.T) { + t.Parallel() + + storages := map[uint64]*gitalypb.Storage{ + // Result of assigning replica groups when there are 2 storages. + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2}}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{1}}, + // This one is new. 
+ 3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3}, + } + newSimpleRingReplicaPlacement().apply(storages) + require.Equal(t, map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{3, 1}}, + 3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{1, 2}}, + }, storages) + }) + + t.Run("add new storages", func(t *testing.T) { + t.Parallel() + + storages := map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1}, + } + for i := uint64(2); i <= 7; i++ { + storages[i] = &gitalypb.Storage{ + StorageId: i, + Name: fmt.Sprintf("storage-%d", i), + ReplicationFactor: 3, + NodeId: i, + } + newSimpleRingReplicaPlacement().apply(storages) + } + require.Equal(t, map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{3, 4}}, + 3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{4, 5}}, + 4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 4, ReplicaGroups: []uint64{5, 6}}, + 5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 5, ReplicaGroups: []uint64{6, 7}}, + 6: {StorageId: 6, Name: "storage-6", ReplicationFactor: 3, NodeId: 6, ReplicaGroups: []uint64{7, 1}}, + 7: {StorageId: 7, Name: "storage-7", ReplicationFactor: 3, NodeId: 7, ReplicaGroups: []uint64{1, 2}}, + }, storages) + }) + + t.Run("remove storages", func(t *testing.T) { + t.Parallel() + + storages := map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 
2, ReplicaGroups: []uint64{3, 4}}, + 3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{4, 5}}, + 4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 4, ReplicaGroups: []uint64{5, 6}}, + 5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 5, ReplicaGroups: []uint64{6, 7}}, + 6: {StorageId: 6, Name: "storage-6", ReplicationFactor: 3, NodeId: 6, ReplicaGroups: []uint64{7, 1}}, + 7: {StorageId: 7, Name: "storage-7", ReplicationFactor: 3, NodeId: 7, ReplicaGroups: []uint64{1, 2}}, + } + for i := uint64(4); i <= 7; i++ { + delete(storages, i) + newSimpleRingReplicaPlacement().apply(storages) + } + require.Equal(t, map[uint64]*gitalypb.Storage{ + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}}, + 2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{3, 1}}, + 3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{1, 2}}, + }, storages) + }) +} -- GitLab From 37ba9cfb6847322f080ba3235b15cc0becae79e3 Mon Sep 17 00:00:00 2001 From: Quang-Minh Nguyen Date: Sat, 13 Jul 2024 17:45:47 +0700 Subject: [PATCH 05/17] raft: Apply default replica placement strategy in metadata Raft group A prior commit implements a ring-based replica placement strategy. This commit applies that strategy in the statemachine of the metadata Raft group. This approach ensures all members of the metadata Raft group yield the same replica groups for the cluster. Due to the nondeterministic characteristics of a Raft cluster, a node might have a different view between its persistent state and the actual state of the cluster. For example, consider this scenario. At the time storage A registers, there is no other storage, hence it has no replica groups. Right after storage A persists its empty replica groups, new registration from storage B changes the replica group list of A in the cluster.
Storage A is not aware of this change. We don't try to address that gap now. Any change of a storage's replica group is expensive. It might lead to massive data replication or deletion on a node. So, we'll defer handling replica group changes to later. --- internal/gitaly/storage/raft/manager_test.go | 52 ++++++++++-- .../storage/raft/metadata_group_test.go | 12 ++- .../storage/raft/metadata_statemachine.go | 19 +++-- .../raft/metadata_statemachine_test.go | 80 +++++++++++++++++-- .../gitaly/storage/raft/replica_placement.go | 6 ++ 5 files changed, 146 insertions(+), 23 deletions(-) diff --git a/internal/gitaly/storage/raft/manager_test.go b/internal/gitaly/storage/raft/manager_test.go index 42c0e3cc4e..70a4964907 100644 --- a/internal/gitaly/storage/raft/manager_test.go +++ b/internal/gitaly/storage/raft/manager_test.go @@ -22,6 +22,21 @@ func managerTestConfig(bootstrap bool) ManagerConfig { func TestManager_Start(t *testing.T) { t.Parallel() + replicaGroups := func(i raftID, n uint64) []uint64 { + wrap := func(i raftID) uint64 { + if i.ToUint64() <= n { + return i.ToUint64() + } + return i.ToUint64() % n + } + if n == 1 { + return []uint64{} + } else if n == 2 { + return []uint64{wrap(i + 1)} + } + return []uint64{wrap(i + 1), wrap(i + 2)} + } + startManager := func(t *testing.T) nodeStarter { return func(cluster *testRaftCluster, node raftID) (*testNode, error) { ctx := testhelper.Context(t) @@ -124,7 +139,11 @@ func TestManager_Start(t *testing.T) { require.Equal(t, cluster.clusterID, clusterInfo.ClusterId) require.Equal(t, uint64(numNode+1), clusterInfo.NextStorageId) expectedInfo := &gitalypb.Storage{ - StorageId: storage.id.ToUint64(), Name: storage.name, ReplicationFactor: 3, NodeId: node.ToUint64(), + StorageId: storage.id.ToUint64(), + Name: storage.name, + ReplicationFactor: 3, + NodeId: node.ToUint64(), + ReplicaGroups: replicaGroups(storage.id, uint64(numNode)), } testhelper.ProtoEqual(t, expectedInfo, clusterInfo.Storages[storage.id.ToUint64()]) }) 
@@ -163,12 +182,17 @@ func TestManager_Start(t *testing.T) { storage := mgr.firstStorage clusterInfo, err := mgr.ClusterInfo() + fmt.Printf("%+v %+v\n", node, clusterInfo) require.NoError(t, err) require.Equal(t, cluster.clusterID, clusterInfo.ClusterId) require.Equal(t, uint64(3), clusterInfo.NextStorageId) expectedInfo := &gitalypb.Storage{ - StorageId: storage.id.ToUint64(), Name: storage.name, ReplicationFactor: 3, NodeId: node.ToUint64(), + StorageId: storage.id.ToUint64(), + Name: storage.name, + ReplicationFactor: 3, + NodeId: node.ToUint64(), + ReplicaGroups: replicaGroups(storage.id, 2), } testhelper.ProtoEqual(t, expectedInfo, clusterInfo.Storages[storage.id.ToUint64()]) }) @@ -187,7 +211,11 @@ func TestManager_Start(t *testing.T) { require.Equal(t, cluster.clusterID, clusterInfo.ClusterId) require.Equal(t, uint64(4), clusterInfo.NextStorageId) expectedInfo := &gitalypb.Storage{ - StorageId: storage.id.ToUint64(), Name: storage.name, ReplicationFactor: 3, NodeId: node.ToUint64(), + StorageId: storage.id.ToUint64(), + Name: storage.name, + ReplicationFactor: 3, + NodeId: node.ToUint64(), + ReplicaGroups: replicaGroups(storage.id, 3), } testhelper.ProtoEqual(t, expectedInfo, clusterInfo.Storages[storage.id.ToUint64()]) }) @@ -275,7 +303,11 @@ func TestManager_Start(t *testing.T) { require.Equal(t, uint64(3), clusterInfo.NextStorageId) expectedInfo := &gitalypb.Storage{ - StorageId: storage.id.ToUint64(), Name: storage.name, ReplicationFactor: 3, NodeId: node.ToUint64(), + StorageId: storage.id.ToUint64(), + Name: storage.name, + ReplicationFactor: 3, + NodeId: node.ToUint64(), + ReplicaGroups: replicaGroups(storage.id, 2), } testhelper.ProtoEqual(t, expectedInfo, clusterInfo.Storages[storage.id.ToUint64()]) } @@ -315,7 +347,11 @@ func TestManager_Start(t *testing.T) { require.Equal(t, uint64(4), clusterInfo.NextStorageId) expectedInfo := &gitalypb.Storage{ - StorageId: mgr.firstStorage.id.ToUint64(), Name: mgr.firstStorage.name, ReplicationFactor: 3, 
NodeId: node.ToUint64(), + StorageId: mgr.firstStorage.id.ToUint64(), + Name: mgr.firstStorage.name, + ReplicationFactor: 3, + NodeId: node.ToUint64(), + ReplicaGroups: replicaGroups(mgr.firstStorage.id, 3), + } + testhelper.ProtoEqual(t, expectedInfo, clusterInfo.Storages[mgr.firstStorage.id.ToUint64()]) + }) @@ -355,7 +391,11 @@ func TestManager_Start(t *testing.T) { require.Equal(t, uint64(4), clusterInfo.NextStorageId) expectedInfo := &gitalypb.Storage{ - StorageId: mgr.firstStorage.id.ToUint64(), Name: mgr.firstStorage.name, ReplicationFactor: 3, NodeId: node.ToUint64(), + StorageId: mgr.firstStorage.id.ToUint64(), + Name: mgr.firstStorage.name, + ReplicationFactor: 3, + NodeId: node.ToUint64(), + ReplicaGroups: replicaGroups(mgr.firstStorage.id, 3), + } + testhelper.ProtoEqual(t, expectedInfo, clusterInfo.Storages[mgr.firstStorage.id.ToUint64()]) + }) diff --git a/internal/gitaly/storage/raft/metadata_group_test.go b/internal/gitaly/storage/raft/metadata_group_test.go index 100d185a52..c0939cdc0b 100644 --- a/internal/gitaly/storage/raft/metadata_group_test.go +++ b/internal/gitaly/storage/raft/metadata_group_test.go @@ -279,6 +279,11 @@ func TestMetadataGroup_RegisterStorage(t *testing.T) { ptnMgr := setupTestPartitionManager(t, cfg) groups := bootstrapCluster(t, cluster, ptnMgr) + expectedReplicaGroups := [][]uint64{ + nil, // When node-1 is registered, there are no other storages. + {1}, // When node-2 is registered, only node-1 is eligible. + {1, 2}, // When node-3 is registered, both node-1 and node-2 are eligible.
+ } for i := raftID(1); i <= 3; i++ { info, err := groups[i].RegisterStorage(fmt.Sprintf("storage-%d", 2*i)) require.NoError(t, err) @@ -287,6 +292,7 @@ func TestMetadataGroup_RegisterStorage(t *testing.T) { Name: fmt.Sprintf("storage-%d", 2*i), ReplicationFactor: 3, NodeId: i.ToUint64(), + ReplicaGroups: expectedReplicaGroups[i-1], }, info) } @@ -297,9 +303,9 @@ func TestMetadataGroup_RegisterStorage(t *testing.T) { ClusterId: cluster.clusterID, NextStorageId: 4, Storages: map[uint64]*gitalypb.Storage{ - 1: {StorageId: 1, Name: "storage-2", ReplicationFactor: 3, NodeId: 1}, - 2: {StorageId: 2, Name: "storage-4", ReplicationFactor: 3, NodeId: 2}, - 3: {StorageId: 3, Name: "storage-6", ReplicationFactor: 3, NodeId: 3}, + 1: {StorageId: 1, Name: "storage-2", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}}, + 2: {StorageId: 2, Name: "storage-4", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{3, 1}}, + 3: {StorageId: 3, Name: "storage-6", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{1, 2}}, }, }, clusterInfo) } diff --git a/internal/gitaly/storage/raft/metadata_statemachine.go b/internal/gitaly/storage/raft/metadata_statemachine.go index 327a6c45d9..9a0c3a14bb 100644 --- a/internal/gitaly/storage/raft/metadata_statemachine.go +++ b/internal/gitaly/storage/raft/metadata_statemachine.go @@ -14,10 +14,11 @@ import ( ) type metadataStateMachine struct { - ctx context.Context - groupID raftID - replicaID raftID - accessDB dbAccessor + ctx context.Context + groupID raftID + replicaID raftID + accessDB dbAccessor + replicaPlacement replicaPlacement } const ( @@ -217,6 +218,7 @@ func (s *metadataStateMachine) handleRegisterStorageRequest(req *gitalypb.Regist } cluster.Storages[cluster.NextStorageId] = newStorage cluster.NextStorageId++ + s.replicaPlacement.apply(cluster.Storages) response, err := anyProtoMarshal(&gitalypb.RegisterStorageResponse{Storage: newStorage}) if err != nil { @@ -296,9 +298,10 @@ var _ = 
Statemachine(&metadataStateMachine{}) func newMetadataStatemachine(ctx context.Context, groupID raftID, replicaID raftID, accessDB dbAccessor) *metadataStateMachine { return &metadataStateMachine{ - ctx: ctx, - groupID: groupID, - replicaID: replicaID, - accessDB: accessDB, + ctx: ctx, + groupID: groupID, + replicaID: replicaID, + accessDB: accessDB, + replicaPlacement: newDefaultReplicaPlacement(), } } diff --git a/internal/gitaly/storage/raft/metadata_statemachine_test.go b/internal/gitaly/storage/raft/metadata_statemachine_test.go index 4871ccf3cc..4c390dbdf1 100644 --- a/internal/gitaly/storage/raft/metadata_statemachine_test.go +++ b/internal/gitaly/storage/raft/metadata_statemachine_test.go @@ -216,7 +216,7 @@ func TestMetadataStateMachine_Update(t *testing.T) { t.Run("register a new storage", func(t *testing.T) { t.Parallel() - cfg := testcfg.Build(t, testcfg.WithStorages("storage-1", "storage-2")) + cfg := testcfg.Build(t, testcfg.WithStorages("storage-1", "storage-2", "storage-3", "storage-4", "storage-5")) ctx := testhelper.Context(t) ptnMgr := setupTestPartitionManager(t, cfg) @@ -230,9 +230,16 @@ func TestMetadataStateMachine_Update(t *testing.T) { result, err := sm.Update([]statemachine.Entry{ {Index: 2, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name, ReplicationFactor: 3, NodeId: 1})}, {Index: 3, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[1].Name, ReplicationFactor: 5, NodeId: 2})}, + {Index: 4, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[2].Name, ReplicationFactor: 3, NodeId: 3})}, + {Index: 5, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[3].Name, ReplicationFactor: 3, NodeId: 4})}, + {Index: 6, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[4].Name, ReplicationFactor: 3, NodeId: 5})}, }) require.NoError(t, err) - require.Equal(t, []statemachine.Entry{ + + // 
Remember, storage registration is supposed to be distributed and async. At the time first + // update finishes, the second might not have arrived. So, an update's returned data + // consists of the changes of the time that update is processed only. + testhelper.ProtoEqual(t, []statemachine.Entry{ {Index: 2, Result: statemachine.Result{ Value: uint64(resultRegisterStorageSuccessful), Data: wrapSMMessage(t, &gitalypb.RegisterStorageResponse{ @@ -252,27 +259,88 @@ func TestMetadataStateMachine_Update(t *testing.T) { Name: cfg.Storages[1].Name, ReplicationFactor: 5, NodeId: 2, + ReplicaGroups: []uint64{1}, + }, + }), + }}, + {Index: 4, Result: statemachine.Result{ + Value: uint64(resultRegisterStorageSuccessful), + Data: wrapSMMessage(t, &gitalypb.RegisterStorageResponse{ + Storage: &gitalypb.Storage{ + StorageId: 3, + Name: cfg.Storages[2].Name, + ReplicationFactor: 3, + NodeId: 3, + ReplicaGroups: []uint64{1, 2}, + }, + }), + }}, + {Index: 5, Result: statemachine.Result{ + Value: uint64(resultRegisterStorageSuccessful), + Data: wrapSMMessage(t, &gitalypb.RegisterStorageResponse{ + Storage: &gitalypb.Storage{ + StorageId: 4, + Name: cfg.Storages[3].Name, + ReplicationFactor: 3, + NodeId: 4, + ReplicaGroups: []uint64{1, 2}, + }, + }), + }}, + {Index: 6, Result: statemachine.Result{ + Value: uint64(resultRegisterStorageSuccessful), + Data: wrapSMMessage(t, &gitalypb.RegisterStorageResponse{ + Storage: &gitalypb.Storage{ + StorageId: 5, + Name: cfg.Storages[4].Name, + ReplicationFactor: 3, + NodeId: 5, + ReplicaGroups: []uint64{1, 2}, }, }), }}, }, result) - requireLastApplied(t, sm, 3) + requireLastApplied(t, sm, 6) + // The final state of the statemachine does have latest replica groups. 
requireClusterState(t, sm, &gitalypb.Cluster{ ClusterId: "1234", - NextStorageId: 3, + NextStorageId: 6, Storages: map[uint64]*gitalypb.Storage{ 1: { StorageId: 1, Name: cfg.Storages[0].Name, ReplicationFactor: 3, NodeId: 1, + ReplicaGroups: []uint64{2, 3}, }, 2: { StorageId: 2, Name: cfg.Storages[1].Name, ReplicationFactor: 5, NodeId: 2, + ReplicaGroups: []uint64{3, 4, 5, 1}, + }, + 3: { + StorageId: 3, + Name: cfg.Storages[2].Name, + ReplicationFactor: 3, + NodeId: 3, + ReplicaGroups: []uint64{4, 5}, + }, + 4: { + StorageId: 4, + Name: cfg.Storages[3].Name, + ReplicationFactor: 3, + NodeId: 4, + ReplicaGroups: []uint64{5, 1}, + }, + 5: { + StorageId: 5, + Name: cfg.Storages[4].Name, + ReplicationFactor: 3, + NodeId: 5, + ReplicaGroups: []uint64{1, 2}, }, }, }) @@ -483,8 +551,8 @@ func TestMetadataStateMachine_Lookup(t *testing.T) { ClusterId: "1234", NextStorageId: 3, Storages: map[uint64]*gitalypb.Storage{ - 1: {StorageId: 1, Name: cfg.Storages[0].Name, ReplicationFactor: 3, NodeId: 1}, - 2: {StorageId: 2, Name: cfg.Storages[1].Name, ReplicationFactor: 5, NodeId: 2}, + 1: {StorageId: 1, Name: cfg.Storages[0].Name, ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2}}, + 2: {StorageId: 2, Name: cfg.Storages[1].Name, ReplicationFactor: 5, NodeId: 2, ReplicaGroups: []uint64{1}}, }, }}, response) }) diff --git a/internal/gitaly/storage/raft/replica_placement.go b/internal/gitaly/storage/raft/replica_placement.go index 38daf57e4d..a3b118dc4b 100644 --- a/internal/gitaly/storage/raft/replica_placement.go +++ b/internal/gitaly/storage/raft/replica_placement.go @@ -71,3 +71,9 @@ func (*simpleRingReplicaPlacement) apply(storages map[uint64]*gitalypb.Storage) func newSimpleRingReplicaPlacement() replicaPlacement { return &simpleRingReplicaPlacement{} } + +// newDefaultReplicaPlacement defines a factory that returns the default replica placements strategy +// used for determining replica groups. 
At the moment, Gitaly supports a simple ring-based placement +strategy. When we introduce more replica placement strategies in the future, all members of the metadata +Raft group must sync up to ensure they have the same replica placement strategy. +var newDefaultReplicaPlacement = newSimpleRingReplicaPlacement -- GitLab From 4cc1a5dc84021bb0f66e3e2bcf6c09a362c8a674 Mon Sep 17 00:00:00 2001 From: GitLab Renovate Bot Date: Tue, 16 Jul 2024 19:22:02 +0000 Subject: [PATCH 06/17] tools/dlv: Update module github.com/go-delve/delve to v1.23.0 --- tools/dlv/go.mod | 7 +++---- tools/dlv/go.sum | 14 ++++++-------- 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/tools/dlv/go.mod b/tools/dlv/go.mod index f32d952b82..d5f5058259 100644 --- a/tools/dlv/go.mod +++ b/tools/dlv/go.mod @@ -2,7 +2,7 @@ module gitlab.com/gitlab-org/gitaly/tools/dlv go 1.21 -require github.com/go-delve/delve v1.22.1 +require github.com/go-delve/delve v1.23.0 require ( github.com/cilium/ebpf v0.11.0 // indirect @@ -10,7 +10,7 @@ require ( github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/derekparker/trie v0.0.0-20230829180723-39f4de51ef7d // indirect github.com/go-delve/liner v1.2.3-0.20231231155935-4726ab1d7f62 // indirect - github.com/google/go-dap v0.11.0 // indirect + github.com/google/go-dap v0.12.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -24,7 +24,6 @@ require ( go.starlark.net v0.0.0-20231101134539-556fd59b42f6 // indirect golang.org/x/arch v0.6.0 // indirect golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 // indirect - golang.org/x/sys v0.13.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect + golang.org/x/sys v0.17.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/tools/dlv/go.sum b/tools/dlv/go.sum index faecce1f02..185eac4806 100644 --- a/tools/dlv/go.sum +++ b/tools/dlv/go.sum @@ -13,14 +13,14 @@
github.com/derekparker/trie v0.0.0-20230829180723-39f4de51ef7d h1:hUWoLdw5kvo2xC github.com/derekparker/trie v0.0.0-20230829180723-39f4de51ef7d/go.mod h1:C7Es+DLenIpPc9J6IYw4jrK0h7S9bKj4DNl8+KxGEXU= github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/go-delve/delve v1.22.1 h1:LQSF2sv+lP3mmOzMkadl5HGQGgSS2bFg2tbyALqHu8Y= -github.com/go-delve/delve v1.22.1/go.mod h1:TfOb+G5H6YYKheZYAmA59ojoHbOimGfs5trbghHdLbM= +github.com/go-delve/delve v1.23.0 h1:jYgZISZ14KAO3ys8kD07kjrowrygE9F9SIwnpz9xXys= +github.com/go-delve/delve v1.23.0/go.mod h1:S3SLuEE2mn7wipKilTvk1p9HdTMnXXElcEpiZ+VcuqU= github.com/go-delve/liner v1.2.3-0.20231231155935-4726ab1d7f62 h1:IGtvsNyIuRjl04XAOFGACozgUD7A82UffYxZt4DWbvA= github.com/go-delve/liner v1.2.3-0.20231231155935-4726ab1d7f62/go.mod h1:biJCRbqp51wS+I92HMqn5H8/A0PAhxn2vyOT+JqhiGI= github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-dap v0.11.0 h1:SpAZJL41rOOvd85PuLCCLE1dteTQOyKNnn0H3DBHywo= -github.com/google/go-dap v0.11.0/go.mod h1:HAeyoSd2WIfTfg+0GRXcFrb+RnojAtGNh+k+XTIxJDE= +github.com/google/go-dap v0.12.0 h1:rVcjv3SyMIrpaOoTAdFDyHs99CwVOItIJGKLQFQhNeM= +github.com/google/go-dap v0.12.0/go.mod h1:tNjCASCm5cqePi/RVXXWEVqtnNLV1KTWtYOqu6rZNzc= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -67,16 +67,14 @@ golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys 
v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -- GitLab From e834abd31e357eb170294e913c3f125b88d7a175 Mon Sep 17 00:00:00 2001 From: Karthik Nayak Date: Mon, 15 Jul 2024 13:08:03 +0200 Subject: [PATCH 07/17] git: Move `ReferenceUpdate[s]` to the git package We define `ReferenceUpdate[s]` structs in the 'storagemgr', but these structs can be used beyond this package. Let's move them to the 'git' package where they ideologically belong. 
--- internal/git/reference.go | 17 ++ internal/gitaly/hook/manager.go | 2 +- internal/gitaly/hook/referencetransaction.go | 7 +- .../hook/reference_transaction_test.go | 13 +- .../storage/storagemgr/testhelper_test.go | 8 +- .../storage/storagemgr/transaction_manager.go | 39 ++-- .../transaction_manager_alternate_test.go | 6 +- .../transaction_manager_consumer_test.go | 18 +- ...transaction_manager_default_branch_test.go | 20 +-- .../transaction_manager_hook_test.go | 30 ++-- .../transaction_manager_housekeeping_test.go | 98 +++++------ .../transaction_manager_refs_test.go | 166 +++++++++--------- .../transaction_manager_repo_test.go | 10 +- .../storagemgr/transaction_manager_test.go | 64 +++---- 14 files changed, 248 insertions(+), 250 deletions(-) diff --git a/internal/git/reference.go b/internal/git/reference.go index e342c00630..c56343d863 100644 --- a/internal/git/reference.go +++ b/internal/git/reference.go @@ -9,6 +9,23 @@ import ( "strings" ) +// ReferenceUpdate describes the state of a reference's old and new tip in an update. +type ReferenceUpdate struct { + // OldOID is the old OID the reference is expected to point to prior to updating it. + // If the reference does not point to the old value, the reference verification fails. + OldOID ObjectID + // NewOID is the new desired OID to point the reference to. + NewOID ObjectID + // OldTarget is the expected target for a symbolic reference. + OldTarget ReferenceName + // NewTarget stores the desired target for a symbolic reference. + NewTarget ReferenceName +} + +// ReferenceUpdates contains references to update. Reference name is used as the key and the value +// is the expected old tip and the desired new tip. +type ReferenceUpdates map[ReferenceName]ReferenceUpdate + // InternalReferenceType is the type of an internal reference. 
type InternalReferenceType int diff --git a/internal/gitaly/hook/manager.go b/internal/gitaly/hook/manager.go index b137b6fca9..c130d003ce 100644 --- a/internal/gitaly/hook/manager.go +++ b/internal/gitaly/hook/manager.go @@ -56,7 +56,7 @@ type Manager interface { // Transaction is the interface of storagemgr.Transaction. It's used for mocking in the tests. type Transaction interface { RecordInitialReferenceValues(context.Context, map[git.ReferenceName]git.Reference) error - UpdateReferences(storagemgr.ReferenceUpdates) + UpdateReferences(git.ReferenceUpdates) Commit(context.Context) error OriginalRepository(*gitalypb.Repository) *gitalypb.Repository RewriteRepository(*gitalypb.Repository) *gitalypb.Repository diff --git a/internal/gitaly/hook/referencetransaction.go b/internal/gitaly/hook/referencetransaction.go index 420b1c444e..7df889829d 100644 --- a/internal/gitaly/hook/referencetransaction.go +++ b/internal/gitaly/hook/referencetransaction.go @@ -10,7 +10,6 @@ import ( "strings" "gitlab.com/gitlab-org/gitaly/v16/internal/git" - "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage/storagemgr" "gitlab.com/gitlab-org/gitaly/v16/internal/transaction/voting" ) @@ -124,11 +123,11 @@ func (m *GitLabHookManager) ReferenceTransactionHook(ctx context.Context, state // parseChanges parses the changes from the reader. All updates to references lacking a 'refs/' prefix are ignored. These // are the various pseudo reference like ORIG_HEAD but also HEAD. 
See the documentation of the reference-transaction hook // for details on the format: https://git-scm.com/docs/githooks#_reference_transaction -func parseChanges(ctx context.Context, objectHash git.ObjectHash, changes io.Reader) (storagemgr.ReferenceUpdates, bool, error) { +func parseChanges(ctx context.Context, objectHash git.ObjectHash, changes io.Reader) (git.ReferenceUpdates, bool, error) { scanner := bufio.NewScanner(changes) defaultBranchUpdated := false - updates := storagemgr.ReferenceUpdates{} + updates := git.ReferenceUpdates{} for scanner.Scan() { line := scanner.Text() components := strings.Split(line, " ") @@ -144,7 +143,7 @@ func parseChanges(ctx context.Context, objectHash git.ObjectHash, changes io.Rea continue } - update := storagemgr.ReferenceUpdate{} + update := git.ReferenceUpdate{} var err error update.OldOID, err = objectHash.FromHex(components[0]) diff --git a/internal/gitaly/service/hook/reference_transaction_test.go b/internal/gitaly/service/hook/reference_transaction_test.go index 3805fa0395..e62b681bbc 100644 --- a/internal/gitaly/service/hook/reference_transaction_test.go +++ b/internal/gitaly/service/hook/reference_transaction_test.go @@ -13,7 +13,6 @@ import ( "gitlab.com/gitlab-org/gitaly/v16/internal/git/gittest" "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/hook" "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage" - "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage/storagemgr" "gitlab.com/gitlab-org/gitaly/v16/internal/grpc/backchannel" "gitlab.com/gitlab-org/gitaly/v16/internal/structerr" "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper" @@ -35,12 +34,12 @@ func (m mockTransactionRegistry) Get(id storage.TransactionID) (hook.Transaction type mockTransaction struct { hook.Transaction - updateReferencesFunc func(storagemgr.ReferenceUpdates) + updateReferencesFunc func(git.ReferenceUpdates) recordInitialReferenceValues func(context.Context, map[git.ReferenceName]git.Reference) error markDefaultBranchUpdated 
func() } -func (m mockTransaction) UpdateReferences(updates storagemgr.ReferenceUpdates) { +func (m mockTransaction) UpdateReferences(updates git.ReferenceUpdates) { m.updateReferencesFunc(updates) } @@ -99,7 +98,7 @@ func TestReferenceTransactionHook(t *testing.T) { expectedErr error expectedResponse *gitalypb.ReferenceTransactionHookResponse expectedReftxHash []byte - expectedReferenceUpdates storagemgr.ReferenceUpdates + expectedReferenceUpdates git.ReferenceUpdates expectedInitialValues map[git.ReferenceName]git.Reference expectedDefaultBranchUpdated bool }{ @@ -167,7 +166,7 @@ func TestReferenceTransactionHook(t *testing.T) { }, }, expectedReftxHash: stdin, - expectedReferenceUpdates: storagemgr.ReferenceUpdates{ + expectedReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": { OldOID: gittest.DefaultObjectHash.ZeroOID, NewOID: gittest.DefaultObjectHash.EmptyTreeOID, @@ -268,13 +267,13 @@ func TestReferenceTransactionHook(t *testing.T) { }, nil } - var actualReferenceUpdates storagemgr.ReferenceUpdates + var actualReferenceUpdates git.ReferenceUpdates var actualInitialValues map[git.ReferenceName]git.Reference var defaultBranchUpdated bool txRegistry := mockTransactionRegistry{ getFunc: func(storage.TransactionID) (hook.Transaction, error) { return mockTransaction{ - updateReferencesFunc: func(updates storagemgr.ReferenceUpdates) { + updateReferencesFunc: func(updates git.ReferenceUpdates) { actualReferenceUpdates = updates }, recordInitialReferenceValues: func(_ context.Context, initialValues map[git.ReferenceName]git.Reference) error { diff --git a/internal/gitaly/storage/storagemgr/testhelper_test.go b/internal/gitaly/storage/storagemgr/testhelper_test.go index f3615cabff..3c44c203f3 100644 --- a/internal/gitaly/storage/storagemgr/testhelper_test.go +++ b/internal/gitaly/storage/storagemgr/testhelper_test.go @@ -718,7 +718,7 @@ type Commit struct { // SkipVerificationFailures sets the verification failure handling for this commit. 
SkipVerificationFailures bool // ReferenceUpdates are the reference updates to commit. - ReferenceUpdates ReferenceUpdates + ReferenceUpdates git.ReferenceUpdates // QuarantinedPacks are the packs to include in the quarantine directory of the transaction. QuarantinedPacks [][]byte // DefaultBranchUpdate is the default branch update to commit. @@ -748,7 +748,7 @@ type UpdateReferences struct { // TransactionID identifies the transaction to update references on. TransactionID int // ReferenceUpdates are the reference updates to make. - ReferenceUpdates ReferenceUpdates + ReferenceUpdates git.ReferenceUpdates } // SetKey calls SetKey on a transaction. @@ -908,7 +908,7 @@ type transactionTestCase struct { expectedState StateAssertion } -func performReferenceUpdates(t *testing.T, ctx context.Context, tx *Transaction, rewrittenRepo git.RepositoryExecutor, updates ReferenceUpdates) { +func performReferenceUpdates(t *testing.T, ctx context.Context, tx *Transaction, rewrittenRepo git.RepositoryExecutor, updates git.ReferenceUpdates) { tx.UpdateReferences(updates) updater, err := updateref.New(ctx, rewrittenRepo) @@ -1453,7 +1453,7 @@ func checkManagerError(t *testing.T, ctx context.Context, managerErrChannel chan t.Helper() testTransaction := &Transaction{ - referenceUpdates: []ReferenceUpdates{{"sentinel": {}}}, + referenceUpdates: []git.ReferenceUpdates{{"sentinel": {}}}, result: make(chan error, 1), finish: func() error { return nil }, } diff --git a/internal/gitaly/storage/storagemgr/transaction_manager.go b/internal/gitaly/storage/storagemgr/transaction_manager.go index feb004314c..6e386871d2 100644 --- a/internal/gitaly/storage/storagemgr/transaction_manager.go +++ b/internal/gitaly/storage/storagemgr/transaction_manager.go @@ -135,19 +135,6 @@ func (err ReferenceVerificationError) Error() string { return fmt.Sprintf("expected %q to point to %q but it pointed to %q", err.ReferenceName, err.ExpectedOldOID, err.ActualOldOID) } -// ReferenceUpdate describes the state of 
a reference's old and new tip in an update. -type ReferenceUpdate struct { - // OldOID is the old OID the reference is expected to point to prior to updating it. - // If the reference does not point to the old value, the reference verification fails. - OldOID git.ObjectID - // NewOID is the new desired OID to point the reference to. - NewOID git.ObjectID - // OldTarget is the expected target for a symbolic reference. - OldTarget git.ReferenceName - // NewTarget stores the desired target for a symbolic reference. - NewTarget git.ReferenceName -} - // repositoryCreation models a repository creation in a transaction. type repositoryCreation struct { // objectHash defines the object format the repository is created with. @@ -193,10 +180,6 @@ type writeCommitGraphs struct { config housekeepingcfg.WriteCommitGraphConfig } -// ReferenceUpdates contains references to update. Reference name is used as the key and the value -// is the expected old tip and the desired new tip. -type ReferenceUpdates map[git.ReferenceName]ReferenceUpdate - type transactionState int const ( @@ -291,7 +274,7 @@ type Transaction struct { walEntry *wal.Entry skipVerificationFailures bool initialReferenceValues map[git.ReferenceName]git.Reference - referenceUpdates []ReferenceUpdates + referenceUpdates []git.ReferenceUpdates defaultBranchUpdated bool customHooksUpdated bool repositoryCreation *repositoryCreation @@ -677,8 +660,8 @@ func (txn *Transaction) RecordInitialReferenceValues(ctx context.Context, initia // committed as 'oid-1 -> oid-3'. The old OIDs of the intermediate states are not verified when // committing the write to the actual repository and are discarded from the final committed log // entry. 
-func (txn *Transaction) UpdateReferences(updates ReferenceUpdates) { - u := ReferenceUpdates{} +func (txn *Transaction) UpdateReferences(updates git.ReferenceUpdates) { + u := git.ReferenceUpdates{} for reference, update := range updates { oldOID := update.OldOID @@ -704,7 +687,7 @@ func (txn *Transaction) UpdateReferences(updates ReferenceUpdates) { } } - u[reference] = ReferenceUpdate{ + u[reference] = git.ReferenceUpdate{ OldOID: oldOID, NewOID: update.NewOID, OldTarget: oldTarget, @@ -717,14 +700,14 @@ func (txn *Transaction) UpdateReferences(updates ReferenceUpdates) { } // flattenReferenceTransactions flattens the recorded reference transactions by dropping -// all intermediate states. The returned ReferenceUpdates contains the reference changes +// all intermediate states. The returned git.ReferenceUpdates contains the reference changes // with the OldOID set to the reference's value at the beginning of the transaction, and the // NewOID set to the reference's final value after all of the changes. 
-func (txn *Transaction) flattenReferenceTransactions() ReferenceUpdates { - flattenedUpdates := ReferenceUpdates{} +func (txn *Transaction) flattenReferenceTransactions() git.ReferenceUpdates { + flattenedUpdates := git.ReferenceUpdates{} for _, updates := range txn.referenceUpdates { for reference, update := range updates { - u := ReferenceUpdate{ + u := git.ReferenceUpdate{ OldOID: update.OldOID, NewOID: update.NewOID, OldTarget: update.OldTarget, @@ -1367,19 +1350,19 @@ func (mgr *TransactionManager) stageRepositoryCreation(ctx context.Context, tran return fmt.Errorf("get references: %w", err) } - referenceUpdates := make(ReferenceUpdates, len(references)) + referenceUpdates := make(git.ReferenceUpdates, len(references)) for _, ref := range references { if ref.IsSymbolic { return fmt.Errorf("unexpected symbolic ref: %v", ref) } - referenceUpdates[ref.Name] = ReferenceUpdate{ + referenceUpdates[ref.Name] = git.ReferenceUpdate{ OldOID: objectHash.ZeroOID, NewOID: git.ObjectID(ref.Target), } } - transaction.referenceUpdates = []ReferenceUpdates{referenceUpdates} + transaction.referenceUpdates = []git.ReferenceUpdates{referenceUpdates} var customHooks bytes.Buffer if err := repoutil.GetCustomHooks(ctx, mgr.logger, diff --git a/internal/gitaly/storage/storagemgr/transaction_manager_alternate_test.go b/internal/gitaly/storage/storagemgr/transaction_manager_alternate_test.go index 01abfc474a..d3fa3505b2 100644 --- a/internal/gitaly/storage/storagemgr/transaction_manager_alternate_test.go +++ b/internal/gitaly/storage/storagemgr/transaction_manager_alternate_test.go @@ -398,7 +398,7 @@ func generateAlternateTests(t *testing.T, setup testTransactionSetup) []transact }, Commit{ TransactionID: 5, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, }, @@ -1134,7 +1134,7 @@ func generateAlternateTests(t *testing.T, setup testTransactionSetup) []transact }, 
Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1211,7 +1211,7 @@ func generateAlternateTests(t *testing.T, setup testTransactionSetup) []transact }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.Second.Pack}, diff --git a/internal/gitaly/storage/storagemgr/transaction_manager_consumer_test.go b/internal/gitaly/storage/storagemgr/transaction_manager_consumer_test.go index e50748178b..57aa4bac82 100644 --- a/internal/gitaly/storage/storagemgr/transaction_manager_consumer_test.go +++ b/internal/gitaly/storage/storagemgr/transaction_manager_consumer_test.go @@ -32,7 +32,7 @@ func generateConsumerTests(t *testing.T, setup testTransactionSetup) []transacti }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -75,7 +75,7 @@ func generateConsumerTests(t *testing.T, setup testTransactionSetup) []transacti }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -118,7 +118,7 @@ func generateConsumerTests(t *testing.T, setup testTransactionSetup) []transacti }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -129,7 +129,7 @@ func generateConsumerTests(t *testing.T, setup testTransactionSetup) []transacti }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + 
ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -172,7 +172,7 @@ func generateConsumerTests(t *testing.T, setup testTransactionSetup) []transacti }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -228,7 +228,7 @@ func generateConsumerTests(t *testing.T, setup testTransactionSetup) []transacti }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -242,7 +242,7 @@ func generateConsumerTests(t *testing.T, setup testTransactionSetup) []transacti }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/other": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, }, @@ -261,7 +261,7 @@ func generateConsumerTests(t *testing.T, setup testTransactionSetup) []transacti }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/third": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Third.OID}, }, }, @@ -314,7 +314,7 @@ func generateConsumerTests(t *testing.T, setup testTransactionSetup) []transacti }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, diff --git a/internal/gitaly/storage/storagemgr/transaction_manager_default_branch_test.go b/internal/gitaly/storage/storagemgr/transaction_manager_default_branch_test.go index bc9be0362e..36d1e838cc 100644 --- a/internal/gitaly/storage/storagemgr/transaction_manager_default_branch_test.go +++ 
b/internal/gitaly/storage/storagemgr/transaction_manager_default_branch_test.go @@ -19,7 +19,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -63,7 +63,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -74,7 +74,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, @@ -108,7 +108,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran RelativePath: setup.RelativePath, }, Commit{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, DefaultBranchUpdate: &DefaultBranchUpdate{ @@ -142,7 +142,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/branch2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -154,7 +154,7 @@ func generateDefaultBranchTests(t *testing.T, setup 
testTransactionSetup) []tran }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch2": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, DefaultBranchUpdate: &DefaultBranchUpdate{ @@ -188,7 +188,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -200,7 +200,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, DefaultBranchUpdate: &DefaultBranchUpdate{ @@ -242,7 +242,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/branch2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -262,7 +262,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, diff --git a/internal/gitaly/storage/storagemgr/transaction_manager_hook_test.go b/internal/gitaly/storage/storagemgr/transaction_manager_hook_test.go index 337eed7c9e..c486fd2c0e 100644 --- a/internal/gitaly/storage/storagemgr/transaction_manager_hook_test.go +++ 
b/internal/gitaly/storage/storagemgr/transaction_manager_hook_test.go @@ -210,13 +210,13 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, ExpectedError: ReferenceVerificationError{ @@ -233,7 +233,7 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Third.OID}, }, }, @@ -264,7 +264,7 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -278,7 +278,7 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -313,13 +313,13 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, 
NewOID: setup.Commits.Second.OID}, }, ExpectedError: ReferenceVerificationError{ @@ -338,7 +338,7 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -376,7 +376,7 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, ExpectedError: ErrTransactionProcessingStopped, @@ -392,7 +392,7 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -430,7 +430,7 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, ExpectedError: ErrTransactionProcessingStopped, @@ -446,7 +446,7 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -484,7 +484,7 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, 
ExpectedError: ErrTransactionProcessingStopped, @@ -505,13 +505,13 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Third.OID}, }, ExpectedError: ReferenceVerificationError{ diff --git a/internal/gitaly/storage/storagemgr/transaction_manager_housekeeping_test.go b/internal/gitaly/storage/storagemgr/transaction_manager_housekeeping_test.go index 5fd9951a6d..3f34523054 100644 --- a/internal/gitaly/storage/storagemgr/transaction_manager_housekeeping_test.go +++ b/internal/gitaly/storage/storagemgr/transaction_manager_housekeeping_test.go @@ -78,7 +78,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -140,7 +140,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, "refs/heads/branch-3": {OldOID: gittest.DefaultObjectHash.ZeroOID, NewOID: setup.Commits.Diverging.OID}, }, @@ -191,7 +191,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/keep-around/1": {OldOID: gittest.DefaultObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, 
"refs/merge-requests/1": {OldOID: gittest.DefaultObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, "refs/very/deep/nested/ref": {OldOID: gittest.DefaultObjectHash.ZeroOID, NewOID: setup.Commits.Third.OID}, @@ -252,7 +252,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-3": {OldOID: gittest.DefaultObjectHash.ZeroOID, NewOID: setup.Commits.Diverging.OID}, "refs/keep-around/1": {OldOID: gittest.DefaultObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -310,7 +310,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-3": {OldOID: gittest.DefaultObjectHash.ZeroOID, NewOID: setup.Commits.Diverging.OID}, "refs/keep-around/1": {OldOID: gittest.DefaultObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -361,7 +361,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, "refs/heads/branch-1": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.Third.OID}, "refs/heads/branch-2": {OldOID: setup.Commits.Third.OID, NewOID: setup.Commits.Diverging.OID}, @@ -421,7 +421,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, "refs/heads/branch-1": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.Third.OID}, "refs/heads/branch-2": {OldOID: setup.Commits.Third.OID, NewOID: 
setup.Commits.Diverging.OID}, @@ -474,7 +474,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.Commits.Second.OID, NewOID: gittest.DefaultObjectHash.ZeroOID}, "refs/tags/v1.0.0": {OldOID: lightweightTag, NewOID: gittest.DefaultObjectHash.ZeroOID}, }, @@ -530,7 +530,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -611,7 +611,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -630,7 +630,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.Commits.First.OID, NewOID: gittest.DefaultObjectHash.ZeroOID}, }, }, @@ -690,7 +690,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.Commits.Second.OID, NewOID: gittest.DefaultObjectHash.ZeroOID}, "refs/tags/v1.0.0": {OldOID: lightweightTag, NewOID: gittest.DefaultObjectHash.ZeroOID}, }, @@ -726,7 +726,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ 
"refs/heads/empty-dir/parent/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -820,7 +820,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, RunPackRefs{}, Commit{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, ExpectedError: errHousekeepingConflictOtherUpdates, @@ -1700,7 +1700,7 @@ func generateHousekeepingRepackingStrategyTests(t *testing.T, ctx context.Contex }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Third.OID, NewOID: setup.Commits.Second.OID}, }, ExpectedError: errHousekeepingConflictOtherUpdates, @@ -1787,7 +1787,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{ @@ -1854,7 +1854,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{ @@ -1885,7 +1885,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.Third.OID}, }, QuarantinedPacks: [][]byte{ @@ -1942,7 +1942,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + 
ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{ @@ -1970,7 +1970,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.Third.OID}, }, QuarantinedPacks: [][]byte{ @@ -2029,7 +2029,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{ @@ -2057,7 +2057,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.First.OID}, }, }, @@ -2106,7 +2106,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, QuarantinedPacks: [][]byte{ @@ -2131,7 +2131,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{ @@ -2145,7 +2145,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: 
git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.Third.OID}, }, QuarantinedPacks: [][]byte{ @@ -2159,7 +2159,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 5, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Third.OID, NewOID: setup.Commits.Diverging.OID}, }, QuarantinedPacks: [][]byte{ @@ -2231,7 +2231,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, @@ -2247,7 +2247,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.Commits.Second.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -2269,7 +2269,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -2318,7 +2318,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, @@ -2334,7 +2334,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ 
TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.Commits.Second.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -2356,7 +2356,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -2404,7 +2404,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, @@ -2420,7 +2420,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.Commits.Second.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -2442,7 +2442,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -2496,7 +2496,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, IncludeObjects: []git.ObjectID{setup.Commits.Diverging.OID}, @@ -2517,13 +2517,13 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, 
UpdateReferences{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Diverging.OID}, }, }, UpdateReferences{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-2": {OldOID: setup.Commits.Diverging.OID, NewOID: setup.Commits.First.OID}, }, }, @@ -2602,7 +2602,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{ @@ -2776,7 +2776,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Third.OID}, }, QuarantinedPacks: [][]byte{ @@ -2791,7 +2791,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.Commits.Third.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -2934,7 +2934,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Third.OID}, }, QuarantinedPacks: [][]byte{ @@ -2949,7 +2949,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: 
setup.Commits.Third.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -3060,7 +3060,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{ @@ -3074,7 +3074,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.Third.OID}, }, QuarantinedPacks: [][]byte{ @@ -3223,7 +3223,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, "refs/heads/branch-2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Third.OID}, }, @@ -3496,7 +3496,7 @@ func generateHousekeepingCommitGraphsTests(t *testing.T, ctx context.Context, se }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{ @@ -3653,7 +3653,7 @@ func generateHousekeepingCommitGraphsTests(t *testing.T, ctx context.Context, se }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{ @@ -3700,7 +3700,7 @@ func generateHousekeepingCommitGraphsTests(t *testing.T, ctx context.Context, se }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: 
git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Third.OID}, }, QuarantinedPacks: [][]byte{ diff --git a/internal/gitaly/storage/storagemgr/transaction_manager_refs_test.go b/internal/gitaly/storage/storagemgr/transaction_manager_refs_test.go index bfa379520d..9f43bd4ab6 100644 --- a/internal/gitaly/storage/storagemgr/transaction_manager_refs_test.go +++ b/internal/gitaly/storage/storagemgr/transaction_manager_refs_test.go @@ -27,7 +27,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t RelativePath: setup.RelativePath, }, Commit{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/non-\xE5-utf8-directory/non-\xE5-utf8-file": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -61,7 +61,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/non-\xE5-utf8-directory/non-\xE5-utf8-file": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -72,7 +72,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/non-\xE5-utf8-directory/non-\xE5-utf8-file": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -91,7 +91,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t RelativePath: setup.RelativePath, }, Commit{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -120,7 +120,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t RelativePath: setup.RelativePath, }, Commit{ - ReferenceUpdates: 
ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "not-in-refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, ExpectedError: InvalidReferenceFormatError{ @@ -143,13 +143,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/parent": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/parent/child": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, ExpectedError: updateref.FileDirectoryConflictError{ @@ -187,14 +187,14 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/parent": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 1, SkipVerificationFailures: true, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/parent/child": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -233,14 +233,14 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/parent/child": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, Commit{ // This is a no-op and thus is dropped. 
TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/parent": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -270,7 +270,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/parent": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -281,13 +281,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, UpdateReferences{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/parent": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/parent/child": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -315,7 +315,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t RelativePath: setup.RelativePath, }, Commit{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/tags/v1.0.0": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.ObjectHash.EmptyTreeOID}, }, }, @@ -370,7 +370,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, ExpectedError: localrepo.InvalidObjectError(setup.Commits.First.OID), @@ -397,7 +397,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: 
setup.Commits.Second.OID}, }, }, @@ -413,14 +413,14 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 2, SkipVerificationFailures: true, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.Third.OID}, "refs/heads/non-conflicting": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, @@ -457,13 +457,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, "refs/heads/non-conflicting": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, @@ -505,13 +505,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, ExpectedError: ReferenceVerificationError{ @@ -548,7 +548,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + 
ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -559,7 +559,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -590,7 +590,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, "refs/heads/non-conflicting": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -607,14 +607,14 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 2, SkipVerificationFailures: true, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.Third.OID}, "refs/heads/non-conflicting": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Third.OID}, }, @@ -647,7 +647,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, "refs/heads/non-conflicting": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -664,13 +664,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ 
TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.Third.OID}, "refs/heads/non-conflicting": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Third.OID}, }, @@ -709,7 +709,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -725,13 +725,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, ExpectedError: ReferenceVerificationError{ @@ -758,7 +758,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -769,7 +769,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.First.OID}, }, }, @@ -800,7 +800,7 @@ func 
generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -811,7 +811,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -832,7 +832,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/branch-2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -860,13 +860,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-2": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -896,7 +896,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/subdir/branch-2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -913,13 +913,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) 
[]t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/subdir/branch-2": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -945,7 +945,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-packed": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/sentinel-packed": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -968,7 +968,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-loose": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/sentinel-loose": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -985,13 +985,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-packed": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, Commit{ TransactionID: 5, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-loose": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -1030,7 +1030,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t RelativePath: setup.RelativePath, }, Commit{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ 
"refs/heads/symbolic": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -1058,7 +1058,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1069,7 +1069,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/symbolic": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -1106,7 +1106,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1117,7 +1117,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/symbolic": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -1150,7 +1150,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, "refs/heads/non-conflicting": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -1167,14 +1167,14 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: 
setup.Commits.Second.OID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 2, SkipVerificationFailures: true, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.ObjectHash.ZeroOID}, "refs/heads/non-conflicting": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, @@ -1206,7 +1206,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, "refs/heads/non-conflicting": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -1223,13 +1223,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.ObjectHash.ZeroOID}, "refs/heads/non-conflicting": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, @@ -1268,7 +1268,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1284,13 +1284,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: 
setup.ObjectHash.ZeroOID}, }, }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, ExpectedError: ReferenceVerificationError{ @@ -1315,7 +1315,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t RelativePath: setup.RelativePath, }, Commit{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -1334,12 +1334,12 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t RelativePath: setup.RelativePath, }, UpdateReferences{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, UpdateReferences{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -1374,19 +1374,19 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, UpdateReferences{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, }, UpdateReferences{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ // The old oid should be ignored since there's already a recorded initial value for the // reference. 
"refs/heads/main": {NewOID: setup.Commits.Third.OID}, @@ -1451,7 +1451,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, UpdateReferences{ // The old oid is ignored as the references old value was already recorded. - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {NewOID: setup.Commits.First.OID}, }, }, @@ -1482,7 +1482,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1500,7 +1500,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t UpdateReferences{ TransactionID: 2, // The old oid is ignored as the references old value was already recorded. - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {NewOID: setup.Commits.Second.OID}, }, }, @@ -1533,7 +1533,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1551,7 +1551,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t UpdateReferences{ TransactionID: 2, // The old oid is ignored as the references old value was already recorded. 
- ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {NewOID: setup.Commits.Second.OID}, }, }, @@ -1584,7 +1584,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1602,7 +1602,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t UpdateReferences{ TransactionID: 2, // The old oid is ignored as the references old value was already recorded. - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {NewOID: setup.Commits.Second.OID}, }, }, @@ -1639,7 +1639,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t RelativePath: setup.RelativePath, }, UpdateReferences{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1675,7 +1675,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1697,7 +1697,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.First.OID}, }, }, @@ -1729,7 +1729,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: 
setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1740,7 +1740,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.First.OID}, }, }, @@ -1771,7 +1771,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/remotes/upstream/deleted-branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1798,13 +1798,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t // Delete the branch in the same transaction as we create another one in the `refs/remotes` // directory. The reference deletion creates the `refs/remotes` directory that was removed // by the repacking task. 
- ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/remotes/upstream/deleted-branch": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, UpdateReferences{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/remotes/upstream/created-branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, diff --git a/internal/gitaly/storage/storagemgr/transaction_manager_repo_test.go b/internal/gitaly/storage/storagemgr/transaction_manager_repo_test.go index 74fb0b5d2a..8095a4b962 100644 --- a/internal/gitaly/storage/storagemgr/transaction_manager_repo_test.go +++ b/internal/gitaly/storage/storagemgr/transaction_manager_repo_test.go @@ -510,7 +510,7 @@ func generateDeleteRepositoryTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, DeleteRepository: true, @@ -611,7 +611,7 @@ func generateDeleteRepositoryTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, ExpectedError: ErrRepositoryNotFound, @@ -762,7 +762,7 @@ func generateDeleteRepositoryTests(t *testing.T, setup testTransactionSetup) []t DefaultBranchUpdate: &DefaultBranchUpdate{ Reference: "refs/heads/branch", }, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, CustomHooksUpdate: &CustomHooksUpdate{ @@ -853,7 +853,7 @@ func generateDeleteRepositoryTests(t *testing.T, setup testTransactionSetup) []t DefaultBranchUpdate: &DefaultBranchUpdate{ Reference: "refs/heads/new-head", }, - 
ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.First.Pack}, @@ -948,7 +948,7 @@ func generateDeleteRepositoryTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.First.Pack}, diff --git a/internal/gitaly/storage/storagemgr/transaction_manager_test.go b/internal/gitaly/storage/storagemgr/transaction_manager_test.go index fe9495d7ae..b8f2f6e23c 100644 --- a/internal/gitaly/storage/storagemgr/transaction_manager_test.go +++ b/internal/gitaly/storage/storagemgr/transaction_manager_test.go @@ -310,7 +310,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ Context: ctx, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, ExpectedError: context.Canceled, @@ -369,7 +369,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio RelativePath: setup.RelativePath, }, Commit{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, ExpectedError: ErrTransactionProcessingStopped, @@ -454,7 +454,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, CustomHooksUpdate: &CustomHooksUpdate{ @@ -515,7 +515,7 @@ func generateCommonTests(t *testing.T, ctx 
context.Context, setup testTransactio }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, ExpectedError: ReferenceVerificationError{ @@ -540,7 +540,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Third.OID}, }, CustomHooksUpdate: &CustomHooksUpdate{}, @@ -584,7 +584,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.First.Pack}, @@ -628,7 +628,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.First.Pack}, @@ -680,7 +680,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.Second.Pack}, @@ -728,7 +728,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/existing": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, 
"refs/heads/new": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, @@ -768,7 +768,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.First.Pack}, @@ -780,7 +780,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Third.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.Third.Pack}, @@ -932,7 +932,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.First.Pack}, @@ -944,7 +944,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, QuarantinedPacks: [][]byte{setup.Commits.Second.Pack}, @@ -975,7 +975,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.First.Pack}, @@ -992,7 +992,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 2, - 
ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -1000,7 +1000,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio Prune{}, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/dependant": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.Second.Pack}, @@ -1175,7 +1175,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio DefaultBranchUpdate: &DefaultBranchUpdate{ Reference: "refs/heads/new-head", }, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.First.Pack}, @@ -1317,7 +1317,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }), Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1339,7 +1339,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }), Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1374,7 +1374,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1398,7 +1398,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }), Commit{ 
TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1434,7 +1434,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }), Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1522,7 +1522,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1538,7 +1538,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1549,7 +1549,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1613,7 +1613,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1629,7 +1629,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: 
setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1640,7 +1640,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1781,11 +1781,11 @@ func BenchmarkTransactionManager(b *testing.B) { commit2 git.ObjectID ) - // getReferenceUpdates builds a ReferenceUpdates with unique branches for the updater. - getReferenceUpdates := func(updaterID int, old, new git.ObjectID) ReferenceUpdates { - referenceUpdates := make(ReferenceUpdates, tc.transactionSize) + // getReferenceUpdates builds a git.ReferenceUpdates with unique branches for the updater. + getReferenceUpdates := func(updaterID int, old, new git.ObjectID) git.ReferenceUpdates { + referenceUpdates := make(git.ReferenceUpdates, tc.transactionSize) for i := 0; i < tc.transactionSize; i++ { - referenceUpdates[git.ReferenceName(fmt.Sprintf("refs/heads/updater-%d-branch-%d", updaterID, i))] = ReferenceUpdate{ + referenceUpdates[git.ReferenceName(fmt.Sprintf("refs/heads/updater-%d-branch-%d", updaterID, i))] = git.ReferenceUpdate{ OldOID: old, NewOID: new, } -- GitLab From cfb11c26e9476d2b73853c73ec1bfc1db5c90c8d Mon Sep 17 00:00:00 2001 From: Karthik Nayak Date: Sun, 14 Jul 2024 13:26:50 +0200 Subject: [PATCH 08/17] git: Add code to parse reftable tables We generally don't have to parse reftable's as this is handled entirely by Git. But for testing the transaction manager, it is essential that we test the integrity of files being added to the log. This is done by simply looking at the refs present in the file. This is simple since the files contain regular text. Reftables however are a binary format, as such, we need to parse the binary format to be able to understand the refs included in the file. 
Let's add code to do this, currently the code is very limited to our use case: 1. It only parses ref blocks and not other blocks. 2. It doesn't perform CRC32 validation of the data since it is only needed for reading the refs. 3. It is only allowed to run in tests. For all production code, we should rely on Git and its implementation. --- internal/git/reftable.go | 331 ++++++++++++++++++++++++++++++++++ internal/git/reftable_test.go | 224 +++++++++++++++++++++++ 2 files changed, 555 insertions(+) create mode 100644 internal/git/reftable.go create mode 100644 internal/git/reftable_test.go diff --git a/internal/git/reftable.go b/internal/git/reftable.go new file mode 100644 index 0000000000..fc56ea9c68 --- /dev/null +++ b/internal/git/reftable.go @@ -0,0 +1,331 @@ +package git + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "fmt" + "math/big" +) + +type reftableHeader struct { + Name [4]byte + Version uint8 + BlockSize [3]byte + MinUpdateIndex uint64 + MaxUpdateIndex uint64 + // HashID is only present if version is 2 + HashID [4]byte +} + +type reftableFooterBase struct { + Name [4]byte + Version uint8 + BlockSize [3]byte + MinUpdateIndex uint64 + MaxUpdateIndex uint64 +} + +type reftableFooterEnd struct { + RefIndexOffset uint64 + ObjectOffsetAndLen uint64 + ObjectIndexOffset uint64 + LogOffset uint64 + LogIndexPosition uint64 + CR32 uint32 +} + +type reftableFooter struct { + reftableFooterBase + HashID [4]byte + reftableFooterEnd +} + +type reftableBlock struct { + BlockStart uint + FullBlockSize uint + HeaderOffset uint + RestartCount uint16 + RestartStart uint +} + +type reftable struct { + blockSize *uint + headerSize uint + footerSize uint + size uint + src []byte + header *reftableHeader + footer *reftableFooter +} + +// shaFormat maps reftable sha format to Gitaly's hash object. 
+func (t *reftable) shaFormat() ObjectHash { + if t.footer.Version == 2 && bytes.Equal(t.footer.HashID[:], []byte("s256")) { + return ObjectHashSHA256 + } + return ObjectHashSHA1 +} + +// parseBlockSize parses the table's header for the block size. +func (t *reftable) parseBlockSize() uint { + if t.blockSize == nil { + blockSize := uint(big.NewInt(0).SetBytes(t.header.BlockSize[:]).Uint64()) + t.blockSize = &blockSize + } + + return *t.blockSize +} + +// getBlockRange provides the abs block range if the block is smaller +// than the table. +func (t *reftable) getBlockRange(offset, size uint) (uint, uint) { + if offset >= t.size { + return 0, 0 + } + + if offset+size > t.size { + size = t.size - offset + } + + return offset, offset + size +} + +// extractBlockLen extracts the block length from a given location. +func (t *reftable) extractBlockLen(blockStart uint) uint { + return uint(big.NewInt(0).SetBytes(t.src[blockStart+1 : blockStart+4]).Uint64()) +} + +// getVarInt parses a variable int and increases the index. +func (t *reftable) getVarInt(start uint, blockEnd uint) (uint, uint, error) { + var val uint + + val = uint(t.src[start]) & 0x7f + + for (uint(t.src[start]) & 0x80) > 0 { + start++ + if start > blockEnd { + return 0, 0, fmt.Errorf("exceeded block length") + } + + val = ((val + 1) << 7) | (uint(t.src[start]) & 0x7f) + } + + return start + 1, val, nil +} + +// getRefsFromBlock provides the ref updates from a reference block. 
+func (t *reftable) getRefsFromBlock(b *reftableBlock) (ReferenceUpdates, error) { + u := make(map[ReferenceName]ReferenceUpdate) + + prefix := "" + + // Skip the block_type and block_len + idx := b.BlockStart + 4 + + for idx < b.RestartStart { + var prefixLength, suffixLength, updateIndexDelta uint + var err error + + idx, prefixLength, err = t.getVarInt(idx, b.RestartStart) + if err != nil { + return u, fmt.Errorf("getting prefix length: %w", err) + } + + idx, suffixLength, err = t.getVarInt(idx, b.RestartStart) + if err != nil { + return u, fmt.Errorf("getting suffix length: %w", err) + } + + extra := (suffixLength & 0x7) + suffixLength >>= 3 + + refname := prefix[:prefixLength] + string(t.src[idx:idx+suffixLength]) + idx = idx + suffixLength + + idx, updateIndexDelta, err = t.getVarInt(idx, b.FullBlockSize) + if err != nil { + return u, fmt.Errorf("getting update index delta: %w", err) + } + // we don't use this for now + _ = updateIndexDelta + + refUpdate := ReferenceUpdate{} + + switch extra { + case 0: + // Deletion, no value + refUpdate.NewOID = t.shaFormat().ZeroOID + case 1: + // Regular reference + hashSize := t.shaFormat().Hash().Size() + refUpdate.NewOID = ObjectID(hex.EncodeToString(t.src[idx : idx+uint(hashSize)])) + + idx += uint(hashSize) + case 2: + // Peeled Tag + hashSize := t.shaFormat().Hash().Size() + refUpdate.NewOID = ObjectID(hex.EncodeToString(t.src[idx : idx+uint(hashSize)])) + + idx += uint(hashSize) + + // For now we don't need the peeledOID, but we still need + // to skip the index. 
+ // peeledOID := ObjectID(bytesToHex(t.src[idx : idx+uint(hashSize)])) + idx += uint(hashSize) + case 3: + // Symref + var size uint + idx, size, err = t.getVarInt(idx, b.FullBlockSize) + if err != nil { + return u, fmt.Errorf("getting symref size: %w", err) + } + + refUpdate.NewTarget = ReferenceName(t.src[idx : idx+size]) + idx = idx + size + } + + u[ReferenceName(refname)] = refUpdate + prefix = refname + } + + return u, nil +} + +// parseRefBlock parses a block and if it is a ref block, provides +// all the reference updates. +func (t *reftable) parseRefBlock(headerOffset, blockStart, blockEnd uint) (ReferenceUpdates, error) { + currentBS := t.extractBlockLen(blockStart + headerOffset) + + fullBlockSize := t.parseBlockSize() + if fullBlockSize == 0 { + fullBlockSize = currentBS + } else if currentBS < fullBlockSize && currentBS < (blockEnd-blockStart) && t.src[blockStart+currentBS] != 0 { + fullBlockSize = currentBS + } + + b := &reftableBlock{ + BlockStart: blockStart + headerOffset, + FullBlockSize: fullBlockSize, + } + + if err := binary.Read(bytes.NewBuffer(t.src[blockStart+currentBS-2:]), binary.BigEndian, &b.RestartCount); err != nil { + return nil, fmt.Errorf("reading restart count: %w", err) + } + + b.RestartStart = blockStart + currentBS - 2 - 3*uint(b.RestartCount) + + return t.getRefsFromBlock(b) +} + +// IterateRefs provides all the refs present in a table. +func (t *reftable) IterateRefs() (ReferenceUpdates, error) { + if t.footer == nil { + return nil, fmt.Errorf("table not instantiated") + } + + offset := uint(0) + allUpdates := make(map[ReferenceName]ReferenceUpdate) + + for offset < t.size { + headerOffset := uint(0) + if offset == 0 { + headerOffset = t.headerSize + } + + blockStart, blockEnd := t.getBlockRange(offset, t.parseBlockSize()) + if blockStart == 0 && blockEnd == 0 { + break + } + + // If we run out of ref blocks, we can stop the iteration. 
+ if t.src[blockStart+headerOffset] != 'r' { + return nil, nil + } + + u, err := t.parseRefBlock(headerOffset, blockStart, blockEnd) + if err != nil { + return nil, fmt.Errorf("parsing block: %w", err) + } + + if u == nil { + break + } + + for ref, val := range u { + allUpdates[ref] = val + } + + offset = blockEnd + } + + return allUpdates, nil +} + +// NewReftable instantiates a new reftable from the given reftable content. +func NewReftable(content []byte) (*reftable, error) { + t := &reftable{src: content} + block := t.src[0:28] + + var h reftableHeader + if err := binary.Read(bytes.NewBuffer(block), binary.BigEndian, &h); err != nil { + return nil, fmt.Errorf("reading header: %w", err) + } + + if !bytes.Equal(h.Name[:], []byte("REFT")) { + return nil, fmt.Errorf("unexpected header name: %s", h.Name) + } + + if h.Version != 1 && h.Version != 2 { + return nil, fmt.Errorf("unexpected reftable version: %d", h.Version) + } + + t.header = &h + + t.footerSize = uint(68) + t.headerSize = uint(24) + if h.Version == 2 { + t.footerSize = 72 + t.headerSize = 28 + } + t.size = uint(len(t.src)) - t.footerSize + + block = t.src[t.size:len(t.src)] + + var f reftableFooter + if err := binary.Read(bytes.NewBuffer(block), binary.BigEndian, &f.reftableFooterBase); err != nil { + return nil, fmt.Errorf("reading footer: %w", err) + } + + if f.Name != h.Name || + f.Version != h.Version || + !bytes.Equal(f.BlockSize[:], h.BlockSize[:]) || + f.MinUpdateIndex != h.MinUpdateIndex || + f.MaxUpdateIndex != h.MaxUpdateIndex { + return nil, fmt.Errorf("footer doesn't match header") + } + + if h.Version == 2 { + if err := binary.Read(bytes.NewBuffer(block[t.headerSize:]), binary.BigEndian, &f.HashID); err != nil { + return nil, fmt.Errorf("reading hash ID: %w", err) + } + + if f.HashID != h.HashID { + return nil, fmt.Errorf("footer doesn't match header") + } + + if err := binary.Read(bytes.NewBuffer(block[t.headerSize+4:]), binary.BigEndian, &f.reftableFooterEnd); err != nil { + return nil, 
fmt.Errorf("reading footer: %w", err) + } + } else { + if err := binary.Read(bytes.NewBuffer(block[t.headerSize:]), binary.BigEndian, &f.reftableFooterEnd); err != nil { + return nil, fmt.Errorf("reading footer: %w", err) + } + } + + // TODO: CRC32 validation of the data + // https://gitlab.com/gitlab-org/git/-/blob/master/reftable/reader.c#L143 + t.footer = &f + + return t, nil +} diff --git a/internal/git/reftable_test.go b/internal/git/reftable_test.go new file mode 100644 index 0000000000..3bebc3ba4d --- /dev/null +++ b/internal/git/reftable_test.go @@ -0,0 +1,224 @@ +package git_test + +import ( + "fmt" + "io" + "log" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/require" + "gitlab.com/gitlab-org/gitaly/v16/internal/git" + "gitlab.com/gitlab-org/gitaly/v16/internal/git/gittest" + "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper" + "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper/testcfg" +) + +func getReftables(repoPath string) []string { + tables := []string{} + + reftablePath := filepath.Join(repoPath, "reftable") + + files, err := os.ReadDir(reftablePath) + if err != nil { + log.Fatal(err) + } + + for _, file := range files { + if filepath.Base(file.Name()) == "tables.list" { + continue + } + + tables = append(tables, filepath.Join(reftablePath, file.Name())) + } + + return tables +} + +func TestParseReftable(t *testing.T) { + t.Parallel() + + if !testhelper.IsReftableEnabled() { + t.Skip("tests are reftable specific") + } + + ctx := testhelper.Context(t) + cfg := testcfg.Build(t) + + tableName := [4]byte{} + n, err := strings.NewReader("REFT").Read(tableName[:]) + require.NoError(t, err) + require.Equal(t, 4, n) + + type setupData struct { + repoPath string + updates git.ReferenceUpdates + } + + for _, tc := range []struct { + name string + setup func() setupData + expectedErr error + }{ + { + name: "single ref", + setup: func() setupData { + _, repoPath := gittest.CreateRepository(t, ctx, cfg, 
gittest.CreateRepositoryConfig{ + SkipCreationViaService: true, + }) + + mainCommit := gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch("main")) + + return setupData{ + repoPath: repoPath, + updates: git.ReferenceUpdates{ + "HEAD": {NewTarget: "refs/heads/main"}, + "refs/heads/main": {NewOID: mainCommit}, + }, + } + }, + }, + { + name: "single ref + annotated tag", + setup: func() setupData { + _, repoPath := gittest.CreateRepository(t, ctx, cfg, gittest.CreateRepositoryConfig{ + SkipCreationViaService: true, + }) + + mainCommit := gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch("main")) + annotatedTag := gittest.WriteTag(t, cfg, repoPath, "v2.0.0", mainCommit.Revision(), gittest.WriteTagConfig{ + Message: "annotated tag", + }) + + return setupData{ + repoPath: repoPath, + updates: git.ReferenceUpdates{ + "HEAD": {NewTarget: "refs/heads/main"}, + "refs/heads/main": {NewOID: mainCommit}, + "refs/tags/v2.0.0": {NewOID: annotatedTag}, + }, + } + }, + }, + { + name: "two refs without prefix compression", + setup: func() setupData { + _, repoPath := gittest.CreateRepository(t, ctx, cfg, gittest.CreateRepositoryConfig{ + SkipCreationViaService: true, + }) + + mainCommit := gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch("main")) + rootRefCommit := gittest.WriteCommit(t, cfg, repoPath, gittest.WithReference("ROOTREF")) + + return setupData{ + repoPath: repoPath, + updates: git.ReferenceUpdates{ + "HEAD": {NewTarget: "refs/heads/main"}, + "refs/heads/main": {NewOID: mainCommit}, + "ROOTREF": {NewOID: rootRefCommit}, + }, + } + }, + }, + { + name: "two refs with prefix compression", + setup: func() setupData { + _, repoPath := gittest.CreateRepository(t, ctx, cfg, gittest.CreateRepositoryConfig{ + SkipCreationViaService: true, + }) + + mainCommit := gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch("main")) + masterCommit := gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch("master")) + + return setupData{ + repoPath: repoPath, + 
updates: git.ReferenceUpdates{ + "HEAD": {NewTarget: "refs/heads/main"}, + "refs/heads/main": {NewOID: mainCommit}, + "refs/heads/master": {NewOID: masterCommit}, + }, + } + }, + }, + { + name: "multiple refs with different commit IDs", + setup: func() setupData { + _, repoPath := gittest.CreateRepository(t, ctx, cfg, gittest.CreateRepositoryConfig{ + SkipCreationViaService: true, + }) + + mainCommit := gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch("main")) + masterCommit := gittest.WriteCommit(t, cfg, repoPath, gittest.WithParents(mainCommit), gittest.WithBranch("master")) + + return setupData{ + repoPath: repoPath, + updates: git.ReferenceUpdates{ + "HEAD": {NewTarget: "refs/heads/main"}, + "refs/heads/main": {NewOID: mainCommit}, + "refs/heads/master": {NewOID: masterCommit}, + }, + } + }, + }, + { + name: "multiple blocks in table", + setup: func() setupData { + _, repoPath := gittest.CreateRepository(t, ctx, cfg, gittest.CreateRepositoryConfig{ + SkipCreationViaService: true, + }) + + updates := make(map[git.ReferenceName]git.ReferenceUpdate) + + updates["HEAD"] = git.ReferenceUpdate{NewTarget: "refs/heads/main"} + + for i := 0; i < 200; i++ { + branch := fmt.Sprintf("branch%d", i) + updates[git.ReferenceName(fmt.Sprintf("refs/heads/%s", branch))] = git.ReferenceUpdate{ + NewOID: gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch(branch)), + } + } + + return setupData{ + repoPath: repoPath, + updates: updates, + } + }, + }, + } { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + setup := tc.setup() + + repoPath := setup.repoPath + + // pack-refs so there is only one table + gittest.Exec(t, cfg, "-C", repoPath, "pack-refs") + reftablePath := getReftables(repoPath)[0] + + file, err := os.Open(reftablePath) + require.NoError(t, err) + defer file.Close() + + buf, err := io.ReadAll(file) + require.NoError(t, err) + + table, err := git.NewReftable(buf) + require.NoError(t, err) + + u, err := table.IterateRefs() + 
require.NoError(t, err) + + require.Equal(t, len(setup.updates), len(u)) + for ref, expectedUpdate := range setup.updates { + update, ok := u[ref] + require.True(t, ok) + require.Equal(t, expectedUpdate, update) + } + }) + } +} -- GitLab From 59b92fe3a3b133879fb2643ea0d6d1c327a96d27 Mon Sep 17 00:00:00 2001 From: John Cai Date: Fri, 21 Jun 2024 12:02:15 -0400 Subject: [PATCH 09/17] service: Add inflight tracker An inflight tracker can be used to track anything that's inflight. This will be used to track how many pack-objects processes are inflight for a given repository in order to inform whether or not a bundle should be created for a given repository. --- .../gitaly/service/in_progress_tracker.go | 42 ++++++++++++ .../service/in_progress_tracker_test.go | 67 +++++++++++++++++++ internal/gitaly/service/testhelper_test.go | 11 +++ 3 files changed, 120 insertions(+) create mode 100644 internal/gitaly/service/in_progress_tracker.go create mode 100644 internal/gitaly/service/in_progress_tracker_test.go create mode 100644 internal/gitaly/service/testhelper_test.go diff --git a/internal/gitaly/service/in_progress_tracker.go b/internal/gitaly/service/in_progress_tracker.go new file mode 100644 index 0000000000..bd1aa7052e --- /dev/null +++ b/internal/gitaly/service/in_progress_tracker.go @@ -0,0 +1,42 @@ +package service + +import ( + "sync" +) + +// InProgressTracker can be used to keep track of processes that are in flight +type InProgressTracker struct { + inProgress map[string]int + l sync.RWMutex +} + +// NewInProgressTracker instantiates a new InProgressTracker. +func NewInProgressTracker() *InProgressTracker { + return &InProgressTracker{ + inProgress: make(map[string]int), + } +} + +// GetInProgress gets the number of inflight processes for a given key. 
+func (p *InProgressTracker) GetInProgress(key string) int { + p.l.RLock() + defer p.l.RUnlock() + + return p.inProgress[key] +} + +// IncrementInProgress increments the number of inflight processes for a given key. +func (p *InProgressTracker) IncrementInProgress(key string) { + p.l.Lock() + defer p.l.Unlock() + + p.inProgress[key]++ +} + +// DecrementInProgress decrements the number of inflight processes for a given key. +func (p *InProgressTracker) DecrementInProgress(key string) { + p.l.Lock() + defer p.l.Unlock() + + p.inProgress[key]-- +} diff --git a/internal/gitaly/service/in_progress_tracker_test.go b/internal/gitaly/service/in_progress_tracker_test.go new file mode 100644 index 0000000000..99178863c6 --- /dev/null +++ b/internal/gitaly/service/in_progress_tracker_test.go @@ -0,0 +1,67 @@ +package service_test + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/require" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/service" +) + +func TestInProgressTracker(t *testing.T) { + key := "key1" + + testCases := []struct { + desc string + expectedInProgress int + actions func(*service.InProgressTracker) + }{ + { + desc: "one in flight", + expectedInProgress: 1, + actions: func(t *service.InProgressTracker) { + t.IncrementInProgress(key) + t.IncrementInProgress(key) + t.DecrementInProgress(key) + }, + }, + { + desc: "two in flight with concurrent writes", + expectedInProgress: 2, + actions: func(t *service.InProgressTracker) { + var wg sync.WaitGroup + + wg.Add(4) + go func() { + t.IncrementInProgress(key) + wg.Done() + }() + go func() { + t.IncrementInProgress(key) + wg.Done() + }() + + go func() { + t.IncrementInProgress(key) + wg.Done() + }() + + go func() { + t.DecrementInProgress(key) + wg.Done() + }() + + wg.Wait() + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + tracker := service.NewInProgressTracker() + + tc.actions(tracker) + require.Equal(t, tc.expectedInProgress, tracker.GetInProgress(key)) + 
}) + } +} diff --git a/internal/gitaly/service/testhelper_test.go b/internal/gitaly/service/testhelper_test.go new file mode 100644 index 0000000000..43ac8d85bf --- /dev/null +++ b/internal/gitaly/service/testhelper_test.go @@ -0,0 +1,11 @@ +package service_test + +import ( + "testing" + + "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper" +) + +func TestMain(m *testing.M) { + testhelper.Run(m) +} -- GitLab From 75d3f4830a5a95082ac538a0e3e30a03193655f2 Mon Sep 17 00:00:00 2001 From: John Cai Date: Fri, 21 Jun 2024 16:15:02 -0400 Subject: [PATCH 10/17] bundleuri: Add a function to generate bundles in the background We will need the ability to generate bundles in the background if one does not exist for a repository during concurrent clones. To do so, introduce a function on the Sink to call Generate() asynchronously. --- internal/bundleuri/git_config.go | 5 +- internal/bundleuri/git_config_test.go | 7 +- internal/bundleuri/sink.go | 92 +++++++++++++-- internal/bundleuri/sink_test.go | 164 +++++++++++++++++++++++++- 4 files changed, 255 insertions(+), 13 deletions(-) diff --git a/internal/bundleuri/git_config.go b/internal/bundleuri/git_config.go index 69cc21b49b..f18fd4bda1 100644 --- a/internal/bundleuri/git_config.go +++ b/internal/bundleuri/git_config.go @@ -11,6 +11,9 @@ import ( "gitlab.com/gitlab-org/gitaly/v16/internal/log" ) +// ErrSinkMissing indicates a sink is missing +var ErrSinkMissing = errors.New("bundle-URI sink missing") + // CapabilitiesGitConfig returns a slice of git.ConfigPairs that can be injected // into the Git config to make it aware the bundle-URI capabilities are // supported. 
@@ -42,7 +45,7 @@ func UploadPackGitConfig( } if sink == nil { - return CapabilitiesGitConfig(ctx), errors.New("bundle-URI sink missing") + return CapabilitiesGitConfig(ctx), ErrSinkMissing } uri, err := sink.SignedURL(ctx, repo) diff --git a/internal/bundleuri/git_config_test.go b/internal/bundleuri/git_config_test.go index 0a5da2bbc2..d81224bf4f 100644 --- a/internal/bundleuri/git_config_test.go +++ b/internal/bundleuri/git_config_test.go @@ -14,7 +14,6 @@ import ( "gitlab.com/gitlab-org/gitaly/v16/internal/git" "gitlab.com/gitlab-org/gitaly/v16/internal/git/gittest" "gitlab.com/gitlab-org/gitaly/v16/internal/git/localrepo" - "gitlab.com/gitlab-org/gitaly/v16/internal/structerr" "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper" "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper/testcfg" ) @@ -60,7 +59,7 @@ func testUploadPackGitConfig(t *testing.T, ctx context.Context) { return setupData{} }, expectedConfig: nil, - expectedErr: errors.New("bundle-URI sink missing"), + expectedErr: ErrSinkMissing, }, { desc: "no bundle found", @@ -74,7 +73,7 @@ func testUploadPackGitConfig(t *testing.T, ctx context.Context) { } }, expectedConfig: nil, - expectedErr: structerr.NewNotFound("no bundle available"), + expectedErr: ErrBundleNotFound, }, { desc: "not signed", @@ -144,7 +143,7 @@ func testUploadPackGitConfig(t *testing.T, ctx context.Context) { actual, err := UploadPackGitConfig(ctx, sink, repoProto) if featureflag.BundleURI.IsEnabled(ctx) { - require.Equal(t, tc.expectedErr, err) + require.True(t, errors.Is(err, tc.expectedErr) || strings.Contains(err.Error(), tc.expectedErr.Error())) if tc.expectedConfig != nil { require.Equal(t, len(tc.expectedConfig), len(actual)) diff --git a/internal/bundleuri/sink.go b/internal/bundleuri/sink.go index a2d1ac8d00..e6beabe5e7 100644 --- a/internal/bundleuri/sink.go +++ b/internal/bundleuri/sink.go @@ -7,6 +7,7 @@ import ( "io" "path/filepath" "strings" + "sync" "time" "gitlab.com/gitlab-org/gitaly/v16/internal/backup" @@ 
-29,21 +30,61 @@ const ( defaultExpiry = 10 * time.Minute ) +var ( + // ErrBundleGenerationInProgress indicates that an existing bundle generation + // is already in progress. + ErrBundleGenerationInProgress = errors.New("bundle generation in progress") + // ErrBundleNotFound indicates that no bundle could be found for a given repository. + ErrBundleNotFound = errors.New("no bundle found") +) + // Sink is a wrapper around the storage bucket used for accessing/writing // bundleuri bundles. type Sink struct { - bucket *blob.Bucket + bucket *blob.Bucket + bundleCreationMutex map[string]*sync.Mutex + + config sinkConfig +} + +type sinkConfig struct { + notifyBundleGeneration func(string, error) +} + +// SinkOption can be passed into NewSink to pass in options when creating a new sink. +type SinkOption func(s *sinkConfig) + +// WithBundleGenerationNotifier sets a notifier function that gets called when GenerateOneAtATime +// finishes. GenerateOneAtATime will be called in a separate background goroutine, so this function +// is an entrypoint to pass in logic to be called after the bundle has been generated. +func WithBundleGenerationNotifier(f func(string, error)) SinkOption { + return func(s *sinkConfig) { + s.notifyBundleGeneration = f + } } // NewSink creates a Sink from the given parameters. 
-func NewSink(ctx context.Context, uri string) (*Sink, error) { +func NewSink(ctx context.Context, uri string, options ...SinkOption) (*Sink, error) { bucket, err := blob.OpenBucket(ctx, uri) if err != nil { return nil, fmt.Errorf("open bucket: %w", err) } - return &Sink{ - bucket: bucket, - }, nil + + s := &Sink{ + bucket: bucket, + bundleCreationMutex: make(map[string]*sync.Mutex), + } + + var c sinkConfig + if len(options) > 0 { + for _, option := range options { + option(&c) + } + + s.config = c + } + + return s, nil } // relativePath returns a relative path of the bundle-URI bundle inside the @@ -73,6 +114,43 @@ func (s *Sink) getWriter(ctx context.Context, relativePath string) (io.WriteClos return writer, nil } +// GenerateOneAtATime generates a bundle for a repository, but only if there is not already +// one in flight. +func (s *Sink) GenerateOneAtATime(ctx context.Context, repo *localrepo.Repo) error { + bundlePath := s.relativePath(repo, defaultBundle) + + var m *sync.Mutex + var ok bool + + if m, ok = s.bundleCreationMutex[bundlePath]; !ok { + s.bundleCreationMutex[bundlePath] = &sync.Mutex{} + m = s.bundleCreationMutex[bundlePath] + } + + if m.TryLock() { + defer m.Unlock() + errChan := make(chan error) + + go func(ctx context.Context) { + select { + case errChan <- s.Generate(ctx, repo): + case <-ctx.Done(): + errChan <- ctx.Err() + } + }(ctx) + + err := <-errChan + + if s.config.notifyBundleGeneration != nil { + s.config.notifyBundleGeneration(bundlePath, err) + } + } else { + return fmt.Errorf("%w: %s", ErrBundleGenerationInProgress, bundlePath) + } + + return nil +} + // Generate creates a bundle for bundle-URI use into the bucket. 
func (s Sink) Generate(ctx context.Context, repo *localrepo.Repo) (returnErr error) { ref, err := repo.HeadReference(ctx) @@ -132,9 +210,9 @@ func (s Sink) SignedURL(ctx context.Context, repo storage.Repository) (string, e if exists, err := s.bucket.Exists(ctx, relativePath); !exists { if err == nil { - return "", structerr.NewNotFound("no bundle available") + return "", ErrBundleNotFound } - return "", structerr.NewNotFound("no bundle available: %w", err) + return "", fmt.Errorf("%w: %w", ErrBundleNotFound, err) } uri, err := s.bucket.SignedURL(ctx, relativePath, &blob.SignedURLOptions{ diff --git a/internal/bundleuri/sink_test.go b/internal/bundleuri/sink_test.go index 01aa6907af..00dba139e6 100644 --- a/internal/bundleuri/sink_test.go +++ b/internal/bundleuri/sink_test.go @@ -1,6 +1,7 @@ package bundleuri import ( + "context" "fmt" "os" "path/filepath" @@ -105,7 +106,7 @@ func TestSink_SignedURL(t *testing.T) { { desc: "fails with missing bundle", setup: func(t *testing.T, sinkDir string, sink *Sink) {}, - expectedErr: structerr.NewNotFound("no bundle available"), + expectedErr: ErrBundleNotFound, }, } { tc := tc @@ -123,9 +124,170 @@ func TestSink_SignedURL(t *testing.T) { if tc.expectedErr == nil { require.NoError(t, err) require.Regexp(t, "http://example\\.com", uri) + } else { + require.ErrorIs(t, err, tc.expectedErr) + } + }) + } +} + +func TestSink_GenerateOneAtATime(t *testing.T) { + t.Parallel() + + cfg := testcfg.Build(t) + ctx := testhelper.Context(t) + + for _, tc := range []struct { + desc string + setup func(t *testing.T, repoPath string) + expectedErr error + }{ + { + desc: "creates bundle successfully", + setup: func(t *testing.T, repoPath string) { + gittest.WriteCommit(t, cfg, repoPath, + gittest.WithTreeEntries(gittest.TreeEntry{Mode: "100644", Path: "README", Content: "much"}), + gittest.WithBranch("main")) + }, + }, + { + desc: "fails with missing HEAD", + setup: func(t *testing.T, repoPath string) {}, + expectedErr: 
structerr.NewFailedPrecondition("ref %q does not exist: %w", "refs/heads/main", fmt.Errorf("create bundle: %w", localrepo.ErrEmptyBundle)), + }, + } { + tc := tc + + t.Run(tc.desc, func(t *testing.T) { + t.Parallel() + + repoProto, repoPath := gittest.CreateRepository(t, ctx, cfg, gittest.CreateRepositoryConfig{ + SkipCreationViaService: true, + }) + repo := localrepo.NewTestRepo(t, cfg, repoProto) + + tc.setup(t, repoPath) + + doneChan := make(chan struct{}) + errChan := make(chan error) + sinkDir := t.TempDir() + sink, err := NewSink( + ctx, + "file://"+sinkDir, + WithBundleGenerationNotifier( + func(_ string, err error) { + close(doneChan) + errChan <- err + }, + ), + ) + require.NoError(t, err) + + go func() { + err := sink.GenerateOneAtATime(ctx, repo) + require.NoError(t, err) + }() + + <-doneChan + err = <-errChan + + if tc.expectedErr == nil { + require.NoError(t, err) + require.FileExists(t, filepath.Join(sinkDir, sink.relativePath(repo, "default"))) } else { require.Equal(t, err, tc.expectedErr, err) } }) } } + +func TestSink_GenerateOneAtATimeConcurrent(t *testing.T) { + t.Parallel() + + cfg := testcfg.Build(t) + ctx := testhelper.Context(t) + + repoProto, repoPath := gittest.CreateRepository(t, ctx, cfg, gittest.CreateRepositoryConfig{ + SkipCreationViaService: true, + }) + repo := localrepo.NewTestRepo(t, cfg, repoProto) + + gittest.WriteCommit(t, cfg, repoPath, + gittest.WithTreeEntries(gittest.TreeEntry{Mode: "100644", Path: "README", Content: "much"}), + gittest.WithBranch("main")) + + doneChan, startNotifierCh := make(chan struct{}), make(chan struct{}) + errChan := make(chan error) + + sinkDir := t.TempDir() + sink, err := NewSink( + ctx, + "file://"+sinkDir, + WithBundleGenerationNotifier( + func(_ string, err error) { + close(startNotifierCh) + close(doneChan) + errChan <- err + }, + ), + ) + require.NoError(t, err) + + go func() { + err := sink.GenerateOneAtATime(ctx, repo) + require.NoError(t, err) + }() + + <-startNotifierCh + + err = 
sink.GenerateOneAtATime(ctx, repo) + require.ErrorIs(t, err, ErrBundleGenerationInProgress) + + <-doneChan + err = <-errChan + + require.NoError(t, err) + require.FileExists(t, filepath.Join(sinkDir, sink.relativePath(repo, "default"))) +} + +func TestSink_GenerateOneAtATime_ContextCancelled(t *testing.T) { + t.Parallel() + + cfg := testcfg.Build(t) + ctx := testhelper.Context(t) + ctx, cancel := context.WithCancel(ctx) + + repoProto, repoPath := gittest.CreateRepository(t, ctx, cfg, gittest.CreateRepositoryConfig{ + SkipCreationViaService: true, + }) + repo := localrepo.NewTestRepo(t, cfg, repoProto) + + gittest.WriteCommit(t, cfg, repoPath, + gittest.WithTreeEntries(gittest.TreeEntry{Mode: "100644", Path: "README", Content: "much"}), + gittest.WithBranch("main")) + + errChan := make(chan error) + + sinkDir := t.TempDir() + sink, err := NewSink( + ctx, + "file://"+sinkDir, + WithBundleGenerationNotifier( + func(_ string, err error) { + errChan <- err + }, + ), + ) + require.NoError(t, err) + + cancel() + + go func() { + require.NoError(t, sink.GenerateOneAtATime(ctx, repo)) + }() + + err = <-errChan + + require.ErrorIs(t, err, context.Canceled) + require.NoFileExists(t, filepath.Join(sinkDir, sink.relativePath(repo, "default"))) +} -- GitLab From 75e914c5bab3529ae3894426b4c89b64fd495fb1 Mon Sep 17 00:00:00 2001 From: John Cai Date: Thu, 4 Jul 2024 21:00:21 -0700 Subject: [PATCH 11/17] testserver: Allow passing in partition manager There are tests that need a partition manager to be present. For this reason, add a server option that allows the passing in of a PartitionManager. 
--- internal/testhelper/testserver/gitaly.go | 56 ++++++++++++++---------- 1 file changed, 34 insertions(+), 22 deletions(-) diff --git a/internal/testhelper/testserver/gitaly.go b/internal/testhelper/testserver/gitaly.go index d3dc6aca66..acd1e15160 100644 --- a/internal/testhelper/testserver/gitaly.go +++ b/internal/testhelper/testserver/gitaly.go @@ -290,6 +290,7 @@ type gitalyServerDeps struct { signingKey string transactionRegistry *storagemgr.TransactionRegistry procReceiveRegistry *hook.ProcReceiveRegistry + partitionManager *storagemgr.PartitionManager } func (gsd *gitalyServerDeps) createDependencies(tb testing.TB, cfg config.Cfg) *service.Dependencies { @@ -333,27 +334,30 @@ func (gsd *gitalyServerDeps) createDependencies(tb testing.TB, cfg config.Cfg) * var partitionManager *storagemgr.PartitionManager if testhelper.IsWALEnabled() { - dbMgr, err := keyvalue.NewDBManager( - cfg.Storages, - keyvalue.NewBadgerStore, - helper.NewNullTickerFactory(), - gsd.logger, - ) - require.NoError(tb, err) - tb.Cleanup(dbMgr.Close) - - partitionManager, err = storagemgr.NewPartitionManager( - testhelper.Context(tb), - cfg.Storages, - gsd.gitCmdFactory, - localrepo.NewFactory(gsd.logger, gsd.locator, gsd.gitCmdFactory, gsd.catfileCache), - gsd.logger, - dbMgr, - cfg.Prometheus, - nil, - ) - require.NoError(tb, err) - tb.Cleanup(partitionManager.Close) + if gsd.partitionManager == nil { + dbMgr, err := keyvalue.NewDBManager( + cfg.Storages, + keyvalue.NewBadgerStore, + helper.NewNullTickerFactory(), + gsd.logger, + ) + require.NoError(tb, err) + tb.Cleanup(dbMgr.Close) + + partitionManager, err = storagemgr.NewPartitionManager( + testhelper.Context(tb), + cfg.Storages, + gsd.gitCmdFactory, + localrepo.NewFactory(gsd.logger, gsd.locator, gsd.gitCmdFactory, gsd.catfileCache), + gsd.logger, + dbMgr, + cfg.Prometheus, + nil, + ) + require.NoError(tb, err) + tb.Cleanup(partitionManager.Close) + gsd.partitionManager = partitionManager + } } if gsd.hookMgr == nil { @@ -433,7 
+437,7 @@ func (gsd *gitalyServerDeps) createDependencies(tb testing.TB, cfg config.Cfg) * UpdaterWithHooks: gsd.updaterWithHooks, HousekeepingManager: gsd.housekeepingManager, TransactionRegistry: gsd.transactionRegistry, - PartitionManager: partitionManager, + PartitionManager: gsd.partitionManager, BackupSink: gsd.backupSink, BackupLocator: gsd.backupLocator, BundleURISink: gsd.bundleURISink, @@ -591,3 +595,11 @@ func WithProcReceiveRegistry(registry *hook.ProcReceiveRegistry) GitalyServerOpt return deps } } + +// WithPartitionManager sets the proc receive registry that will be used for Gitaly services. +func WithPartitionManager(partitionMgr *storagemgr.PartitionManager) GitalyServerOpt { + return func(deps gitalyServerDeps) gitalyServerDeps { + deps.partitionManager = partitionMgr + return deps + } +} -- GitLab From b45283a6abae8cb4ea7bd811f1bebb0fe788ab86 Mon Sep 17 00:00:00 2001 From: John Cai Date: Wed, 17 Jul 2024 20:46:25 -0400 Subject: [PATCH 12/17] testhelper: Allow passing in InProgressTracker Allow passing in an InProgressTracker to the test server --- internal/testhelper/testserver/gitaly.go | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/internal/testhelper/testserver/gitaly.go b/internal/testhelper/testserver/gitaly.go index acd1e15160..56a9b1c069 100644 --- a/internal/testhelper/testserver/gitaly.go +++ b/internal/testhelper/testserver/gitaly.go @@ -291,6 +291,7 @@ type gitalyServerDeps struct { transactionRegistry *storagemgr.TransactionRegistry procReceiveRegistry *hook.ProcReceiveRegistry partitionManager *storagemgr.PartitionManager + inProgressTracker *service.InProgressTracker } func (gsd *gitalyServerDeps) createDependencies(tb testing.TB, cfg config.Cfg) *service.Dependencies { @@ -332,6 +333,10 @@ func (gsd *gitalyServerDeps) createDependencies(tb testing.TB, cfg config.Cfg) * gsd.procReceiveRegistry = hook.NewProcReceiveRegistry() } + if gsd.inProgressTracker == nil { + gsd.inProgressTracker = 
service.NewInProgressTracker() + } + var partitionManager *storagemgr.PartitionManager if testhelper.IsWALEnabled() { if gsd.partitionManager == nil { @@ -442,6 +447,7 @@ func (gsd *gitalyServerDeps) createDependencies(tb testing.TB, cfg config.Cfg) * BackupLocator: gsd.backupLocator, BundleURISink: gsd.bundleURISink, ProcReceiveRegistry: gsd.procReceiveRegistry, + InProgressTracker: gsd.inProgressTracker, } } @@ -571,6 +577,14 @@ func WithBundleURISink(sink *bundleuri.Sink) GitalyServerOpt { } } +// WithInProgressTracker sets the bundleuri.Sink that will be used for Gitaly services +func WithInProgressTracker(tracker *service.InProgressTracker) GitalyServerOpt { + return func(deps gitalyServerDeps) gitalyServerDeps { + deps.inProgressTracker = tracker + return deps + } +} + // WithSigningKey sets the signing key path that will be used for Gitaly // services. func WithSigningKey(signingKey string) GitalyServerOpt { -- GitLab From f3fb12f831e33ef97b2221e37e627dd096f85fdb Mon Sep 17 00:00:00 2001 From: John Cai Date: Wed, 17 Jul 2024 20:48:26 -0400 Subject: [PATCH 13/17] smarthttp: Generate bundle in background clone, check if 5 or more clones are already happening for this repository. If so, and a bundle does not exist, then we want to generate one because this is likely a busy repository. 
--- internal/cli/gitaly/serve.go | 13 +- .../featureflag/ff_autogenerate_bundles.go | 9 + internal/gitaly/config/config.go | 2 + internal/gitaly/service/dependencies.go | 6 + internal/gitaly/service/smarthttp/server.go | 16 +- .../gitaly/service/smarthttp/upload_pack.go | 80 +++++- .../service/smarthttp/upload_pack_test.go | 232 ++++++++++++++++++ 7 files changed, 352 insertions(+), 6 deletions(-) create mode 100644 internal/featureflag/ff_autogenerate_bundles.go diff --git a/internal/cli/gitaly/serve.go b/internal/cli/gitaly/serve.go index 6e4ccad05a..3a7520f0b5 100644 --- a/internal/cli/gitaly/serve.go +++ b/internal/cli/gitaly/serve.go @@ -498,7 +498,17 @@ func run(appCtx *cli.Context, cfg config.Cfg, logger log.Logger) error { var bundleURISink *bundleuri.Sink if cfg.BundleURI.GoCloudURL != "" { - bundleURISink, err = bundleuri.NewSink(ctx, cfg.BundleURI.GoCloudURL) + bundleURISink, err = bundleuri.NewSink( + ctx, + cfg.BundleURI.GoCloudURL, + bundleuri.WithBundleGenerationNotifier(func(bundlePath string, err error) { + if err != nil { + logger.WithField("bundle_path", bundlePath). + WithError(err). 
+ Warn("bundle generation failed") + } + }), + ) if err != nil { return fmt.Errorf("create bundle-URI sink: %w", err) } @@ -547,6 +557,7 @@ func run(appCtx *cli.Context, cfg config.Cfg, logger log.Logger) error { BackupSink: backupSink, BackupLocator: backupLocator, BundleURISink: bundleURISink, + InProgressTracker: service.NewInProgressTracker(), }) b.RegisterStarter(starter.New(c, srv, logger)) } diff --git a/internal/featureflag/ff_autogenerate_bundles.go b/internal/featureflag/ff_autogenerate_bundles.go new file mode 100644 index 0000000000..61692778f3 --- /dev/null +++ b/internal/featureflag/ff_autogenerate_bundles.go @@ -0,0 +1,9 @@ +package featureflag + +// AutogenerateBundlesForBundleURI enables the use of git's bundle URI feature +var AutogenerateBundlesForBundleURI = NewFeatureFlag( + "autogenerate_bundles_for_bundleuri", + "v17.3.0", + "https://gitlab.com/gitlab-org/gitaly/-/issues/6204", + false, +) diff --git a/internal/gitaly/config/config.go b/internal/gitaly/config/config.go index ee7db84d80..765bf9b04e 100644 --- a/internal/gitaly/config/config.go +++ b/internal/gitaly/config/config.go @@ -623,6 +623,8 @@ type BundleURIConfig struct { // GoCloudURL is the blob storage GoCloud URL that will be used to store // Git bundles for Bundle-URI use. GoCloudURL string `toml:"go_cloud_url,omitempty" json:"go_cloud_url,omitempty"` + // Autogeneration controls whether or not bundles for bundle uris are auto generated + Autogeneration bool `toml:"autogeneration,omitempty" json:"autogeneration"` } // Validate runs validation on all fields and returns any errors found. 
diff --git a/internal/gitaly/service/dependencies.go b/internal/gitaly/service/dependencies.go index 4fc6f952f9..73f45e525b 100644 --- a/internal/gitaly/service/dependencies.go +++ b/internal/gitaly/service/dependencies.go @@ -48,6 +48,7 @@ type Dependencies struct { BackupLocator backup.Locator BundleURISink *bundleuri.Sink ProcReceiveRegistry *gitalyhook.ProcReceiveRegistry + InProgressTracker *InProgressTracker } // GetLogger returns the logger. @@ -164,3 +165,8 @@ func (dc *Dependencies) GetBundleURISink() *bundleuri.Sink { func (dc *Dependencies) GetProcReceiveRegistry() *gitalyhook.ProcReceiveRegistry { return dc.ProcReceiveRegistry } + +// GetInProgressTracker returns the ProcReceiveRegistry. +func (dc *Dependencies) GetInProgressTracker() *InProgressTracker { + return dc.InProgressTracker +} diff --git a/internal/gitaly/service/smarthttp/server.go b/internal/gitaly/service/smarthttp/server.go index 528deeb82e..ffd455aa27 100644 --- a/internal/gitaly/service/smarthttp/server.go +++ b/internal/gitaly/service/smarthttp/server.go @@ -34,6 +34,10 @@ type server struct { backupLocator backup.Locator backupSink backup.Sink bundleURISink *bundleuri.Sink + inflightTracker *service.InProgressTracker + generateBundles bool + partitionMgr *storagemgr.PartitionManager + transactionRegistry *storagemgr.TransactionRegistry } // NewServer creates a new instance of a grpc SmartHTTPServer @@ -52,10 +56,14 @@ func NewServer(deps *service.Dependencies, serverOpts ...ServerOpt) gitalypb.Sma prometheus.CounterOpts{}, []string{"git_negotiation_feature"}, ), - infoRefCache: newInfoRefCache(deps.GetLogger(), deps.GetDiskCache()), - backupLocator: deps.GetBackupLocator(), - backupSink: deps.GetBackupSink(), - bundleURISink: deps.GetBundleURISink(), + infoRefCache: newInfoRefCache(deps.GetLogger(), deps.GetDiskCache()), + backupLocator: deps.GetBackupLocator(), + backupSink: deps.GetBackupSink(), + bundleURISink: deps.GetBundleURISink(), + inflightTracker: 
deps.GetInProgressTracker(), + generateBundles: deps.GetCfg().BundleURI.Autogeneration, + partitionMgr: deps.GetPartitionManager(), + transactionRegistry: deps.GetTransactionRegistry(), } for _, serverOpt := range serverOpts { diff --git a/internal/gitaly/service/smarthttp/upload_pack.go b/internal/gitaly/service/smarthttp/upload_pack.go index 05b5dedb7b..ee1173fb47 100644 --- a/internal/gitaly/service/smarthttp/upload_pack.go +++ b/internal/gitaly/service/smarthttp/upload_pack.go @@ -1,22 +1,35 @@ package smarthttp import ( + "bytes" "context" "crypto/sha1" "errors" "fmt" "io" + "time" + "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/ctxlogrus" "gitlab.com/gitlab-org/gitaly/v16/internal/bundleuri" "gitlab.com/gitlab-org/gitaly/v16/internal/command" + "gitlab.com/gitlab-org/gitaly/v16/internal/featureflag" "gitlab.com/gitlab-org/gitaly/v16/internal/git" + "gitlab.com/gitlab-org/gitaly/v16/internal/git/localrepo" "gitlab.com/gitlab-org/gitaly/v16/internal/git/stats" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage/storagectx" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage/storagemgr" "gitlab.com/gitlab-org/gitaly/v16/internal/grpc/sidechannel" "gitlab.com/gitlab-org/gitaly/v16/internal/log" "gitlab.com/gitlab-org/gitaly/v16/internal/structerr" "gitlab.com/gitlab-org/gitaly/v16/proto/go/gitalypb" ) +const ( + concurrentUploadPackThreshold = 5 + bundleGenerationTimeout = 24 * time.Hour +) + func (s *server) PostUploadPackWithSidechannel(ctx context.Context, req *gitalypb.PostUploadPackWithSidechannelRequest) (*gitalypb.PostUploadPackWithSidechannelResponse, error) { repoPath, gitConfig, err := s.validateUploadPackRequest(ctx, req) if err != nil { @@ -117,21 +130,85 @@ func (s *server) runUploadPack(ctx context.Context, req *gitalypb.PostUploadPack gitConfig = append(gitConfig, bundleuri.CapabilitiesGitConfig(ctx)...) 
+ txID := storage.ExtractTransactionID(ctx) + + var originalRepo *gitalypb.Repository + + if txID != 0 { + currentTx, err := s.transactionRegistry.Get(txID) + if err != nil { + return nil, structerr.NewInternal("error getting transaction: %w", err) + } + originalRepo = currentTx.OriginalRepository(req.GetRepository()) + } else { + originalRepo = req.GetRepository() + } + + key := originalRepo.GetGlRepository() + uploadPackConfig, err := bundleuri.UploadPackGitConfig(ctx, s.bundleURISink, req.GetRepository()) if err != nil { - log.AddFields(ctx, log.Fields{"bundle_uri_error": err}) + if errors.Is(err, bundleuri.ErrBundleNotFound) && + featureflag.AutogenerateBundlesForBundleURI.IsEnabled(ctx) && + s.generateBundles && + s.inflightTracker.GetInProgress(key) > concurrentUploadPackThreshold { + + go func() { + ctx, cancel := context.WithTimeout(context.Background(), bundleGenerationTimeout) + defer cancel() + + tx, err := s.partitionMgr.Begin( + ctx, + originalRepo.GetStorageName(), + originalRepo.GetRelativePath(), + 0, + storagemgr.TransactionOptions{ + ReadOnly: true, + }, + ) + if err != nil { + ctxlogrus.Extract(ctx).WithError(err).Error("failed starting transaction") + } + + ctx = storagectx.ContextWithTransaction(ctx, tx) + + if err := s.bundleURISink.GenerateOneAtATime(ctx, localrepo.New( + s.logger, + s.locator, + s.gitCmdFactory, + s.catfileCache, + originalRepo)); err != nil { + ctxlogrus.Extract(ctx).WithError(err).Error("generate bundle") + if err := tx.Rollback(); err != nil && !errors.Is(err, storagemgr.ErrTransactionAlreadyCommitted) { + ctxlogrus.Extract(ctx).WithError(err).Error("failed rolling back transaction") + } + } + + if err := tx.Commit(ctx); err != nil && !errors.Is(err, storagemgr.ErrTransactionAlreadyCommitted) { + ctxlogrus.Extract(ctx).WithError(err).Error("committing transaction") + } + }() + } else if !errors.Is(err, bundleuri.ErrSinkMissing) { + log.AddFields(ctx, log.Fields{"bundle_uri_error": err}) + } } else { gitConfig = 
append(gitConfig, uploadPackConfig...) } + var stderr bytes.Buffer + commandOpts := []git.CmdOpt{ git.WithStdin(stdin), + git.WithStderr(&stderr), git.WithSetupStdout(), git.WithGitProtocol(s.logger, req), git.WithConfig(gitConfig...), git.WithPackObjectsHookEnv(req.GetRepository(), "http"), } + s.inflightTracker.IncrementInProgress(key) + defer s.inflightTracker.DecrementInProgress(key) + cmd, err := s.gitCmdFactory.New(ctx, req.GetRepository(), git.Command{ Name: "upload-pack", Flags: []git.Option{git.Flag{Name: "--stateless-rpc"}}, @@ -160,5 +237,6 @@ func (s *server) runUploadPack(ctx context.Context, req *gitalypb.PostUploadPack } s.logger.WithField("request_sha", fmt.Sprintf("%x", h.Sum(nil))).WithField("response_bytes", respBytes).InfoContext(ctx, "request details") + return nil, nil } diff --git a/internal/gitaly/service/smarthttp/upload_pack_test.go b/internal/gitaly/service/smarthttp/upload_pack_test.go index 944268aee5..8c91384c5f 100644 --- a/internal/gitaly/service/smarthttp/upload_pack_test.go +++ b/internal/gitaly/service/smarthttp/upload_pack_test.go @@ -21,12 +21,17 @@ import ( "gitlab.com/gitlab-org/gitaly/v16/internal/bundleuri" "gitlab.com/gitlab-org/gitaly/v16/internal/featureflag" "gitlab.com/gitlab-org/gitaly/v16/internal/git" + "gitlab.com/gitlab-org/gitaly/v16/internal/git/catfile" "gitlab.com/gitlab-org/gitaly/v16/internal/git/gittest" "gitlab.com/gitlab-org/gitaly/v16/internal/git/localrepo" "gitlab.com/gitlab-org/gitaly/v16/internal/git/pktline" "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/config" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/service" "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage/keyvalue" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage/storagemgr" "gitlab.com/gitlab-org/gitaly/v16/internal/grpc/sidechannel" + "gitlab.com/gitlab-org/gitaly/v16/internal/helper" "gitlab.com/gitlab-org/gitaly/v16/internal/structerr" 
"gitlab.com/gitlab-org/gitaly/v16/internal/testhelper" "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper/testcfg" @@ -380,6 +385,7 @@ func TestServer_PostUploadPackWithBundleURI(t *testing.T) { ctx := testhelper.Context(t) ctx = featureflag.ContextWithFeatureFlag(ctx, featureflag.BundleURI, true) + ctx = featureflag.ContextWithFeatureFlag(ctx, featureflag.AutogenerateBundlesForBundleURI, false) tempDir := testhelper.TempDir(t) keyFile, err := os.Create(filepath.Join(tempDir, "secret.key")) @@ -500,6 +506,232 @@ func TestServer_PostUploadPackWithBundleURI(t *testing.T) { } } +func TestServer_PostUploadPackAutogenerateBundles(t *testing.T) { + t.Parallel() + + ctx := testhelper.Context(t) + ctx = featureflag.ContextWithFeatureFlag(ctx, featureflag.AutogenerateBundlesForBundleURI, true) + ctx = featureflag.ContextWithFeatureFlag(ctx, featureflag.BundleURI, true) + + tempDir := testhelper.TempDir(t) + keyFile, err := os.Create(filepath.Join(tempDir, "secret.key")) + require.NoError(t, err) + _, err = keyFile.WriteString("super-secret-key") + require.NoError(t, err) + require.NoError(t, keyFile.Close()) + + testCases := []struct { + desc string + sinkDir string + setup func( + t *testing.T, + ctx context.Context, + cfg config.Cfg, + sink *bundleuri.Sink, + tracker *service.InProgressTracker, + repoProto *gitalypb.Repository, + repoPath string, + ) + expectBundleGenerated bool + verifyBundle func(*testing.T, config.Cfg, string, git.ObjectID) + }{ + { + desc: "autogeneration successful", + setup: func( + t *testing.T, + ctx context.Context, + cfg config.Cfg, + sink *bundleuri.Sink, + tracker *service.InProgressTracker, + repoProto *gitalypb.Repository, + repoPath string, + ) { + gittest.WriteCommit(t, cfg, repoPath, + gittest.WithTreeEntries(gittest.TreeEntry{Mode: "100644", Path: "README", Content: "much"}), + gittest.WithBranch("main")) + + key := repoProto.GetGlRepository() + tracker.IncrementInProgress(key) + tracker.IncrementInProgress(key) + 
tracker.IncrementInProgress(key) + tracker.IncrementInProgress(key) + tracker.IncrementInProgress(key) + tracker.IncrementInProgress(key) + }, + expectBundleGenerated: true, + verifyBundle: func(t *testing.T, cfg config.Cfg, bundlePath string, commit git.ObjectID) { + tempDir := t.TempDir() + objectFormat := gittest.DefaultObjectHash.Format + gittest.Exec(t, cfg, "init", "--object-format="+objectFormat, tempDir) + gittest.Exec(t, cfg, "-C", tempDir, "bundle", "unbundle", bundlePath) + // A new bundle is expected to be created containing the new commit + gittest.RequireObjectExists(t, cfg, tempDir, commit) + }, + }, + { + desc: "bundle already exists", + setup: func( + t *testing.T, + ctx context.Context, + cfg config.Cfg, + sink *bundleuri.Sink, + tracker *service.InProgressTracker, + repoProto *gitalypb.Repository, + repoPath string, + ) { + gittest.WriteCommit(t, cfg, repoPath, + gittest.WithTreeEntries(gittest.TreeEntry{Mode: "100644", Path: "README", Content: "much"}), + gittest.WithBranch("main")) + key := repoProto.GetGlRepository() + tracker.IncrementInProgress(key) + tracker.IncrementInProgress(key) + tracker.IncrementInProgress(key) + tracker.IncrementInProgress(key) + tracker.IncrementInProgress(key) + tracker.IncrementInProgress(key) + + repo := localrepo.NewTestRepo(t, cfg, repoProto) + require.NoError(t, sink.Generate(ctx, repo)) + }, + expectBundleGenerated: false, + verifyBundle: func(t *testing.T, cfg config.Cfg, bundlePath string, commit git.ObjectID) { + tempDir := t.TempDir() + objectFormat := gittest.DefaultObjectHash.Format + gittest.Exec(t, cfg, "init", "--object-format="+objectFormat, tempDir) + gittest.Exec(t, cfg, "-C", tempDir, "bundle", "unbundle", bundlePath) + // No new bundle is expected to be created since one already existed. 
+ gittest.RequireObjectNotExists(t, cfg, tempDir, commit) + }, + }, + { + desc: "no concurrent upload packs in flight", + setup: func( + t *testing.T, + ctx context.Context, + cfg config.Cfg, + sink *bundleuri.Sink, + tracker *service.InProgressTracker, + repoProto *gitalypb.Repository, + repoPath string, + ) { + gittest.WriteCommit(t, cfg, repoPath, + gittest.WithTreeEntries(gittest.TreeEntry{Mode: "100644", Path: "README", Content: "much"}), + gittest.WithBranch("main")) + }, + expectBundleGenerated: false, + verifyBundle: func(t *testing.T, cfg config.Cfg, bundlePath string, commit git.ObjectID) { + tempDir := t.TempDir() + gittest.Exec(t, cfg, "init", tempDir) + gittest.Exec(t, cfg, "-C", tempDir, "bundle", "unbundle", bundlePath) + // No new bundle is expected to have been created because there are no + // inflight upload pack calls. + gittest.RequireObjectNotExists(t, cfg, tempDir, commit) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + doneChan := make(chan struct{}) + errChan := make(chan error) + + var bundlePath string + + bundleGeneratedNotifier := func(path string, err error) { + bundlePath = path + + close(doneChan) + errChan <- err + } + + tracker := service.NewInProgressTracker() + + sinkDir := t.TempDir() + sink, err := bundleuri.NewSink(ctx, "file://"+sinkDir, bundleuri.WithBundleGenerationNotifier(bundleGeneratedNotifier)) + require.NoError(t, err) + + cfg := testcfg.Build(t) + logger := testhelper.NewLogger(t) + + cfg.BundleURI.Autogeneration = true + + gitCmdFactory := gittest.NewCommandFactory(t, cfg) + locator := config.NewLocator(cfg) + catfileCache := catfile.NewCache(cfg) + t.Cleanup(catfileCache.Stop) + + dbMgr, err := keyvalue.NewDBManager( + cfg.Storages, + keyvalue.NewBadgerStore, + helper.NewNullTickerFactory(), + logger, + ) + require.NoError(t, err) + t.Cleanup(dbMgr.Close) + + partitionManager, err := storagemgr.NewPartitionManager( + ctx, + cfg.Storages, + gitCmdFactory, + 
localrepo.NewFactory(logger, locator, gitCmdFactory, catfileCache), + logger, + dbMgr, + cfg.Prometheus, + nil, + ) + require.NoError(t, err) + t.Cleanup(partitionManager.Close) + + server := startSmartHTTPServerWithOptions(t, cfg, nil, []testserver.GitalyServerOpt{ + testserver.WithBundleURISink(sink), + testserver.WithLogger(logger), + testserver.WithInProgressTracker(tracker), + testserver.WithTransactionRegistry(storagemgr.NewTransactionRegistry()), + testserver.WithPartitionManager(partitionManager), + testserver.WithGitCommandFactory(gitCmdFactory), + }) + + cfg.SocketPath = server.Address() + + repoProto, repoPath := gittest.CreateRepository(t, ctx, cfg) + oldCommit := gittest.WriteCommit(t, cfg, repoPath) + newCommit := gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch("master"), gittest.WithParents(oldCommit)) + + if tc.setup != nil { + tc.setup(t, ctx, cfg, sink, tracker, repoProto, repoPath) + } + + commitInUpdatedBundle := gittest.WriteCommit(t, cfg, repoPath, + gittest.WithTreeEntries(gittest.TreeEntry{Mode: "100644", Path: "CHANGELOG", Content: "nothing changed"}), + gittest.WithBranch("main")) + + // UploadPack request is a "want" packet line followed by a packet flush, then many "have" packets followed by a packet flush. 
+ // This is explained a bit in https://git-scm.com/book/en/v2/Git-Internals-Transfer-Protocols#_downloading_data + requestBuffer := &bytes.Buffer{} + gittest.WritePktlineString(t, requestBuffer, fmt.Sprintf("want %s %s\n", newCommit, clientCapabilities)) + gittest.WritePktlineFlush(t, requestBuffer) + gittest.WritePktlineString(t, requestBuffer, fmt.Sprintf("have %s\n", oldCommit)) + gittest.WritePktlineFlush(t, requestBuffer) + + req := &gitalypb.PostUploadPackWithSidechannelRequest{Repository: repoProto} + responseBuffer, err := makePostUploadPackWithSidechannelRequest(t, ctx, cfg.SocketPath, cfg.Auth.Token, req, requestBuffer) + require.NoError(t, err) + + pack, _, _ := extractPackDataFromResponse(t, responseBuffer) + require.NotEmpty(t, pack, "Expected to find a pack file in response, found none") + + if tc.expectBundleGenerated { + <-doneChan + err := <-errChan + require.NoError(t, err) + tc.verifyBundle(t, cfg, filepath.Join(sinkDir, bundlePath), commitInUpdatedBundle) + } else { + require.Empty(t, bundlePath) + } + }) + } +} + func testServerPostUploadPackWithSideChannelValidation(t *testing.T, ctx context.Context, makeRequest requestMaker, opts ...testcfg.Option) { cfg := testcfg.Build(t, opts...) serverSocketPath := runSmartHTTPServer(t, cfg) -- GitLab From eab2bc9ad584cd07fd17106edd08d5a7b991ecb5 Mon Sep 17 00:00:00 2001 From: Sami Hiltunen Date: Tue, 16 Jul 2024 09:49:16 +0300 Subject: [PATCH 14/17] Fix racy write in ServerInfo ServerInfo is checking whether the storage is readable and writable. The check is performed by statting the storage directory and writing a test file in it. This has a number of issues: 1. If Gitaly doesn't exit cleanly, the test file is left stale in the storage. 2. Multiple invocations of the RPC will do the same write on the same file in a racy manner. While this doesn't itself cause issues, it's a bit surprising. 3. Testing for writes by writing a single file into the storage is not really sufficient. 
There could be issues writing in subdirectories of the storage as well. 4. The readability check done just stats the storage directory. It doesn't also ensure anything about the subdirectories. Moreover, statting a directory is possible without having read permissions on it, so the check is not really sufficient to ensure the storage can be read. 5. The check is ignoring errors coming from stat that could actually be useful. All in all, the read/write check doesn't really make sense to have in the first place. It doesn't sufficiently detect issues, and Gitaly should refuse to boot anyway if it can't access or write in the storage. For now, remove the racy write in the storage by replacing the check with whether or not we have the write permission on the file. We should later look into removing the unnecessary functionality in the RPC. --- internal/gitaly/service/server/info.go | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/internal/gitaly/service/server/info.go b/internal/gitaly/service/server/info.go index 8be3a77254..1e330240ff 100644 --- a/internal/gitaly/service/server/info.go +++ b/internal/gitaly/service/server/info.go @@ -3,11 +3,10 @@ package server import ( "context" "os" - "path/filepath" "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage/mode/permission" "gitlab.com/gitlab-org/gitaly/v16/internal/helper/fstype" - "gitlab.com/gitlab-org/gitaly/v16/internal/helper/perm" "gitlab.com/gitlab-org/gitaly/v16/internal/structerr" "gitlab.com/gitlab-org/gitaly/v16/internal/version" "gitlab.com/gitlab-org/gitaly/v16/proto/go/gitalypb" @@ -46,19 +45,14 @@ func (s *server) ServerInfo(ctx context.Context, in *gitalypb.ServerInfoRequest) }, nil } -func shardCheck(shardPath string) (readable bool, writeable bool) { - if _, err := os.Stat(shardPath); err == nil { - readable = true +func shardCheck(shardPath string) (bool, bool) { + info, err := 
os.Stat(shardPath) + if err != nil { + return false, false } - // the path uses a `+` to avoid naming collisions - testPath := filepath.Join(shardPath, "+testWrite") - - content := []byte("testWrite") - if err := os.WriteFile(testPath, content, perm.SharedFile); err == nil { - writeable = true - } - _ = os.Remove(testPath) + readable := info.Mode()&(permission.OwnerRead|permission.OwnerExecute) == permission.OwnerRead|permission.OwnerExecute + writable := info.Mode()&permission.OwnerWrite == permission.OwnerWrite - return + return readable, writable } -- GitLab From 870e5cedac7152e2f4d86d40efbec5deae932f13 Mon Sep 17 00:00:00 2001 From: Sami Hiltunen Date: Thu, 11 Jul 2024 13:00:20 +0300 Subject: [PATCH 15/17] Remove most production usage of SharedFile Gitaly is using SharedFile in many places where PrivateWriteOnceFile would suffice. In production code, we're using it in a few locations: - streamcache for the cached file. They're never written to after creation, so no need to grant Gitaly the write bit. This is an internal cache, so no need to grant others permission to read the files. - ServerInfo RPC uses it to write out a file to test whether Gitaly has permissions to write into the storage and removes it afterwards. No need for anyone to read the file nor for Gitaly to write into it again. - ReplicateRepository RPC uses it for git config. The config file should not be written into after creating it as this would break snapshot isolation with transactions. A new file should be created and the old file replaced with it. Gitaly's storage should not be read by others than Gitaly. The repositories are not guaranteed to be in consistent state if there is a WAL entry application in progress. In addition, SharedFile is used for log files that are readable by other than the Gitaly's user. This use case is left in place for now until we soon have an alternative available which we'll replace the permission with. 
Replace all the usage of SharedFile in production code with PrivateWriteOnceFile, other than the usage with log files. --- internal/gitaly/service/repository/replicate.go | 2 +- internal/streamcache/filestore.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/gitaly/service/repository/replicate.go b/internal/gitaly/service/repository/replicate.go index 64da0ac8db..f810f780a6 100644 --- a/internal/gitaly/service/repository/replicate.go +++ b/internal/gitaly/service/repository/replicate.go @@ -352,7 +352,7 @@ func (s *server) syncGitconfig(ctx context.Context, source, target *gitalypb.Rep } configPath := filepath.Join(repoPath, "config") - if err := s.writeFile(ctx, configPath, perm.SharedFile, streamio.NewReader(func() ([]byte, error) { + if err := s.writeFile(ctx, configPath, perm.PrivateWriteOnceFile, streamio.NewReader(func() ([]byte, error) { resp, err := stream.Recv() return resp.GetData(), err })); err != nil { diff --git a/internal/streamcache/filestore.go b/internal/streamcache/filestore.go index 4d01e377e6..8ce3fb62b8 100644 --- a/internal/streamcache/filestore.go +++ b/internal/streamcache/filestore.go @@ -110,7 +110,7 @@ func (fs *filestore) Create() (namedWriteCloser, error) { return nil, fmt.Errorf("Create: mkdir: %w", err) } - f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm.SharedFile) + f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm.PrivateWriteOnceFile) if err != nil { return nil, fmt.Errorf("Create: %w", err) } -- GitLab From 10bf94b427226aa8ea055b0c4fca80ee411027d3 Mon Sep 17 00:00:00 2001 From: Sami Hiltunen Date: Thu, 11 Jul 2024 13:03:20 +0300 Subject: [PATCH 16/17] Remove test usage of SharedFile permission SharedFile permission is mostly used in tests. Remove usage of the permission in tests and replace it with the general permission set. 
--- cmd/gitaly-wrapper/main_test.go | 18 ++++---- internal/backup/locator_test.go | 22 +++++----- internal/backup/sink_test.go | 2 +- internal/bundleuri/sink_test.go | 2 +- internal/cache/walker_test.go | 2 +- internal/cgroups/mock_linux_test.go | 11 ++--- internal/git/conflict/parser_test.go | 2 +- internal/git/gittest/http_server.go | 2 +- .../git/housekeeping/clean_stale_data_test.go | 4 +- .../manager/optimize_repository_test.go | 14 +++---- internal/git/localrepo/refs_external_test.go | 2 +- internal/git/localrepo/snapshot_test.go | 10 ++--- internal/git/objectpool/disconnect_test.go | 8 ++-- internal/git/objectpool/link_test.go | 8 ++-- internal/git/objectpool/pool_test.go | 2 +- internal/git/remoterepo/repository_test.go | 2 +- internal/git/stats/repository_info_test.go | 42 +++++++++---------- internal/gitaly/config/config_test.go | 8 ++-- .../gitaly/linguist/language_stats_test.go | 2 +- internal/gitaly/linguist/linguist_test.go | 2 +- internal/gitaly/repoutil/custom_hooks_test.go | 4 +- internal/gitaly/repoutil/remove_test.go | 2 +- .../service/objectpool/alternates_test.go | 2 +- .../gitaly/service/objectpool/create_test.go | 2 +- .../gitaly/service/objectpool/get_test.go | 2 +- .../gitaly/service/objectpool/link_test.go | 2 +- .../service/repository/create_fork_test.go | 4 +- .../create_repository_from_url_test.go | 2 +- .../service/repository/fetch_remote_test.go | 2 +- .../gitaly/service/repository/fsck_test.go | 2 +- .../repository/info_attributes_test.go | 2 +- .../service/repository/object_format_test.go | 2 +- .../gitaly/service/repository/remove_test.go | 2 +- .../service/repository/replicate_test.go | 2 +- .../service/repository/snapshot_test.go | 6 +-- .../gitaly/service/smarthttp/inforefs_test.go | 2 +- .../gitaly/service/ssh/receive_pack_test.go | 2 +- .../gitaly/service/ssh/upload_pack_test.go | 2 +- .../storagemgr/apply_operations_test.go | 2 +- internal/gitaly/storage/wal/entry_test.go | 12 +++--- internal/gitlab/test_server.go | 2 +- 
internal/safe/locking_directory_test.go | 2 +- internal/safe/locking_file_writer_test.go | 21 +++++----- internal/streamcache/cache_test.go | 6 +-- internal/streamcache/filestore_test.go | 2 +- internal/tempdir/clean_test.go | 2 +- internal/tempdir/tempdir_test.go | 2 +- internal/testhelper/testcfg/gitaly.go | 2 +- 48 files changed, 132 insertions(+), 130 deletions(-) diff --git a/cmd/gitaly-wrapper/main_test.go b/cmd/gitaly-wrapper/main_test.go index ff113e0da7..dab9dd91e5 100644 --- a/cmd/gitaly-wrapper/main_test.go +++ b/cmd/gitaly-wrapper/main_test.go @@ -68,7 +68,7 @@ func TestFindProcess(t *testing.T) { t.Parallel() path := filepath.Join(testhelper.TempDir(t), "pid") - require.NoError(t, os.WriteFile(path, []byte("garbage"), perm.SharedFile)) + require.NoError(t, os.WriteFile(path, []byte("garbage"), perm.PrivateWriteOnceFile)) _, err := findProcess(path) _, expectedErr := strconv.Atoi("garbage") @@ -81,7 +81,7 @@ func TestFindProcess(t *testing.T) { // The below PID can exist, but chances are sufficiently low to hopefully not matter // in practice. path := filepath.Join(testhelper.TempDir(t), "pid") - require.NoError(t, os.WriteFile(path, []byte("7777777"), perm.SharedFile)) + require.NoError(t, os.WriteFile(path, []byte("7777777"), perm.PrivateWriteOnceFile)) // The process isn't alive, so we expect neither an error nor a process to be // returned. 
@@ -116,7 +116,7 @@ func TestFindProcess(t *testing.T) { require.NoError(t, err) path := filepath.Join(testhelper.TempDir(t), "pid") - require.NoError(t, os.WriteFile(path, []byte(strconv.FormatInt(int64(cmd.Process.Pid), 10)), perm.SharedFile)) + require.NoError(t, os.WriteFile(path, []byte(strconv.FormatInt(int64(cmd.Process.Pid), 10)), perm.PrivateWriteOnceFile)) process, err := findProcess(path) require.NotNil(t, process) @@ -174,7 +174,7 @@ func TestReadPIDFile(t *testing.T) { t.Parallel() path := filepath.Join(testhelper.TempDir(t), "pid") - require.NoError(t, os.WriteFile(path, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(path, nil, perm.PrivateWriteOnceFile)) _, err := readPIDFile(path) _, expectedErr := strconv.Atoi("") require.Equal(t, expectedErr, err) @@ -184,7 +184,7 @@ func TestReadPIDFile(t *testing.T) { t.Parallel() path := filepath.Join(testhelper.TempDir(t), "pid") - require.NoError(t, os.WriteFile(path, []byte("invalid"), perm.SharedFile)) + require.NoError(t, os.WriteFile(path, []byte("invalid"), perm.PrivateWriteOnceFile)) _, err := readPIDFile(path) _, expectedErr := strconv.Atoi("invalid") require.Equal(t, expectedErr, err) @@ -194,7 +194,7 @@ func TestReadPIDFile(t *testing.T) { t.Parallel() path := filepath.Join(testhelper.TempDir(t), "pid") - require.NoError(t, os.WriteFile(path, []byte("12345"), perm.SharedFile)) + require.NoError(t, os.WriteFile(path, []byte("12345"), perm.PrivateWriteOnceFile)) pid, err := readPIDFile(path) require.NoError(t, err) require.Equal(t, 12345, pid) @@ -347,7 +347,7 @@ func TestRun(t *testing.T) { // Write the PID of the running process into the PID file. As a result, it should // get adopted by gitaly-wrapper, which means it wouldn't try to execute it anew. 
pidPath := filepath.Join(testhelper.TempDir(t), "pid") - require.NoError(t, os.WriteFile(pidPath, []byte(strconv.FormatInt(int64(scriptCmd.Process.Pid), 10)), perm.SharedFile)) + require.NoError(t, os.WriteFile(pidPath, []byte(strconv.FormatInt(int64(scriptCmd.Process.Pid), 10)), perm.PrivateWriteOnceFile)) // Run gitaly-script with a binary path whose basename matches, but which ultimately // doesn't exist. This proves that it doesn't try to execute the script again. @@ -411,7 +411,7 @@ func TestRun(t *testing.T) { `)) pidPath := filepath.Join(testhelper.TempDir(t), "pid") - require.NoError(t, os.WriteFile(pidPath, []byte("12345"), perm.SharedFile)) + require.NoError(t, os.WriteFile(pidPath, []byte("12345"), perm.PrivateWriteOnceFile)) cmd := exec.CommandContext(ctx, binary, script) cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", bootstrap.EnvPidFile, pidPath)) @@ -442,7 +442,7 @@ func TestRun(t *testing.T) { require.NoError(t, err) pidPath := filepath.Join(testhelper.TempDir(t), "pid") - require.NoError(t, os.WriteFile(pidPath, []byte(strconv.FormatInt(int64(scriptCmd.Process.Pid), 10)), perm.SharedFile)) + require.NoError(t, os.WriteFile(pidPath, []byte(strconv.FormatInt(int64(scriptCmd.Process.Pid), 10)), perm.PrivateWriteOnceFile)) cmd := exec.CommandContext(ctx, binary, script) cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", bootstrap.EnvPidFile, pidPath)) diff --git a/internal/backup/locator_test.go b/internal/backup/locator_test.go index 711bae2197..05d625aaf6 100644 --- a/internal/backup/locator_test.go +++ b/internal/backup/locator_test.go @@ -148,8 +148,8 @@ func TestPointerLocator(t *testing.T) { expectedOffset: 1, setup: func(tb testing.TB, ctx context.Context, backupPath string) { require.NoError(t, os.MkdirAll(filepath.Join(backupPath, repo.RelativePath, "abc123"), perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte("abc123"), perm.SharedFile)) - require.NoError(t, 
os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "abc123", "LATEST"), []byte("001"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte("abc123"), perm.PrivateWriteOnceFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "abc123", "LATEST"), []byte("001"), perm.PrivateWriteOnceFile)) }, }, } { @@ -220,8 +220,8 @@ func TestPointerLocator(t *testing.T) { require.ErrorIs(t, err, ErrDoesntExist) require.NoError(t, os.MkdirAll(filepath.Join(backupPath, repo.RelativePath, backupID), perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte(backupID), perm.SharedFile)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("003"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte(backupID), perm.PrivateWriteOnceFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("003"), perm.PrivateWriteOnceFile)) expected := &Backup{ ID: backupID, Repository: repo, @@ -281,8 +281,8 @@ func TestPointerLocator(t *testing.T) { require.Equal(t, expectedFallback, fallbackFull) require.NoError(t, os.MkdirAll(filepath.Join(backupPath, repo.RelativePath, backupID), perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte(backupID), perm.SharedFile)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("001"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte(backupID), perm.PrivateWriteOnceFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("001"), perm.PrivateWriteOnceFile)) expected := &Backup{ ID: backupID, Repository: 
repo, @@ -315,7 +315,7 @@ func TestPointerLocator(t *testing.T) { require.ErrorIs(t, err, ErrDoesntExist) require.NoError(t, os.MkdirAll(filepath.Join(backupPath, repo.RelativePath), perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte("invalid"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte("invalid"), perm.PrivateWriteOnceFile)) _, err = l.FindLatest(ctx, repo) require.EqualError(t, err, "pointer locator: find latest: find: find latest ID: storage service sink: new reader for \"TestPointerLocator/invalid/LATEST\": doesn't exist") }) @@ -334,8 +334,8 @@ func TestPointerLocator(t *testing.T) { require.ErrorIs(t, err, ErrDoesntExist) require.NoError(t, os.MkdirAll(filepath.Join(backupPath, repo.RelativePath, backupID), perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte(backupID), perm.SharedFile)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("invalid"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte(backupID), perm.PrivateWriteOnceFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("invalid"), perm.PrivateWriteOnceFile)) _, err = l.FindLatest(ctx, repo) require.EqualError(t, err, "pointer locator: find latest: find: determine increment ID: strconv.Atoi: parsing \"invalid\": invalid syntax") @@ -370,7 +370,7 @@ func TestPointerLocator(t *testing.T) { } require.NoError(t, os.MkdirAll(filepath.Join(backupPath, repo.RelativePath, backupID), perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("003"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), 
[]byte("003"), perm.PrivateWriteOnceFile)) expected := &Backup{ ID: backupID, Repository: repo, @@ -415,7 +415,7 @@ func TestPointerLocator(t *testing.T) { require.ErrorIs(t, err, ErrDoesntExist) require.NoError(t, os.MkdirAll(filepath.Join(backupPath, repo.RelativePath, backupID), perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("invalid"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("invalid"), perm.PrivateWriteOnceFile)) _, err = l.Find(ctx, repo, backupID) require.EqualError(t, err, "pointer locator: find: determine increment ID: strconv.Atoi: parsing \"invalid\": invalid syntax") diff --git a/internal/backup/sink_test.go b/internal/backup/sink_test.go index 1fbef67942..1e88ef8c24 100644 --- a/internal/backup/sink_test.go +++ b/internal/backup/sink_test.go @@ -53,7 +53,7 @@ func TestResolveSink(t *testing.T) { "token_uri": "https://accounts.google.com/o/oauth2/token", "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/303724477529-compute%40developer.gserviceaccount.com" -}`), perm.SharedFile)) +}`), perm.PrivateWriteOnceFile)) for _, tc := range []struct { desc string diff --git a/internal/bundleuri/sink_test.go b/internal/bundleuri/sink_test.go index 00dba139e6..e3534173bd 100644 --- a/internal/bundleuri/sink_test.go +++ b/internal/bundleuri/sink_test.go @@ -100,7 +100,7 @@ func TestSink_SignedURL(t *testing.T) { setup: func(t *testing.T, sinkDir string, sink *Sink) { path := filepath.Join(sinkDir, sink.relativePath(repo, "default")) require.NoError(t, os.MkdirAll(filepath.Dir(path), perm.PrivateDir)) - require.NoError(t, os.WriteFile(path, []byte("hello"), perm.SharedFile)) + require.NoError(t, os.WriteFile(path, []byte("hello"), perm.PrivateWriteOnceFile)) }, }, { diff --git 
a/internal/cache/walker_test.go b/internal/cache/walker_test.go index 6c5bc945c3..3cad56dec7 100644 --- a/internal/cache/walker_test.go +++ b/internal/cache/walker_test.go @@ -116,7 +116,7 @@ func TestCleanWalkEmptyDirs(t *testing.T) { if strings.HasSuffix(tt.path, "/") { require.NoError(t, os.MkdirAll(p, perm.PrivateDir)) } else { - require.NoError(t, os.WriteFile(p, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(p, nil, perm.PrivateWriteOnceFile)) if tt.stale { require.NoError(t, os.Chtimes(p, time.Now(), time.Now().Add(-time.Hour))) } diff --git a/internal/cgroups/mock_linux_test.go b/internal/cgroups/mock_linux_test.go index 88f0b44463..a1fde9b81c 100644 --- a/internal/cgroups/mock_linux_test.go +++ b/internal/cgroups/mock_linux_test.go @@ -22,6 +22,7 @@ package cgroups import ( "fmt" + "io/fs" "os" "path/filepath" "testing" @@ -156,7 +157,7 @@ func (m *mockCgroupV1) setupMockCgroupFiles( for filename, content := range content { controlFilePath := filepath.Join(cgroupPath, filename) - require.NoError(t, os.WriteFile(controlFilePath, []byte(content), perm.SharedFile)) + require.NoError(t, os.WriteFile(controlFilePath, []byte(content), fs.ModePerm)) } for _, shard := range shards { @@ -165,7 +166,7 @@ func (m *mockCgroupV1) setupMockCgroupFiles( for filename, content := range content { shardControlFilePath := filepath.Join(shardPath, filename) - require.NoError(t, os.WriteFile(shardControlFilePath, []byte(content), perm.SharedFile)) + require.NoError(t, os.WriteFile(shardControlFilePath, []byte(content), fs.ModePerm)) } } } @@ -244,12 +245,12 @@ func (m *mockCgroupV2) setupMockCgroupFiles( for filename, content := range content { controlFilePath := filepath.Join(m.root, manager.cfg.HierarchyRoot, filename) - require.NoError(t, os.WriteFile(controlFilePath, []byte(content), perm.SharedFile)) + require.NoError(t, os.WriteFile(controlFilePath, []byte(content), fs.ModePerm)) } for filename, content := range content { controlFilePath := 
filepath.Join(cgroupPath, filename) - require.NoError(t, os.WriteFile(controlFilePath, []byte(content), perm.SharedFile)) + require.NoError(t, os.WriteFile(controlFilePath, []byte(content), fs.ModePerm)) } for _, shard := range shards { @@ -258,7 +259,7 @@ func (m *mockCgroupV2) setupMockCgroupFiles( for filename, content := range content { shardControlFilePath := filepath.Join(shardPath, filename) - require.NoError(t, os.WriteFile(shardControlFilePath, []byte(content), perm.SharedFile)) + require.NoError(t, os.WriteFile(shardControlFilePath, []byte(content), fs.ModePerm)) } } } diff --git a/internal/git/conflict/parser_test.go b/internal/git/conflict/parser_test.go index 31793e400a..4d965d16ce 100644 --- a/internal/git/conflict/parser_test.go +++ b/internal/git/conflict/parser_test.go @@ -112,7 +112,7 @@ we can both agree on this line though t.Run(tt.name, func(t *testing.T) { entry := Entry{ Path: tt.path, - Mode: uint(perm.SharedFile), + Mode: uint(perm.PrivateWriteOnceFile), Contents: []byte("something-with-trailing-newline\n"), } diff --git a/internal/git/gittest/http_server.go b/internal/git/gittest/http_server.go index 64757b3026..f1407d4464 100644 --- a/internal/git/gittest/http_server.go +++ b/internal/git/gittest/http_server.go @@ -19,7 +19,7 @@ import ( // prepared such that git-http-backend(1) will serve it by creating the "git-daemon-export-ok" magic // file. 
func HTTPServer(tb testing.TB, ctx context.Context, gitCmdFactory git.CommandFactory, repoPath string, middleware func(http.ResponseWriter, *http.Request, http.Handler)) int { - require.NoError(tb, os.WriteFile(filepath.Join(repoPath, "git-daemon-export-ok"), nil, perm.SharedFile)) + require.NoError(tb, os.WriteFile(filepath.Join(repoPath, "git-daemon-export-ok"), nil, perm.PrivateWriteOnceFile)) listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(tb, err) diff --git a/internal/git/housekeeping/clean_stale_data_test.go b/internal/git/housekeeping/clean_stale_data_test.go index 254577ff53..c2341d7e1b 100644 --- a/internal/git/housekeeping/clean_stale_data_test.go +++ b/internal/git/housekeeping/clean_stale_data_test.go @@ -255,7 +255,7 @@ func TestPruneEmptyConfigSections(t *testing.T) { }, } { t.Run(tc.desc, func(t *testing.T) { - require.NoError(t, os.WriteFile(configPath, []byte(tc.configData), perm.SharedFile)) + require.NoError(t, os.WriteFile(configPath, []byte(tc.configData), perm.PrivateWriteOnceFile)) skippedSections, err := PruneEmptyConfigSections(ctx, repo) require.NoError(t, err) @@ -364,7 +364,7 @@ func TestRemoveGitLabFullPathConfig(t *testing.T) { }, } { t.Run(tc.desc, func(t *testing.T) { - require.NoError(t, os.WriteFile(configPath, []byte(tc.configData), perm.SharedFile)) + require.NoError(t, os.WriteFile(configPath, []byte(tc.configData), perm.PrivateWriteOnceFile)) cleanupCount, err := removeGitLabFullPathConfig(ctx, repo, &transaction.MockManager{}) require.NoError(t, err) diff --git a/internal/git/housekeeping/manager/optimize_repository_test.go b/internal/git/housekeeping/manager/optimize_repository_test.go index c25e4e126b..b5aa1a2baa 100644 --- a/internal/git/housekeeping/manager/optimize_repository_test.go +++ b/internal/git/housekeeping/manager/optimize_repository_test.go @@ -731,7 +731,7 @@ func TestOptimizeRepository(t *testing.T) { for i := 0; i < housekeeping.LooseObjectLimit+1; i++ { blobPath := filepath.Join(repoPath, 
"objects", "17", fmt.Sprintf("%d", i)) - require.NoError(t, os.WriteFile(blobPath, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(blobPath, nil, perm.PrivateWriteOnceFile)) require.NoError(t, os.Chtimes(blobPath, almostTwoWeeksAgo, almostTwoWeeksAgo)) } @@ -766,7 +766,7 @@ func TestOptimizeRepository(t *testing.T) { for i := 0; i < housekeeping.LooseObjectLimit+1; i++ { blobPath := filepath.Join(repoPath, "objects", "17", fmt.Sprintf("%d", i)) - require.NoError(t, os.WriteFile(blobPath, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(blobPath, nil, perm.PrivateWriteOnceFile)) require.NoError(t, os.Chtimes(blobPath, moreThanTwoWeeksAgo, moreThanTwoWeeksAgo)) } @@ -1733,7 +1733,7 @@ func TestRepositoryManager_CleanStaleData_reftable(t *testing.T) { path := filepath.Join(repoPath, "reftable", "tables.list.lock") - require.NoError(t, os.WriteFile(path, []byte{}, perm.SharedFile)) + require.NoError(t, os.WriteFile(path, []byte{}, perm.PrivateWriteOnceFile)) filetime := time.Now().Add(-tc.age) require.NoError(t, os.Chtimes(path, filetime, filetime)) @@ -1843,7 +1843,7 @@ func TestRepositoryManager_CleanStaleData_references(t *testing.T) { path := filepath.Join(repoPath, ref.name) require.NoError(t, os.MkdirAll(filepath.Dir(path), perm.PrivateDir)) - require.NoError(t, os.WriteFile(path, bytes.Repeat([]byte{0}, ref.size), perm.SharedFile)) + require.NoError(t, os.WriteFile(path, bytes.Repeat([]byte{0}, ref.size), perm.PrivateWriteOnceFile)) filetime := time.Now().Add(-ref.age) require.NoError(t, os.Chtimes(path, filetime, filetime)) } @@ -2492,7 +2492,7 @@ func TestRepositoryManager_CleanStaleData_unsetConfiguration(t *testing.T) { else = untouched [totally] unrelated = untouched -`), perm.SharedFile)) +`), perm.PrivateWriteOnceFile)) mgr := New(cfg.Prometheus, testhelper.SharedLogger(t), nil, nil) @@ -2588,7 +2588,7 @@ func TestRepositoryManager_CleanStaleData_pruneEmptyConfigSections(t *testing.T) [remote "tmp-03b5e8c765135b343214d471843a062a"] 
[remote "tmp-f57338181aca1d599669dbb71ce9ce57"] [remote "tmp-8c948ca94832c2725733e48cb2902287"] -`), perm.SharedFile)) +`), perm.PrivateWriteOnceFile)) mgr := New(cfg.Prometheus, testhelper.SharedLogger(t), nil, nil) @@ -2629,7 +2629,7 @@ func TestRepositoryManager_CleanStaleData_removeGitLabFullPathConfig(t *testing. [gitlab] fullpath = foo/bar other = config -`), perm.SharedFile)) +`), perm.PrivateWriteOnceFile)) mgr := New(cfg.Prometheus, testhelper.SharedLogger(t), nil, nil) diff --git a/internal/git/localrepo/refs_external_test.go b/internal/git/localrepo/refs_external_test.go index 4539a280ba..bf982a5401 100644 --- a/internal/git/localrepo/refs_external_test.go +++ b/internal/git/localrepo/refs_external_test.go @@ -179,7 +179,7 @@ func TestRepo_SetDefaultBranch_errors(t *testing.T) { require.NoError(t, updater.Prepare()) t.Cleanup(func() { require.NoError(t, updater.Close()) }) } else { - require.NoError(t, os.WriteFile(filepath.Join(repoPath, "HEAD.lock"), []byte(""), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(repoPath, "HEAD.lock"), []byte(""), perm.PrivateWriteOnceFile)) } err = repo.SetDefaultBranch(ctx, &transaction.MockManager{}, "refs/heads/branch") diff --git a/internal/git/localrepo/snapshot_test.go b/internal/git/localrepo/snapshot_test.go index b7bf639a7c..dec1aba7cc 100644 --- a/internal/git/localrepo/snapshot_test.go +++ b/internal/git/localrepo/snapshot_test.go @@ -95,7 +95,7 @@ doesn't seem to test a realistic scenario.`) ) // The shallow file, used if the repository is a shallow clone, is also included in snapshots. - require.NoError(t, os.WriteFile(filepath.Join(repoPath, "shallow"), nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(repoPath, "shallow"), nil, perm.PrivateWriteOnceFile)) // Custom Git hooks are not included in snapshots. 
require.NoError(t, os.MkdirAll(filepath.Join(repoPath, "hooks"), perm.PrivateDir)) @@ -104,7 +104,7 @@ doesn't seem to test a realistic scenario.`) require.NoError(t, os.WriteFile( filepath.Join(repoPath, "objects/this-should-not-be-included"), nil, - perm.SharedFile, + perm.PrivateWriteOnceFile, )) return setupData{ @@ -136,7 +136,7 @@ doesn't seem to test a realistic scenario.`) require.NoError(t, os.WriteFile( altFile, []byte(fmt.Sprintf("%s\n", altObjectDir)), - perm.SharedFile, + perm.PrivateWriteOnceFile, )) refs := gittest.FilesOrReftables( @@ -198,7 +198,7 @@ doesn't seem to test a realistic scenario.`) require.NoError(t, os.WriteFile( altFile, []byte(fmt.Sprintf("%s\n", altObjectDir)), - perm.SharedFile, + perm.PrivateWriteOnceFile, )) gittest.RequireObjectExists(t, cfg, repoPath, commitID) @@ -245,7 +245,7 @@ doesn't seem to test a realistic scenario.`) require.NoError(t, os.WriteFile( altFile, []byte(fmt.Sprintf("%s\n", relAltObjectDir)), - perm.SharedFile, + perm.PrivateWriteOnceFile, )) gittest.RequireObjectExists(t, cfg, repoPath, commitID) diff --git a/internal/git/objectpool/disconnect_test.go b/internal/git/objectpool/disconnect_test.go index a4eecbef36..7521404738 100644 --- a/internal/git/objectpool/disconnect_test.go +++ b/internal/git/objectpool/disconnect_test.go @@ -92,7 +92,7 @@ func TestDisconnect(t *testing.T) { altPath, err := repo.InfoAlternatesPath(ctx) require.NoError(t, err) - require.NoError(t, os.WriteFile(altPath, []byte(altContent), perm.SharedFile)) + require.NoError(t, os.WriteFile(altPath, []byte(altContent), perm.PrivateWriteOnceFile)) return repo } @@ -188,7 +188,7 @@ func TestDisconnect(t *testing.T) { altPath, err := repo.InfoAlternatesPath(ctx) require.NoError(t, err) - require.NoError(t, os.WriteFile(altPath, []byte(altObjectDir), perm.SharedFile)) + require.NoError(t, os.WriteFile(altPath, []byte(altObjectDir), perm.PrivateWriteOnceFile)) return setupData{ repository: repo, @@ -382,7 +382,7 @@ func 
TestRemoveAlternatesIfOk(t *testing.T) { altPath, err := repo.InfoAlternatesPath(ctx) require.NoError(t, err) altContent := testhelper.TempDir(t) + "\n" - require.NoError(t, os.WriteFile(altPath, []byte(altContent), perm.SharedFile)) + require.NoError(t, os.WriteFile(altPath, []byte(altContent), perm.PrivateWriteOnceFile)) // Intentionally break the repository so that the consistency check will cause an // error. @@ -412,7 +412,7 @@ func TestRemoveAlternatesIfOk(t *testing.T) { altPath, err := repo.InfoAlternatesPath(ctx) require.NoError(t, err) altContent := testhelper.TempDir(t) + "\n" - require.NoError(t, os.WriteFile(altPath, []byte(altContent), perm.SharedFile)) + require.NoError(t, os.WriteFile(altPath, []byte(altContent), perm.PrivateWriteOnceFile)) // In order to test the scenario where a commit is in a commit graph but not in the // object database, we will first write a new commit, write the commit graph, then diff --git a/internal/git/objectpool/link_test.go b/internal/git/objectpool/link_test.go index 7c0c25b5c2..9f78257236 100644 --- a/internal/git/objectpool/link_test.go +++ b/internal/git/objectpool/link_test.go @@ -112,7 +112,7 @@ func TestLink(t *testing.T) { // Link the repository to object pool using the absolute path of the object pool. // The alternates file should be rewritten to use the relative path. poolObjectsPath := gittest.RepositoryPath(t, ctx, pool, "objects") - require.NoError(t, os.WriteFile(altPath, []byte(poolObjectsPath), perm.SharedFile)) + require.NoError(t, os.WriteFile(altPath, []byte(poolObjectsPath), perm.PrivateWriteOnceFile)) return setupData{ cfg: cfg, @@ -130,7 +130,7 @@ func TestLink(t *testing.T) { // nothing and completes normally. 
altPath, err := repo.InfoAlternatesPath(ctx) require.NoError(t, err) - require.NoError(t, os.WriteFile(altPath, []byte(getRelAltPath(t, repo, pool.Repo)), perm.SharedFile)) + require.NoError(t, os.WriteFile(altPath, []byte(getRelAltPath(t, repo, pool.Repo)), perm.PrivateWriteOnceFile)) return setupData{ cfg: cfg, @@ -148,7 +148,7 @@ func TestLink(t *testing.T) { // linking operation fails. altPath, err := repo.InfoAlternatesPath(ctx) require.NoError(t, err) - require.NoError(t, os.WriteFile(altPath, []byte("../different/object/pool"), perm.SharedFile)) + require.NoError(t, os.WriteFile(altPath, []byte("../different/object/pool"), perm.PrivateWriteOnceFile)) return setupData{ cfg: cfg, @@ -195,7 +195,7 @@ func TestLink(t *testing.T) { // to the same object pool. altPath, err := repo.InfoAlternatesPath(ctx) require.NoError(t, err) - require.NoError(t, os.WriteFile(altPath, []byte(getRelAltPath(t, repo, pool.Repo)), perm.SharedFile)) + require.NoError(t, os.WriteFile(altPath, []byte(getRelAltPath(t, repo, pool.Repo)), perm.PrivateWriteOnceFile)) return setupData{ cfg: cfg, diff --git a/internal/git/objectpool/pool_test.go b/internal/git/objectpool/pool_test.go index 47fad57215..397bfd6ac5 100644 --- a/internal/git/objectpool/pool_test.go +++ b/internal/git/objectpool/pool_test.go @@ -114,7 +114,7 @@ func TestFromRepo_failures(t *testing.T) { require.NoError(t, os.MkdirAll(filepath.Join(repoPath, "objects", "info"), perm.PrivateDir)) alternateFilePath := filepath.Join(repoPath, "objects", "info", "alternates") - require.NoError(t, os.WriteFile(alternateFilePath, tc.fileContent, perm.SharedFile)) + require.NoError(t, os.WriteFile(alternateFilePath, tc.fileContent, perm.PrivateWriteOnceFile)) poolFromRepo, err := FromRepo(ctx, logger, locator, pool.gitCmdFactory, nil, nil, nil, repo) require.Equal(t, tc.expectedErr, err) require.Nil(t, poolFromRepo) diff --git a/internal/git/remoterepo/repository_test.go b/internal/git/remoterepo/repository_test.go index 
513a1d1cf1..678f5405c4 100644 --- a/internal/git/remoterepo/repository_test.go +++ b/internal/git/remoterepo/repository_test.go @@ -117,7 +117,7 @@ func TestRepository_ObjectHash(t *testing.T) { "[extensions]", "objectFormat = blake2b", }, "\n"), - ), perm.SharedFile)) + ), perm.PrivateWriteOnceFile)) repo, err := remoterepo.New(ctx, repoProto, pool) require.NoError(t, err) diff --git a/internal/git/stats/repository_info_test.go b/internal/git/stats/repository_info_test.go index 910d2282fb..f9aa26878a 100644 --- a/internal/git/stats/repository_info_test.go +++ b/internal/git/stats/repository_info_test.go @@ -1029,7 +1029,7 @@ func TestReferencesInfoForRepository(t *testing.T) { // We just write some random garbage -- we don't verify contents // anyway, but just the size. And testing like that is at least // deterministic as we don't have to special-case hash sizes. - require.NoError(t, os.WriteFile(filepath.Join(repoPath, "packed-refs"), []byte("content"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(repoPath, "packed-refs"), []byte("content"), perm.PrivateWriteOnceFile)) }, expectedInfo: ReferencesInfo{ ReferenceBackendName: gittest.DefaultReferenceBackend.Name, @@ -1050,7 +1050,7 @@ func TestReferencesInfoForRepository(t *testing.T) { // We just write some random garbage -- we don't verify contents // anyway, but just the size. And testing like that is at least // deterministic as we don't have to special-case hash sizes. 
- require.NoError(t, os.WriteFile(filepath.Join(repoPath, "packed-refs"), []byte("content"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(repoPath, "packed-refs"), []byte("content"), perm.PrivateWriteOnceFile)) }, expectedInfo: ReferencesInfo{ ReferenceBackendName: gittest.DefaultReferenceBackend.Name, @@ -1102,7 +1102,7 @@ func TestCountLooseObjects(t *testing.T) { differentShard := filepath.Join(repoPath, "objects", "a0") require.NoError(t, os.MkdirAll(differentShard, perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(differentShard, "123456"), []byte("foobar"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(differentShard, "123456"), []byte("foobar"), perm.PrivateWriteOnceFile)) requireLooseObjectsInfo(t, repo, time.Now(), LooseObjectsInfo{ Count: 1, @@ -1118,7 +1118,7 @@ func TestCountLooseObjects(t *testing.T) { for i, shard := range []string{"00", "17", "32", "ff"} { shardPath := filepath.Join(repoPath, "objects", shard) require.NoError(t, os.MkdirAll(shardPath, perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(shardPath, "123456"), make([]byte, i), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(shardPath, "123456"), make([]byte, i), perm.PrivateWriteOnceFile)) } requireLooseObjectsInfo(t, repo, time.Now(), LooseObjectsInfo{ @@ -1173,8 +1173,8 @@ func TestCountLooseObjects(t *testing.T) { shard := filepath.Join(repoPath, "objects", "17") require.NoError(t, os.MkdirAll(shard, perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(shard, "012345"), []byte("valid"), perm.SharedFile)) - require.NoError(t, os.WriteFile(filepath.Join(shard, "garbage"), []byte("garbage"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(shard, "012345"), []byte("valid"), perm.PrivateWriteOnceFile)) + require.NoError(t, os.WriteFile(filepath.Join(shard, "garbage"), []byte("garbage"), perm.PrivateWriteOnceFile)) requireLooseObjectsInfo(t, repo, time.Now(), 
LooseObjectsInfo{ Count: 1, @@ -1213,7 +1213,7 @@ func BenchmarkCountLooseObjects(b *testing.B) { objectPath := filepath.Join(repoPath, "objects", "17", "12345") require.NoError(b, os.Mkdir(filepath.Dir(objectPath), perm.PrivateDir)) - require.NoError(b, os.WriteFile(objectPath, nil, perm.SharedFile)) + require.NoError(b, os.WriteFile(objectPath, nil, perm.PrivateWriteOnceFile)) b.ResetTimer() for i := 0; i < b.N; i++ { @@ -1228,7 +1228,7 @@ func BenchmarkCountLooseObjects(b *testing.B) { for i := 0; i < 256; i++ { objectPath := filepath.Join(repoPath, "objects", fmt.Sprintf("%02x", i), "12345") require.NoError(b, os.Mkdir(filepath.Dir(objectPath), perm.PrivateDir)) - require.NoError(b, os.WriteFile(objectPath, nil, perm.SharedFile)) + require.NoError(b, os.WriteFile(objectPath, nil, perm.PrivateWriteOnceFile)) } b.ResetTimer() @@ -1257,7 +1257,7 @@ func BenchmarkCountLooseObjects(b *testing.B) { for j := 0; j < looseObjectCount; j++ { objectPath := filepath.Join(shardPath, fmt.Sprintf("%d", j)) - require.NoError(b, os.WriteFile(objectPath, nil, perm.SharedFile)) + require.NoError(b, os.WriteFile(objectPath, nil, perm.PrivateWriteOnceFile)) } } @@ -1277,7 +1277,7 @@ func BenchmarkCountLooseObjects(b *testing.B) { for j := 0; j < 1000; j++ { objectPath := filepath.Join(shardPath, fmt.Sprintf("%d", j)) - require.NoError(b, os.WriteFile(objectPath, nil, perm.SharedFile)) + require.NoError(b, os.WriteFile(objectPath, nil, perm.PrivateWriteOnceFile)) } } @@ -1311,7 +1311,7 @@ func TestPackfileInfoForRepository(t *testing.T) { seedRepository: func(t *testing.T, repoPath string) { packfileDir := filepath.Join(repoPath, "objects", "pack") require.NoError(t, os.MkdirAll(packfileDir, perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.pack"), []byte("foobar"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.pack"), []byte("foobar"), perm.PrivateWriteOnceFile)) }, expectedInfo: PackfilesInfo{ Count: 
1, @@ -1323,8 +1323,8 @@ func TestPackfileInfoForRepository(t *testing.T) { seedRepository: func(t *testing.T, repoPath string) { packfileDir := filepath.Join(repoPath, "objects", "pack") require.NoError(t, os.MkdirAll(packfileDir, perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.pack"), []byte("foobar"), perm.SharedFile)) - require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.keep"), []byte("foobar"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.pack"), []byte("foobar"), perm.PrivateWriteOnceFile)) + require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.keep"), []byte("foobar"), perm.PrivateWriteOnceFile)) }, expectedInfo: PackfilesInfo{ Count: 1, @@ -1338,8 +1338,8 @@ func TestPackfileInfoForRepository(t *testing.T) { seedRepository: func(t *testing.T, repoPath string) { packfileDir := filepath.Join(repoPath, "objects", "pack") require.NoError(t, os.MkdirAll(packfileDir, perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.pack"), []byte("foobar"), perm.SharedFile)) - require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.mtimes"), []byte("foobar"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.pack"), []byte("foobar"), perm.PrivateWriteOnceFile)) + require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.mtimes"), []byte("foobar"), perm.PrivateWriteOnceFile)) }, expectedInfo: PackfilesInfo{ Count: 1, @@ -1353,8 +1353,8 @@ func TestPackfileInfoForRepository(t *testing.T) { seedRepository: func(t *testing.T, repoPath string) { packfileDir := filepath.Join(repoPath, "objects", "pack") require.NoError(t, os.MkdirAll(packfileDir, perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.pack"), []byte("foobar"), perm.SharedFile)) - require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-bar.pack"), 
[]byte("123"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.pack"), []byte("foobar"), perm.PrivateWriteOnceFile)) + require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-bar.pack"), []byte("123"), perm.PrivateWriteOnceFile)) }, expectedInfo: PackfilesInfo{ Count: 2, @@ -1425,7 +1425,7 @@ func TestPackfileInfoForRepository(t *testing.T) { gittest.WriteCommit(t, cfg, repoPath, gittest.WithMessage("second"), gittest.WithBranch("second")) gittest.Exec(t, cfg, "-c", "pack.writeReverseIndex=true", "-C", repoPath, "repack", "-db", "--write-midx") - require.NoError(t, os.WriteFile(filepath.Join(repoPath, "objects", "pack", "garbage"), []byte("1"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(repoPath, "objects", "pack", "garbage"), []byte("1"), perm.PrivateWriteOnceFile)) }, expectedInfo: PackfilesInfo{ Count: 2, @@ -1750,7 +1750,7 @@ func TestBitmapInfoForPath(t *testing.T) { desc: "header is too short", setup: func(t *testing.T) string { bitmapPath := filepath.Join(testhelper.TempDir(t), "bitmap") - require.NoError(t, os.WriteFile(bitmapPath, []byte{0, 0, 0}, perm.SharedFile)) + require.NoError(t, os.WriteFile(bitmapPath, []byte{0, 0, 0}, perm.PrivateWriteOnceFile)) return bitmapPath }, expectedErr: fmt.Errorf("reading bitmap header: %w", io.ErrUnexpectedEOF), @@ -1761,7 +1761,7 @@ func TestBitmapInfoForPath(t *testing.T) { bitmapPath := filepath.Join(testhelper.TempDir(t), "bitmap") require.NoError(t, os.WriteFile(bitmapPath, []byte{ 'B', 'I', 'T', 'O', 0, 0, 0, 0, - }, perm.SharedFile)) + }, perm.PrivateWriteOnceFile)) return bitmapPath }, expectedErr: fmt.Errorf("invalid bitmap signature: %q", "BITO"), @@ -1772,7 +1772,7 @@ func TestBitmapInfoForPath(t *testing.T) { bitmapPath := filepath.Join(testhelper.TempDir(t), "bitmap") require.NoError(t, os.WriteFile(bitmapPath, []byte{ 'B', 'I', 'T', 'M', 0, 2, 0, 0, - }, perm.SharedFile)) + }, perm.PrivateWriteOnceFile)) return bitmapPath }, 
expectedErr: fmt.Errorf("unsupported version: 2"), diff --git a/internal/gitaly/config/config_test.go b/internal/gitaly/config/config_test.go index 1f5f5caa28..0a94f54573 100644 --- a/internal/gitaly/config/config_test.go +++ b/internal/gitaly/config/config_test.go @@ -1753,7 +1753,7 @@ func TestSetupRuntimeDirectory(t *testing.T) { t.Run("validation", func(t *testing.T) { dirPath := testhelper.TempDir(t) filePath := filepath.Join(dirPath, "file") - require.NoError(t, os.WriteFile(filePath, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(filePath, nil, perm.PrivateWriteOnceFile)) for _, tc := range []struct { desc string @@ -2098,7 +2098,7 @@ func TestStorage_Validate(t *testing.T) { dirPath := testhelper.TempDir(t) filePath := filepath.Join(dirPath, "file") - require.NoError(t, os.WriteFile(filePath, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(filePath, nil, perm.PrivateWriteOnceFile)) for _, tc := range []struct { name string storage Storage @@ -2140,7 +2140,7 @@ func TestTLS_Validate(t *testing.T) { tmpDir := testhelper.TempDir(t) tmpFile := filepath.Join(tmpDir, "file") - require.NoError(t, os.WriteFile(tmpFile, []byte("I am not a certificate"), perm.SharedFile)) + require.NoError(t, os.WriteFile(tmpFile, []byte("I am not a certificate"), perm.PrivateWriteOnceFile)) for _, tc := range []struct { name string @@ -2241,7 +2241,7 @@ func TestGitlabShell_Validate(t *testing.T) { tmpDir := testhelper.TempDir(t) tmpFile := filepath.Join(tmpDir, "file") - require.NoError(t, os.WriteFile(tmpFile, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(tmpFile, nil, perm.PrivateWriteOnceFile)) for _, tc := range []struct { name string diff --git a/internal/gitaly/linguist/language_stats_test.go b/internal/gitaly/linguist/language_stats_test.go index 0f003054b9..0b5046513d 100644 --- a/internal/gitaly/linguist/language_stats_test.go +++ b/internal/gitaly/linguist/language_stats_test.go @@ -51,7 +51,7 @@ func TestInitLanguageStats(t *testing.T) { { 
desc: "corrupt cache", run: func(t *testing.T, repo *localrepo.Repo, repoPath string) { - require.NoError(t, os.WriteFile(filepath.Join(repoPath, languageStatsFilename), []byte("garbage"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(repoPath, languageStatsFilename), []byte("garbage"), perm.PrivateWriteOnceFile)) stats, err := initLanguageStats(ctx, repo) require.Errorf(t, err, "new language stats zlib reader: invalid header") diff --git a/internal/gitaly/linguist/linguist_test.go b/internal/gitaly/linguist/linguist_test.go index 73a8dcece2..bf3750d718 100644 --- a/internal/gitaly/linguist/linguist_test.go +++ b/internal/gitaly/linguist/linguist_test.go @@ -457,7 +457,7 @@ func TestInstance_Stats(t *testing.T) { gittest.TreeEntry{Path: "application.rb", Mode: "100644", Content: strings.Repeat("a", 2943)}, )) - require.NoError(t, os.WriteFile(filepath.Join(repoPath, languageStatsFilename), []byte("garbage"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(repoPath, languageStatsFilename), []byte("garbage"), perm.PrivateWriteOnceFile)) return repoProto, repoPath, commitID }, diff --git a/internal/gitaly/repoutil/custom_hooks_test.go b/internal/gitaly/repoutil/custom_hooks_test.go index 1e8aa4c8ef..3e187d86f5 100644 --- a/internal/gitaly/repoutil/custom_hooks_test.go +++ b/internal/gitaly/repoutil/custom_hooks_test.go @@ -348,10 +348,10 @@ func TestNewDirectoryVote(t *testing.T) { { desc: "generated hash matches with changed file mode", files: []testFile{ - {name: "pre-commit.sample", content: "foo", mode: perm.SharedFile}, + {name: "pre-commit.sample", content: "foo", mode: perm.PrivateWriteOnceFile}, {name: "pre-push.sample", content: "bar", mode: perm.PrivateExecutable}, }, - expectedHash: "c81ab4e8cca863a4e8d24c080d3daefcf5f0f8aa", + expectedHash: "ad20a4fea20e9049bb70e084e757fcc5d2cf2cc7", }, } { t.Run(tc.desc, func(t *testing.T) { diff --git a/internal/gitaly/repoutil/remove_test.go b/internal/gitaly/repoutil/remove_test.go 
index 087cdc8a3c..13ab9ecce5 100644 --- a/internal/gitaly/repoutil/remove_test.go +++ b/internal/gitaly/repoutil/remove_test.go @@ -50,7 +50,7 @@ func TestRemove(t *testing.T) { // Simulate a concurrent RPC holding the repository lock. lockPath := repoPath + ".lock" - require.NoError(t, os.WriteFile(lockPath, []byte{}, perm.SharedFile)) + require.NoError(t, os.WriteFile(lockPath, []byte{}, perm.PrivateWriteOnceFile)) tb.Cleanup(func() { require.NoError(t, os.RemoveAll(lockPath)) }) diff --git a/internal/gitaly/service/objectpool/alternates_test.go b/internal/gitaly/service/objectpool/alternates_test.go index 2dfa3dbccc..fc482fcf8a 100644 --- a/internal/gitaly/service/objectpool/alternates_test.go +++ b/internal/gitaly/service/objectpool/alternates_test.go @@ -93,7 +93,7 @@ func TestDisconnectGitAlternatesUnexpectedAlternates(t *testing.T) { altPath, err := repo.InfoAlternatesPath(ctx) require.NoError(t, err) - require.NoError(t, os.WriteFile(altPath, []byte(tc.altContent), perm.SharedFile)) + require.NoError(t, os.WriteFile(altPath, []byte(tc.altContent), perm.PrivateWriteOnceFile)) _, err = client.DisconnectGitAlternates(ctx, &gitalypb.DisconnectGitAlternatesRequest{Repository: repoProto}) require.Error(t, err) diff --git a/internal/gitaly/service/objectpool/create_test.go b/internal/gitaly/service/objectpool/create_test.go index 49452decdd..d0f497ee6b 100644 --- a/internal/gitaly/service/objectpool/create_test.go +++ b/internal/gitaly/service/objectpool/create_test.go @@ -144,7 +144,7 @@ func TestCreate_unsuccessful(t *testing.T) { lockedRelativePath := gittest.NewObjectPoolName(t) lockedFullPath := filepath.Join(cfg.Storages[0].Path, lockedRelativePath+".lock") require.NoError(t, os.MkdirAll(filepath.Dir(lockedFullPath), perm.PrivateDir)) - require.NoError(t, os.WriteFile(lockedFullPath, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(lockedFullPath, nil, perm.PrivateWriteOnceFile)) // Create a preexisting object pool. 
preexistingPool := &gitalypb.ObjectPool{ diff --git a/internal/gitaly/service/objectpool/get_test.go b/internal/gitaly/service/objectpool/get_test.go index 6ef44351ba..f05276c120 100644 --- a/internal/gitaly/service/objectpool/get_test.go +++ b/internal/gitaly/service/objectpool/get_test.go @@ -55,7 +55,7 @@ func TestGetObjectPoolBadFile(t *testing.T) { alternatesFilePath := filepath.Join(repoPath, "objects", "info", "alternates") require.NoError(t, os.MkdirAll(filepath.Dir(alternatesFilePath), perm.PrivateDir)) - require.NoError(t, os.WriteFile(alternatesFilePath, []byte("not-a-directory"), perm.SharedFile)) + require.NoError(t, os.WriteFile(alternatesFilePath, []byte("not-a-directory"), perm.PrivateWriteOnceFile)) resp, err := client.GetObjectPool(ctx, &gitalypb.GetObjectPoolRequest{ Repository: repo, diff --git a/internal/gitaly/service/objectpool/link_test.go b/internal/gitaly/service/objectpool/link_test.go index 031c73957e..3294b5216a 100644 --- a/internal/gitaly/service/objectpool/link_test.go +++ b/internal/gitaly/service/objectpool/link_test.go @@ -186,7 +186,7 @@ func TestLink_noClobber(t *testing.T) { require.NoFileExists(t, alternatesFile) contentBefore := "mock/objects\n" - require.NoError(t, os.WriteFile(alternatesFile, []byte(contentBefore), perm.SharedFile)) + require.NoError(t, os.WriteFile(alternatesFile, []byte(contentBefore), perm.PrivateWriteOnceFile)) request := &gitalypb.LinkRepositoryToObjectPoolRequest{ Repository: repoProto, diff --git a/internal/gitaly/service/repository/create_fork_test.go b/internal/gitaly/service/repository/create_fork_test.go index f1ff59f8dc..492ed63209 100644 --- a/internal/gitaly/service/repository/create_fork_test.go +++ b/internal/gitaly/service/repository/create_fork_test.go @@ -310,7 +310,7 @@ func TestCreateFork_targetExists(t *testing.T) { require.NoError(t, os.WriteFile( filepath.Join(targetPath, "config"), nil, - perm.SharedFile, + perm.PrivateWriteOnceFile, )) }, expectedErr: func() error { @@ -325,7 
+325,7 @@ func TestCreateFork_targetExists(t *testing.T) { desc: "target file", seed: func(t *testing.T, targetPath string) { require.NoError(t, os.MkdirAll(filepath.Dir(targetPath), perm.PrivateDir)) - require.NoError(t, os.WriteFile(targetPath, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(targetPath, nil, perm.PrivateWriteOnceFile)) }, expectedErr: func() error { if testhelper.IsWALEnabled() { diff --git a/internal/gitaly/service/repository/create_repository_from_url_test.go b/internal/gitaly/service/repository/create_repository_from_url_test.go index 9fdf60af3d..3720ac23ce 100644 --- a/internal/gitaly/service/repository/create_repository_from_url_test.go +++ b/internal/gitaly/service/repository/create_repository_from_url_test.go @@ -142,7 +142,7 @@ testing of this scenario should be left to the relevant package. require.NoError(t, os.MkdirAll(importedRepoPath, perm.PrivateDir)) } else { require.NoError(t, os.MkdirAll(filepath.Dir(importedRepoPath), perm.PrivateDir)) - require.NoError(t, os.WriteFile(importedRepoPath, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(importedRepoPath, nil, perm.PrivateWriteOnceFile)) } t.Cleanup(func() { require.NoError(t, os.RemoveAll(importedRepoPath)) }) diff --git a/internal/gitaly/service/repository/fetch_remote_test.go b/internal/gitaly/service/repository/fetch_remote_test.go index f4ed58261a..be7964795a 100644 --- a/internal/gitaly/service/repository/fetch_remote_test.go +++ b/internal/gitaly/service/repository/fetch_remote_test.go @@ -1127,7 +1127,7 @@ func TestFetchRemote_pooledRepository(t *testing.T) { // Create the pooled repository and link it to its pool. This is the // repository we're fetching into. 
pooledRepoProto, pooledRepoPath := gittest.CreateRepository(t, ctx, cfg) - require.NoError(t, os.WriteFile(filepath.Join(pooledRepoPath, "objects", "info", "alternates"), []byte(filepath.Join(poolRepoPath, "objects")), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(pooledRepoPath, "objects", "info", "alternates"), []byte(filepath.Join(poolRepoPath, "objects")), perm.PrivateWriteOnceFile)) // And then finally create a third repository that emulates the remote side // we're fetching from. We need to create at least one reference so that Git diff --git a/internal/gitaly/service/repository/fsck_test.go b/internal/gitaly/service/repository/fsck_test.go index 283be0d5e2..365ef9a3ec 100644 --- a/internal/gitaly/service/repository/fsck_test.go +++ b/internal/gitaly/service/repository/fsck_test.go @@ -77,7 +77,7 @@ func TestFsck(t *testing.T) { // This makes the repo severely broken so that `git` does not // identify it as a proper repository anymore. require.NoError(t, os.RemoveAll(filepath.Join(repoPath, "objects"))) - require.NoError(t, os.WriteFile(filepath.Join(repoPath, "objects"), nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(repoPath, "objects"), nil, perm.PrivateWriteOnceFile)) setupData := setupData{ repo: repo, diff --git a/internal/gitaly/service/repository/info_attributes_test.go b/internal/gitaly/service/repository/info_attributes_test.go index 75a4e6a4a5..3af71bd576 100644 --- a/internal/gitaly/service/repository/info_attributes_test.go +++ b/internal/gitaly/service/repository/info_attributes_test.go @@ -30,7 +30,7 @@ func TestGetInfoAttributesExisting(t *testing.T) { buffSize := streamio.WriteBufferSize + 1 data := bytes.Repeat([]byte("*.pbxproj binary\n"), buffSize) attrsPath := filepath.Join(infoPath, "attributes") - err := os.WriteFile(attrsPath, data, perm.SharedFile) + err := os.WriteFile(attrsPath, data, perm.PrivateWriteOnceFile) require.NoError(t, err) gitattributesContent := "*.go diff=go text\n*.md 
text\n*.jpg -text" diff --git a/internal/gitaly/service/repository/object_format_test.go b/internal/gitaly/service/repository/object_format_test.go index d11a7e9cf5..1efc6acd7c 100644 --- a/internal/gitaly/service/repository/object_format_test.go +++ b/internal/gitaly/service/repository/object_format_test.go @@ -142,7 +142,7 @@ func TestObjectFormat(t *testing.T) { "[extensions]", "objectFormat = blake2b", }, "\n"), - ), perm.SharedFile)) + ), perm.PrivateWriteOnceFile)) return setupData{ request: &gitalypb.ObjectFormatRequest{ diff --git a/internal/gitaly/service/repository/remove_test.go b/internal/gitaly/service/repository/remove_test.go index 05d3f876e5..4f12587271 100644 --- a/internal/gitaly/service/repository/remove_test.go +++ b/internal/gitaly/service/repository/remove_test.go @@ -83,7 +83,7 @@ logic will be removed once transaction managements is always enabled.`) // Simulate a concurrent RPC holding the repository lock. lockPath := repoPath + ".lock" - require.NoError(t, os.WriteFile(lockPath, []byte{}, perm.SharedFile)) + require.NoError(t, os.WriteFile(lockPath, []byte{}, perm.PrivateWriteOnceFile)) defer func() { require.NoError(t, os.RemoveAll(lockPath)) }() _, err := client.RemoveRepository(ctx, &gitalypb.RemoveRepositoryRequest{Repository: repo}) diff --git a/internal/gitaly/service/repository/replicate_test.go b/internal/gitaly/service/repository/replicate_test.go index f67ab3d5e7..eaf6b3c691 100644 --- a/internal/gitaly/service/repository/replicate_test.go +++ b/internal/gitaly/service/repository/replicate_test.go @@ -105,7 +105,7 @@ func TestReplicateRepository(t *testing.T) { attrFilePath := filepath.Join(sourcePath, "info", "attributes") require.NoError(t, os.MkdirAll(filepath.Dir(attrFilePath), perm.PrivateDir)) attributesData := []byte("*.pbxproj binary\n") - require.NoError(t, os.WriteFile(attrFilePath, attributesData, perm.SharedFile)) + require.NoError(t, os.WriteFile(attrFilePath, attributesData, perm.PrivateWriteOnceFile)) return 
setupData{ source: source, diff --git a/internal/gitaly/service/repository/snapshot_test.go b/internal/gitaly/service/repository/snapshot_test.go index c2761f1039..0edbe895b4 100644 --- a/internal/gitaly/service/repository/snapshot_test.go +++ b/internal/gitaly/service/repository/snapshot_test.go @@ -139,7 +139,7 @@ func TestGetSnapshot(t *testing.T) { ) // The shallow file, used if the repository is a shallow clone, is also included in snapshots. - require.NoError(t, os.WriteFile(filepath.Join(repoPath, "shallow"), nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(repoPath, "shallow"), nil, perm.PrivateWriteOnceFile)) // Custom Git hooks are not included in snapshots. require.NoError(t, os.MkdirAll(filepath.Join(repoPath, "hooks"), perm.PrivateDir)) @@ -148,7 +148,7 @@ func TestGetSnapshot(t *testing.T) { require.NoError(t, os.WriteFile( filepath.Join(repoPath, "objects/this-should-not-be-included"), nil, - perm.SharedFile, + perm.PrivateWriteOnceFile, )) return setupData{ @@ -227,7 +227,7 @@ func TestGetSnapshot(t *testing.T) { require.NoError(t, os.WriteFile( altFile, []byte(fmt.Sprintf("%s\n", altObjectDir)), - perm.SharedFile, + perm.PrivateWriteOnceFile, )) gittest.RequireObjectExists(t, cfg, repoPath, commitID) diff --git a/internal/gitaly/service/smarthttp/inforefs_test.go b/internal/gitaly/service/smarthttp/inforefs_test.go index f7cc7e4d7c..13c395fa6b 100644 --- a/internal/gitaly/service/smarthttp/inforefs_test.go +++ b/internal/gitaly/service/smarthttp/inforefs_test.go @@ -676,7 +676,7 @@ func withInfoRefCache(cache infoRefCache) ServerOpt { func replaceCachedResponse(tb testing.TB, ctx context.Context, cache *cache.DiskCache, req *gitalypb.InfoRefsRequest, newContents string) { path := pathToCachedResponse(tb, ctx, cache, req) - require.NoError(tb, os.WriteFile(path, []byte(newContents), perm.SharedFile)) + require.NoError(tb, os.WriteFile(path, []byte(newContents), perm.PrivateWriteOnceFile)) } func setInfoRefsUploadPackMethod(ctx 
context.Context) context.Context { diff --git a/internal/gitaly/service/ssh/receive_pack_test.go b/internal/gitaly/service/ssh/receive_pack_test.go index 604613c772..d3de1390ee 100644 --- a/internal/gitaly/service/ssh/receive_pack_test.go +++ b/internal/gitaly/service/ssh/receive_pack_test.go @@ -294,7 +294,7 @@ func TestReceivePack_invalidGitconfig(t *testing.T) { // Remove the config file first as files are read-only with transactions. configPath := filepath.Join(remoteRepoPath, "config") require.NoError(t, os.Remove(configPath)) - require.NoError(t, os.WriteFile(configPath, []byte("x x x foobar"), perm.SharedFile)) + require.NoError(t, os.WriteFile(configPath, []byte("x x x foobar"), perm.PrivateWriteOnceFile)) remoteRepo.GlProjectPath = "something" lHead, rHead, err := setupRepoAndPush(t, ctx, cfg, &gitalypb.SSHReceivePackRequest{ diff --git a/internal/gitaly/service/ssh/upload_pack_test.go b/internal/gitaly/service/ssh/upload_pack_test.go index f5d6069b56..b8fd592838 100644 --- a/internal/gitaly/service/ssh/upload_pack_test.go +++ b/internal/gitaly/service/ssh/upload_pack_test.go @@ -835,7 +835,7 @@ func testUploadPackGitFailure(t *testing.T, ctx context.Context) { // Remove the config file first as files are read-only with transactions. 
configPath := filepath.Join(repoPath, "config") require.NoError(t, os.Remove(configPath)) - require.NoError(t, os.WriteFile(configPath, []byte("Not a valid gitconfig"), perm.SharedFile)) + require.NoError(t, os.WriteFile(configPath, []byte("Not a valid gitconfig"), perm.PrivateWriteOnceFile)) stream, err := client.SSHUploadPack(ctx) require.NoError(t, err) diff --git a/internal/gitaly/storage/storagemgr/apply_operations_test.go b/internal/gitaly/storage/storagemgr/apply_operations_test.go index 159f2bcfba..20661864dc 100644 --- a/internal/gitaly/storage/storagemgr/apply_operations_test.go +++ b/internal/gitaly/storage/storagemgr/apply_operations_test.go @@ -35,7 +35,7 @@ func TestApplyOperations(t *testing.T) { "parent": {Mode: fs.ModeDir | perm.PrivateDir}, "parent/relative-path": {Mode: fs.ModeDir | perm.PrivateDir}, "parent/relative-path/private-file": {Mode: perm.PrivateWriteOnceFile, Data: []byte("private")}, - "parent/relative-path/shared-file": {Mode: perm.SharedFile, Data: []byte("shared")}, + "parent/relative-path/shared-file": {Mode: perm.PrivateWriteOnceFile, Data: []byte("shared")}, "parent/relative-path/empty-dir": {Mode: fs.ModeDir | perm.PrivateDir}, "parent/relative-path/removed-dir": {Mode: fs.ModeDir | perm.PrivateDir}, "parent/relative-path/dir-with-removed-file": {Mode: fs.ModeDir | perm.PrivateDir}, diff --git a/internal/gitaly/storage/wal/entry_test.go b/internal/gitaly/storage/wal/entry_test.go index 9bb5b0e97e..5e0e7b31d7 100644 --- a/internal/gitaly/storage/wal/entry_test.go +++ b/internal/gitaly/storage/wal/entry_test.go @@ -24,7 +24,7 @@ func setupTestDirectory(t *testing.T, path string) { require.NoError(t, os.WriteFile(filepath.Join(path, "file-1"), []byte("file-1"), perm.PrivateExecutable)) privateSubDir := filepath.Join(filepath.Join(path, "subdir-private")) require.NoError(t, os.Mkdir(privateSubDir, perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(privateSubDir, "file-2"), []byte("file-2"), perm.SharedFile)) + 
require.NoError(t, os.WriteFile(filepath.Join(privateSubDir, "file-2"), []byte("file-2"), perm.PrivateWriteOnceFile)) sharedSubDir := filepath.Join(path, "subdir-shared") require.NoError(t, os.Mkdir(sharedSubDir, perm.PrivateDir)) require.NoError(t, os.WriteFile(filepath.Join(sharedSubDir, "file-3"), []byte("file-3"), perm.PrivateWriteOnceFile)) @@ -268,12 +268,12 @@ func TestRecordAlternateUnlink(t *testing.T) { "objects/info": {Mode: fs.ModeDir | perm.PrivateDir}, "objects/3f": {Mode: fs.ModeDir | perm.PrivateDir}, "objects/3f/1": {Mode: perm.PrivateWriteOnceFile}, - "objects/3f/2": {Mode: perm.SharedFile}, + "objects/3f/2": {Mode: perm.PrivateWriteOnceFile}, "objects/4f": {Mode: fs.ModeDir | perm.PrivateDir}, - "objects/4f/3": {Mode: perm.SharedFile}, + "objects/4f/3": {Mode: perm.PrivateWriteOnceFile}, "objects/pack": {Mode: fs.ModeDir | perm.PrivateDir}, "objects/pack/pack.pack": {Mode: perm.PrivateWriteOnceFile}, - "objects/pack/pack.idx": {Mode: perm.SharedFile}, + "objects/pack/pack.idx": {Mode: perm.PrivateWriteOnceFile}, }) } @@ -311,9 +311,9 @@ func TestRecordAlternateUnlink(t *testing.T) { "objects/3f": {Mode: fs.ModeDir | perm.PrivateDir}, "objects/3f/1": {Mode: perm.PrivateWriteOnceFile}, "objects/4f": {Mode: fs.ModeDir | perm.PrivateDir}, - "objects/4f/3": {Mode: perm.SharedFile}, + "objects/4f/3": {Mode: perm.PrivateWriteOnceFile}, "objects/pack": {Mode: fs.ModeDir | perm.PrivateDir}, - "objects/pack/pack.idx": {Mode: perm.SharedFile}, + "objects/pack/pack.idx": {Mode: perm.PrivateWriteOnceFile}, }) }, expectedOperations: func() operations { diff --git a/internal/gitlab/test_server.go b/internal/gitlab/test_server.go index b0a2ec5ee4..f9fa31efae 100644 --- a/internal/gitlab/test_server.go +++ b/internal/gitlab/test_server.go @@ -28,7 +28,7 @@ func WriteShellSecretFile(tb testing.TB, dir, secretToken string) string { require.NoError(tb, os.MkdirAll(dir, perm.PrivateDir)) filePath := filepath.Join(dir, ".gitlab_shell_secret") - require.NoError(tb, 
os.WriteFile(filePath, []byte(secretToken), perm.SharedFile)) + require.NoError(tb, os.WriteFile(filePath, []byte(secretToken), perm.PrivateWriteOnceFile)) return filePath } diff --git a/internal/safe/locking_directory_test.go b/internal/safe/locking_directory_test.go index 717c1e498b..054684c601 100644 --- a/internal/safe/locking_directory_test.go +++ b/internal/safe/locking_directory_test.go @@ -29,7 +29,7 @@ func TestLockingDirectory(t *testing.T) { require.NoError(t, os.WriteFile( filepath.Join(path, "somefile"), []byte("data"), - perm.SharedFile), + perm.PrivateWriteOnceFile), ) assert.ErrorIs(t, secondLockingDir.Lock(), safe.ErrFileAlreadyLocked) require.NoError(t, lockingDir.Unlock()) diff --git a/internal/safe/locking_file_writer_test.go b/internal/safe/locking_file_writer_test.go index 2302aca4e5..627742c989 100644 --- a/internal/safe/locking_file_writer_test.go +++ b/internal/safe/locking_file_writer_test.go @@ -148,7 +148,7 @@ func TestLockingFileWriter_seedingWithExistingTarget(t *testing.T) { t.Parallel() target := filepath.Join(testhelper.TempDir(t), "file") - require.NoError(t, os.WriteFile(target, []byte("seed"), perm.SharedFile)) + require.NoError(t, os.WriteFile(target, []byte("seed"), perm.PrivateWriteOnceFile)) writer, err := safe.NewLockingFileWriter(target, safe.LockingFileWriterConfig{ SeedContents: true, @@ -166,7 +166,7 @@ func TestLockingFileWriter_modifiesExistingFiles(t *testing.T) { t.Parallel() target := filepath.Join(testhelper.TempDir(t), "file") - require.NoError(t, os.WriteFile(target, []byte("preexisting"), perm.SharedFile)) + require.NoError(t, os.WriteFile(target, []byte("preexisting"), perm.PrivateWriteOnceFile)) writer, err := safe.NewLockingFileWriter(target) require.NoError(t, err) @@ -182,7 +182,7 @@ func TestLockingFileWriter_modifiesExistingFilesWithMode(t *testing.T) { t.Parallel() target := filepath.Join(testhelper.TempDir(t), "file") - require.NoError(t, os.WriteFile(target, []byte("preexisting"), perm.SharedFile)) + 
require.NoError(t, os.WriteFile(target, []byte("preexisting"), perm.PrivateWriteOnceFile)) writer, err := safe.NewLockingFileWriter(target, safe.LockingFileWriterConfig{ FileWriterConfig: safe.FileWriterConfig{FileMode: 0o060}, @@ -205,7 +205,7 @@ func TestLockingFileWriter_concurrentCreation(t *testing.T) { require.NoError(t, err) // Create file concurrently. - require.NoError(t, os.WriteFile(target, []byte("concurrent"), perm.SharedFile)) + require.NoError(t, os.WriteFile(target, []byte("concurrent"), perm.PrivateWriteOnceFile)) require.Equal(t, fmt.Errorf("file concurrently created"), writer.Lock()) @@ -217,7 +217,7 @@ func TestLockingFileWriter_concurrentDeletion(t *testing.T) { target := filepath.Join(testhelper.TempDir(t), "file") - require.NoError(t, os.WriteFile(target, []byte("base"), perm.SharedFile)) + require.NoError(t, os.WriteFile(target, []byte("base"), perm.PrivateWriteOnceFile)) writer, err := safe.NewLockingFileWriter(target) require.NoError(t, err) @@ -234,12 +234,13 @@ func TestLockingFileWriter_concurrentModification(t *testing.T) { target := filepath.Join(testhelper.TempDir(t), "file") - require.NoError(t, os.WriteFile(target, []byte("base"), perm.SharedFile)) + require.NoError(t, os.WriteFile(target, []byte("base"), perm.PrivateWriteOnceFile)) writer, err := safe.NewLockingFileWriter(target) require.NoError(t, err) // Concurrently modify the file. 
- require.NoError(t, os.WriteFile(target, []byte("concurrent"), perm.SharedFile)) + require.NoError(t, os.Remove(target)) + require.NoError(t, os.WriteFile(target, []byte("concurrent"), perm.PrivateWriteOnceFile)) require.Equal(t, fmt.Errorf("file concurrently modified"), writer.Lock()) @@ -272,13 +273,13 @@ func TestLockingFileWriter_locked(t *testing.T) { t.Parallel() target := filepath.Join(testhelper.TempDir(t), "file") - require.NoError(t, os.WriteFile(target, []byte("base"), perm.SharedFile)) + require.NoError(t, os.WriteFile(target, []byte("base"), perm.PrivateWriteOnceFile)) writer, err := safe.NewLockingFileWriter(target) require.NoError(t, err) // Concurrently lock the file. - require.NoError(t, os.WriteFile(target+".lock", nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(target+".lock", nil, perm.PrivateWriteOnceFile)) require.Equal(t, safe.ErrFileAlreadyLocked, writer.Lock()) @@ -291,7 +292,7 @@ func TestLockingFileWriter_externalProcess(t *testing.T) { cfg := testcfg.Build(t) target := filepath.Join(testhelper.TempDir(t), "file") - require.NoError(t, os.WriteFile(target, []byte("base"), perm.SharedFile)) + require.NoError(t, os.WriteFile(target, []byte("base"), perm.PrivateWriteOnceFile)) writer, err := safe.NewLockingFileWriter(target) require.NoError(t, err) diff --git a/internal/streamcache/cache_test.go b/internal/streamcache/cache_test.go index 421f6cc38f..6fabb71d4f 100644 --- a/internal/streamcache/cache_test.go +++ b/internal/streamcache/cache_test.go @@ -358,7 +358,7 @@ func TestCache_unWriteableFile(t *testing.T) { c := newCache(t, tmp) innerCache(c).createFile = func() (namedWriteCloser, error) { - return os.OpenFile(filepath.Join(tmp, "unwriteable"), os.O_RDONLY|os.O_CREATE|os.O_EXCL, perm.SharedFile) + return os.OpenFile(filepath.Join(tmp, "unwriteable"), os.O_RDONLY|os.O_CREATE|os.O_EXCL, perm.PrivateWriteOnceFile) } _, _, err := c.Fetch(ctx, "key", io.Discard, func(w io.Writer) error { @@ -379,7 +379,7 @@ func 
TestCache_unCloseableFile(t *testing.T) { c := newCache(t, tmp) innerCache(c).createFile = func() (namedWriteCloser, error) { - f, err := os.OpenFile(filepath.Join(tmp, "uncloseable"), os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm.SharedFile) + f, err := os.OpenFile(filepath.Join(tmp, "uncloseable"), os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm.PrivateWriteOnceFile) if err != nil { return nil, err } @@ -401,7 +401,7 @@ func TestCache_cannotOpenFileForReading(t *testing.T) { c := newCache(t, tmp) innerCache(c).createFile = func() (namedWriteCloser, error) { - f, err := os.OpenFile(filepath.Join(tmp, "unopenable"), os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm.SharedFile) + f, err := os.OpenFile(filepath.Join(tmp, "unopenable"), os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm.PrivateWriteOnceFile) if err != nil { return nil, err } diff --git a/internal/streamcache/filestore_test.go b/internal/streamcache/filestore_test.go index f722cc7edb..16404d4c7e 100644 --- a/internal/streamcache/filestore_test.go +++ b/internal/streamcache/filestore_test.go @@ -109,7 +109,7 @@ func TestFilestoreCleanwalk(t *testing.T) { file := filepath.Join(dir2, "file") require.NoError(t, os.Mkdir(dir1, perm.PrivateDir)) require.NoError(t, os.Mkdir(dir2, perm.PrivateDir)) - require.NoError(t, os.WriteFile(file, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(file, nil, perm.PrivateWriteOnceFile)) require.NoError(t, os.Chmod(dir2, 0), "create dir with pathological permissions") require.NoError(t, fs.cleanWalk(time.Now().Add(time.Hour))) diff --git a/internal/tempdir/clean_test.go b/internal/tempdir/clean_test.go index 75ce5a8145..587efc76c3 100644 --- a/internal/tempdir/clean_test.go +++ b/internal/tempdir/clean_test.go @@ -159,7 +159,7 @@ func makeFile(t *testing.T, locator storage.Locator, storage config.Storage, fil require.NoError(t, err) fullPath := filepath.Join(root, filePath) - require.NoError(t, os.WriteFile(fullPath, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(fullPath, nil, 
perm.PrivateWriteOnceFile)) require.NoError(t, os.Chtimes(fullPath, mtime, mtime)) } diff --git a/internal/tempdir/tempdir_test.go b/internal/tempdir/tempdir_test.go index 391afb1128..1e09c1dd47 100644 --- a/internal/tempdir/tempdir_test.go +++ b/internal/tempdir/tempdir_test.go @@ -29,7 +29,7 @@ func TestNewRepositorySuccess(t *testing.T) { require.NoError(t, err) require.Equal(t, tempDir.Path(), calculatedPath) - require.NoError(t, os.WriteFile(filepath.Join(tempDir.Path(), "test"), []byte("hello"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(tempDir.Path(), "test"), []byte("hello"), perm.PrivateWriteOnceFile)) require.DirExists(t, tempDir.Path()) diff --git a/internal/testhelper/testcfg/gitaly.go b/internal/testhelper/testcfg/gitaly.go index 135d9f3256..6f4bfc5984 100644 --- a/internal/testhelper/testcfg/gitaly.go +++ b/internal/testhelper/testcfg/gitaly.go @@ -171,7 +171,7 @@ func WriteTemporaryGitalyConfigFile(tb testing.TB, cfg config.Cfg) string { contents, err := toml.Marshal(cfg) require.NoError(tb, err) - require.NoError(tb, os.WriteFile(path, contents, perm.SharedFile)) + require.NoError(tb, os.WriteFile(path, contents, perm.PrivateWriteOnceFile)) return path } -- GitLab From 8cfa9c61f3a409dfde36c8e625a0de33643986b9 Mon Sep 17 00:00:00 2001 From: Divya Rani Date: Wed, 17 Jul 2024 08:33:10 +0530 Subject: [PATCH 17/17] Add gitaly git subcommand --- cmd/gitaly/main_test.go | 2 +- internal/cli/gitaly/app.go | 1 + internal/cli/gitaly/subcmd_git.go | 77 ++++++++++++++++++ internal/cli/gitaly/subcmd_git_test.go | 105 +++++++++++++++++++++++++ 4 files changed, 184 insertions(+), 1 deletion(-) create mode 100644 internal/cli/gitaly/subcmd_git.go create mode 100644 internal/cli/gitaly/subcmd_git_test.go diff --git a/cmd/gitaly/main_test.go b/cmd/gitaly/main_test.go index 0b2cac70e1..0746fc8a08 100644 --- a/cmd/gitaly/main_test.go +++ b/cmd/gitaly/main_test.go @@ -31,7 +31,7 @@ func TestGitalyCLI(t *testing.T) { { desc: "without arguments", 
exitCode: 2, - stdout: "NAME:\n gitaly - a Git RPC service\n\nUSAGE:\n gitaly command [command options] \n\nDESCRIPTION:\n Gitaly is a Git RPC service for handling Git calls.\n\nCOMMANDS:\n serve launch the server daemon\n check verify internal API is accessible\n configuration run configuration-related commands\n hooks manage Git hooks\n bundle-uri Generate bundle URI bundle\n\nOPTIONS:\n --help, -h show help\n --version, -v print the version\n", + stdout: "NAME:\n gitaly - a Git RPC service\n\nUSAGE:\n gitaly command [command options] \n\nDESCRIPTION:\n Gitaly is a Git RPC service for handling Git calls.\n\nCOMMANDS:\n serve launch the server daemon\n check verify internal API is accessible\n configuration run configuration-related commands\n hooks manage Git hooks\n bundle-uri Generate bundle URI bundle\n git execute Git commands using Gitaly's embedded Git\n\nOPTIONS:\n --help, -h show help\n --version, -v print the version\n", }, { desc: "with non-existent config", diff --git a/internal/cli/gitaly/app.go b/internal/cli/gitaly/app.go index a6976d96ce..02212f94b2 100644 --- a/internal/cli/gitaly/app.go +++ b/internal/cli/gitaly/app.go @@ -33,6 +33,7 @@ func NewApp() *cli.App { newConfigurationCommand(), newHooksCommand(), newBundleURICommand(), + newGitCommand(), }, } } diff --git a/internal/cli/gitaly/subcmd_git.go b/internal/cli/gitaly/subcmd_git.go new file mode 100644 index 0000000000..7c252c3104 --- /dev/null +++ b/internal/cli/gitaly/subcmd_git.go @@ -0,0 +1,77 @@ +package gitaly + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + + "github.com/urfave/cli/v2" + "gitlab.com/gitlab-org/gitaly/v16/internal/git" + "gitlab.com/gitlab-org/gitaly/v16/internal/log" +) + +func newGitCommand() *cli.Command { + return &cli.Command{ + Name: "git", + Usage: "execute Git commands using Gitaly's embedded Git", + UsageText: `gitaly git [git-command] [args...] 
+ +Example: gitaly git status`, + Description: "Execute Git commands using the same Git execution environment as Gitaly.", + Action: gitAction, + HideHelpCommand: true, + ArgsUsage: "[git-command] [args...]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: flagConfig, + Usage: "path to Gitaly configuration", + Aliases: []string{"c"}, + Required: true, + }, + }, + } +} + +func gitAction(ctx *cli.Context) error { + logger := log.ConfigureCommand() + + if ctx.NArg() < 1 { + if err := cli.ShowSubcommandHelp(ctx); err != nil { + return err + } + return cli.Exit("error: Git command required", 1) + } + + cfg, err := loadConfig(ctx.String(flagConfig)) + if err != nil { + return fmt.Errorf("load config: %w", err) + } + + gitCmdFactory, cleanup, err := git.NewExecCommandFactory(cfg, logger) + if err != nil { + return fmt.Errorf("creating Git command factory: %w", err) + } + defer cleanup() + + gitBinaryPath := gitCmdFactory.GetExecutionEnvironment(ctx.Context).BinaryPath + + cmd := exec.Command(gitBinaryPath, ctx.Args().Slice()...) 
+ cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Env = append(os.Environ(), + fmt.Sprintf("GIT_EXEC_PATH=%s", filepath.Dir(gitBinaryPath)), + fmt.Sprintf("PATH=%s:%s", filepath.Dir(gitBinaryPath), os.Getenv("PATH")), + ) + + err = cmd.Run() + if err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + return cli.Exit("", exitError.ExitCode()) + } + return fmt.Errorf("executing git command: %w", err) + } + + return nil +} diff --git a/internal/cli/gitaly/subcmd_git_test.go b/internal/cli/gitaly/subcmd_git_test.go new file mode 100644 index 0000000000..07be305f3a --- /dev/null +++ b/internal/cli/gitaly/subcmd_git_test.go @@ -0,0 +1,105 @@ +package gitaly + +import ( + "bytes" + "os" + "os/exec" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + "gitlab.com/gitlab-org/gitaly/v16/internal/git/gittest" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/config" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/service/setup" + "gitlab.com/gitlab-org/gitaly/v16/internal/helper/perm" + "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper" + "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper/testcfg" + "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper/testserver" +) + +func TestGitalyGitCommand(t *testing.T) { + ctx := testhelper.Context(t) + cfg := testcfg.Build(t) + dir := testhelper.TempDir(t) + seedDirWithExecutables := func(t *testing.T, executableNames ...string) { + for _, executableName := range executableNames { + require.NoError(t, os.WriteFile(filepath.Join(dir, executableName), nil, perm.PrivateExecutable)) + } + } + + // Git environments supported by the Git command factory. 
+ seedDirWithExecutables(t, "gitaly-git-v2.45", "gitaly-git-remote-http-v2.45", "gitaly-git-http-backend-v2.45", + "gitaly-git-v2.44", "gitaly-git-remote-http-v2.44", "gitaly-git-http-backend-v2.44") + t.Setenv("GITALY_TESTING_BUNDLED_GIT_PATH", dir) + + // Ensure we're using bundled binaries + cfg.Git.UseBundledBinaries = true + cfg.BinDir = dir + testcfg.BuildGitaly(t, cfg) + cfg.SocketPath = testserver.RunGitalyServer(t, cfg, setup.RegisterAll) + + repo, repoPath := gittest.CreateRepository(t, ctx, cfg) + gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch("main")) + + cfg.SocketPath = testhelper.GetTemporaryGitalySocketFileName(t) + + cfg.Storages = []config.Storage{ + { + Name: repo.StorageName, + Path: filepath.Dir(repoPath), + }, + } + + configPath := testcfg.WriteTemporaryGitalyConfigFile(t, cfg) + + tests := []struct { + name string + args []string + expectedOutput string + expectedError string + }{ + { + name: "git status", + args: []string{"status"}, + expectedOutput: "On branch main", + }, + { + name: "invalid git command", + args: []string{"invalid-command"}, + expectedError: "exit status 1", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + cmd := exec.Command(cfg.BinaryPath("gitaly"), + "git", + "-c", configPath, + ) + cmd.Args = append(cmd.Args, tt.args...) + cmd.Dir = repoPath // Set the working directory to the repository path + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + t.Logf("Running command: %v in directory: %s", cmd.Args, cmd.Dir) + + err := cmd.Run() + + t.Logf("Stdout: %s", stdout.String()) + t.Logf("Stderr: %s", stderr.String()) + + if tt.expectedError != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tt.expectedError) + } else { + require.NoError(t, err) + require.Contains(t, stdout.String(), tt.expectedOutput) + } + }) + } +} -- GitLab