diff --git a/cmd/gitaly-wrapper/main_test.go b/cmd/gitaly-wrapper/main_test.go index ff113e0da74bbf84836d17d420be7432f969c773..dab9dd91e5b969f2e0aae8c55f396e385ace6492 100644 --- a/cmd/gitaly-wrapper/main_test.go +++ b/cmd/gitaly-wrapper/main_test.go @@ -68,7 +68,7 @@ func TestFindProcess(t *testing.T) { t.Parallel() path := filepath.Join(testhelper.TempDir(t), "pid") - require.NoError(t, os.WriteFile(path, []byte("garbage"), perm.SharedFile)) + require.NoError(t, os.WriteFile(path, []byte("garbage"), perm.PrivateWriteOnceFile)) _, err := findProcess(path) _, expectedErr := strconv.Atoi("garbage") @@ -81,7 +81,7 @@ func TestFindProcess(t *testing.T) { // The below PID can exist, but chances are sufficiently low to hopefully not matter // in practice. path := filepath.Join(testhelper.TempDir(t), "pid") - require.NoError(t, os.WriteFile(path, []byte("7777777"), perm.SharedFile)) + require.NoError(t, os.WriteFile(path, []byte("7777777"), perm.PrivateWriteOnceFile)) // The process isn't alive, so we expect neither an error nor a process to be // returned. 
@@ -116,7 +116,7 @@ func TestFindProcess(t *testing.T) { require.NoError(t, err) path := filepath.Join(testhelper.TempDir(t), "pid") - require.NoError(t, os.WriteFile(path, []byte(strconv.FormatInt(int64(cmd.Process.Pid), 10)), perm.SharedFile)) + require.NoError(t, os.WriteFile(path, []byte(strconv.FormatInt(int64(cmd.Process.Pid), 10)), perm.PrivateWriteOnceFile)) process, err := findProcess(path) require.NotNil(t, process) @@ -174,7 +174,7 @@ func TestReadPIDFile(t *testing.T) { t.Parallel() path := filepath.Join(testhelper.TempDir(t), "pid") - require.NoError(t, os.WriteFile(path, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(path, nil, perm.PrivateWriteOnceFile)) _, err := readPIDFile(path) _, expectedErr := strconv.Atoi("") require.Equal(t, expectedErr, err) @@ -184,7 +184,7 @@ func TestReadPIDFile(t *testing.T) { t.Parallel() path := filepath.Join(testhelper.TempDir(t), "pid") - require.NoError(t, os.WriteFile(path, []byte("invalid"), perm.SharedFile)) + require.NoError(t, os.WriteFile(path, []byte("invalid"), perm.PrivateWriteOnceFile)) _, err := readPIDFile(path) _, expectedErr := strconv.Atoi("invalid") require.Equal(t, expectedErr, err) @@ -194,7 +194,7 @@ func TestReadPIDFile(t *testing.T) { t.Parallel() path := filepath.Join(testhelper.TempDir(t), "pid") - require.NoError(t, os.WriteFile(path, []byte("12345"), perm.SharedFile)) + require.NoError(t, os.WriteFile(path, []byte("12345"), perm.PrivateWriteOnceFile)) pid, err := readPIDFile(path) require.NoError(t, err) require.Equal(t, 12345, pid) @@ -347,7 +347,7 @@ func TestRun(t *testing.T) { // Write the PID of the running process into the PID file. As a result, it should // get adopted by gitaly-wrapper, which means it wouldn't try to execute it anew. 
pidPath := filepath.Join(testhelper.TempDir(t), "pid") - require.NoError(t, os.WriteFile(pidPath, []byte(strconv.FormatInt(int64(scriptCmd.Process.Pid), 10)), perm.SharedFile)) + require.NoError(t, os.WriteFile(pidPath, []byte(strconv.FormatInt(int64(scriptCmd.Process.Pid), 10)), perm.PrivateWriteOnceFile)) // Run gitaly-script with a binary path whose basename matches, but which ultimately // doesn't exist. This proves that it doesn't try to execute the script again. @@ -411,7 +411,7 @@ func TestRun(t *testing.T) { `)) pidPath := filepath.Join(testhelper.TempDir(t), "pid") - require.NoError(t, os.WriteFile(pidPath, []byte("12345"), perm.SharedFile)) + require.NoError(t, os.WriteFile(pidPath, []byte("12345"), perm.PrivateWriteOnceFile)) cmd := exec.CommandContext(ctx, binary, script) cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", bootstrap.EnvPidFile, pidPath)) @@ -442,7 +442,7 @@ func TestRun(t *testing.T) { require.NoError(t, err) pidPath := filepath.Join(testhelper.TempDir(t), "pid") - require.NoError(t, os.WriteFile(pidPath, []byte(strconv.FormatInt(int64(scriptCmd.Process.Pid), 10)), perm.SharedFile)) + require.NoError(t, os.WriteFile(pidPath, []byte(strconv.FormatInt(int64(scriptCmd.Process.Pid), 10)), perm.PrivateWriteOnceFile)) cmd := exec.CommandContext(ctx, binary, script) cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", bootstrap.EnvPidFile, pidPath)) diff --git a/cmd/gitaly/main_test.go b/cmd/gitaly/main_test.go index 0b2cac70e1ca17384d665cdc343a203f400f61d2..0746fc8a0878df0d41226a642554a8695f75d7cb 100644 --- a/cmd/gitaly/main_test.go +++ b/cmd/gitaly/main_test.go @@ -31,7 +31,7 @@ func TestGitalyCLI(t *testing.T) { { desc: "without arguments", exitCode: 2, - stdout: "NAME:\n gitaly - a Git RPC service\n\nUSAGE:\n gitaly command [command options] \n\nDESCRIPTION:\n Gitaly is a Git RPC service for handling Git calls.\n\nCOMMANDS:\n serve launch the server daemon\n check verify internal API is accessible\n configuration run configuration-related 
commands\n hooks manage Git hooks\n bundle-uri Generate bundle URI bundle\n\nOPTIONS:\n --help, -h show help\n --version, -v print the version\n", + stdout: "NAME:\n gitaly - a Git RPC service\n\nUSAGE:\n gitaly command [command options] \n\nDESCRIPTION:\n Gitaly is a Git RPC service for handling Git calls.\n\nCOMMANDS:\n serve launch the server daemon\n check verify internal API is accessible\n configuration run configuration-related commands\n hooks manage Git hooks\n bundle-uri Generate bundle URI bundle\n git execute Git commands using Gitaly's embedded Git\n\nOPTIONS:\n --help, -h show help\n --version, -v print the version\n", }, { desc: "with non-existent config", diff --git a/internal/backup/locator_test.go b/internal/backup/locator_test.go index 711bae219774866c246f6ed1a033f5931c91f66e..05d625aaf617d809a63f48f3079f31b4d533aa12 100644 --- a/internal/backup/locator_test.go +++ b/internal/backup/locator_test.go @@ -148,8 +148,8 @@ func TestPointerLocator(t *testing.T) { expectedOffset: 1, setup: func(tb testing.TB, ctx context.Context, backupPath string) { require.NoError(t, os.MkdirAll(filepath.Join(backupPath, repo.RelativePath, "abc123"), perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte("abc123"), perm.SharedFile)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "abc123", "LATEST"), []byte("001"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte("abc123"), perm.PrivateWriteOnceFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "abc123", "LATEST"), []byte("001"), perm.PrivateWriteOnceFile)) }, }, } { @@ -220,8 +220,8 @@ func TestPointerLocator(t *testing.T) { require.ErrorIs(t, err, ErrDoesntExist) require.NoError(t, os.MkdirAll(filepath.Join(backupPath, repo.RelativePath, backupID), perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, 
repo.RelativePath, "LATEST"), []byte(backupID), perm.SharedFile)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("003"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte(backupID), perm.PrivateWriteOnceFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("003"), perm.PrivateWriteOnceFile)) expected := &Backup{ ID: backupID, Repository: repo, @@ -281,8 +281,8 @@ func TestPointerLocator(t *testing.T) { require.Equal(t, expectedFallback, fallbackFull) require.NoError(t, os.MkdirAll(filepath.Join(backupPath, repo.RelativePath, backupID), perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte(backupID), perm.SharedFile)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("001"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte(backupID), perm.PrivateWriteOnceFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("001"), perm.PrivateWriteOnceFile)) expected := &Backup{ ID: backupID, Repository: repo, @@ -315,7 +315,7 @@ func TestPointerLocator(t *testing.T) { require.ErrorIs(t, err, ErrDoesntExist) require.NoError(t, os.MkdirAll(filepath.Join(backupPath, repo.RelativePath), perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte("invalid"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte("invalid"), perm.PrivateWriteOnceFile)) _, err = l.FindLatest(ctx, repo) require.EqualError(t, err, "pointer locator: find latest: find: find latest ID: storage service sink: new reader for \"TestPointerLocator/invalid/LATEST\": doesn't exist") 
}) @@ -334,8 +334,8 @@ func TestPointerLocator(t *testing.T) { require.ErrorIs(t, err, ErrDoesntExist) require.NoError(t, os.MkdirAll(filepath.Join(backupPath, repo.RelativePath, backupID), perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte(backupID), perm.SharedFile)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("invalid"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, "LATEST"), []byte(backupID), perm.PrivateWriteOnceFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("invalid"), perm.PrivateWriteOnceFile)) _, err = l.FindLatest(ctx, repo) require.EqualError(t, err, "pointer locator: find latest: find: determine increment ID: strconv.Atoi: parsing \"invalid\": invalid syntax") @@ -370,7 +370,7 @@ func TestPointerLocator(t *testing.T) { } require.NoError(t, os.MkdirAll(filepath.Join(backupPath, repo.RelativePath, backupID), perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("003"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("003"), perm.PrivateWriteOnceFile)) expected := &Backup{ ID: backupID, Repository: repo, @@ -415,7 +415,7 @@ func TestPointerLocator(t *testing.T) { require.ErrorIs(t, err, ErrDoesntExist) require.NoError(t, os.MkdirAll(filepath.Join(backupPath, repo.RelativePath, backupID), perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("invalid"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(backupPath, repo.RelativePath, backupID, "LATEST"), []byte("invalid"), perm.PrivateWriteOnceFile)) _, err = l.Find(ctx, repo, backupID) require.EqualError(t, err, "pointer locator: find: 
determine increment ID: strconv.Atoi: parsing \"invalid\": invalid syntax") diff --git a/internal/backup/sink_test.go b/internal/backup/sink_test.go index 1fbef679421b1b666eef920f617bb58da4e7ccbf..1e88ef8c2444ccd763c09b348f214d61f76249a0 100644 --- a/internal/backup/sink_test.go +++ b/internal/backup/sink_test.go @@ -53,7 +53,7 @@ func TestResolveSink(t *testing.T) { "token_uri": "https://accounts.google.com/o/oauth2/token", "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/303724477529-compute%40developer.gserviceaccount.com" -}`), perm.SharedFile)) +}`), perm.PrivateWriteOnceFile)) for _, tc := range []struct { desc string diff --git a/internal/bundleuri/git_config.go b/internal/bundleuri/git_config.go index 69cc21b49b9eb82aedfc84d84da7a3b8145b6024..f18fd4bda176fdff63e758ab623101b321e4ddb9 100644 --- a/internal/bundleuri/git_config.go +++ b/internal/bundleuri/git_config.go @@ -11,6 +11,9 @@ import ( "gitlab.com/gitlab-org/gitaly/v16/internal/log" ) +// ErrSinkMissing indicates a sink is missing +var ErrSinkMissing = errors.New("bundle-URI sink missing") + // CapabilitiesGitConfig returns a slice of git.ConfigPairs that can be injected // into the Git config to make it aware the bundle-URI capabilities are // supported. 
@@ -42,7 +45,7 @@ func UploadPackGitConfig( } if sink == nil { - return CapabilitiesGitConfig(ctx), errors.New("bundle-URI sink missing") + return CapabilitiesGitConfig(ctx), ErrSinkMissing } uri, err := sink.SignedURL(ctx, repo) diff --git a/internal/bundleuri/git_config_test.go b/internal/bundleuri/git_config_test.go index 0a5da2bbc24c9de079b4d4005cfda31e6871dc4a..d81224bf4f5d8e41ab67272755a39f7b0ebff05b 100644 --- a/internal/bundleuri/git_config_test.go +++ b/internal/bundleuri/git_config_test.go @@ -14,7 +14,6 @@ import ( "gitlab.com/gitlab-org/gitaly/v16/internal/git" "gitlab.com/gitlab-org/gitaly/v16/internal/git/gittest" "gitlab.com/gitlab-org/gitaly/v16/internal/git/localrepo" - "gitlab.com/gitlab-org/gitaly/v16/internal/structerr" "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper" "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper/testcfg" ) @@ -60,7 +59,7 @@ func testUploadPackGitConfig(t *testing.T, ctx context.Context) { return setupData{} }, expectedConfig: nil, - expectedErr: errors.New("bundle-URI sink missing"), + expectedErr: ErrSinkMissing, }, { desc: "no bundle found", @@ -74,7 +73,7 @@ func testUploadPackGitConfig(t *testing.T, ctx context.Context) { } }, expectedConfig: nil, - expectedErr: structerr.NewNotFound("no bundle available"), + expectedErr: ErrBundleNotFound, }, { desc: "not signed", @@ -144,7 +143,7 @@ func testUploadPackGitConfig(t *testing.T, ctx context.Context) { actual, err := UploadPackGitConfig(ctx, sink, repoProto) if featureflag.BundleURI.IsEnabled(ctx) { - require.Equal(t, tc.expectedErr, err) + require.True(t, errors.Is(err, tc.expectedErr) || strings.Contains(err.Error(), tc.expectedErr.Error())) if tc.expectedConfig != nil { require.Equal(t, len(tc.expectedConfig), len(actual)) diff --git a/internal/bundleuri/sink.go b/internal/bundleuri/sink.go index a2d1ac8d0047cdb753b39232f1abbe58934b288b..e6beabe5e79d90b06fd19dc361668e32b386704f 100644 --- a/internal/bundleuri/sink.go +++ b/internal/bundleuri/sink.go @@ -7,6 
+7,7 @@ import ( "io" "path/filepath" "strings" + "sync" "time" "gitlab.com/gitlab-org/gitaly/v16/internal/backup" @@ -29,21 +30,61 @@ const ( defaultExpiry = 10 * time.Minute ) +var ( + // ErrBundleGenerationInProgress indicates that an existing bundle generation + // is already in progress. + ErrBundleGenerationInProgress = errors.New("bundle generation in progress") + // ErrBundleNotFound indicates that no bundle could be found for a given repository. + ErrBundleNotFound = errors.New("no bundle found") +) + // Sink is a wrapper around the storage bucket used for accessing/writing // bundleuri bundles. type Sink struct { - bucket *blob.Bucket + bucket *blob.Bucket + bundleCreationMutex map[string]*sync.Mutex + + config sinkConfig +} + +type sinkConfig struct { + notifyBundleGeneration func(string, error) +} + +// SinkOption can be passed into NewSink to pass in options when creating a new sink. +type SinkOption func(s *sinkConfig) + +// WithBundleGenerationNotifier sets a notifier function that is called once GenerateOneAtATime +// finishes. Callers are expected to run GenerateOneAtATime in a background goroutine, so this +// notifier is the entrypoint for logic that must run after the bundle has been generated. +func WithBundleGenerationNotifier(f func(string, error)) SinkOption { + return func(s *sinkConfig) { + s.notifyBundleGeneration = f + } } // NewSink creates a Sink from the given parameters. 
-func NewSink(ctx context.Context, uri string) (*Sink, error) { +func NewSink(ctx context.Context, uri string, options ...SinkOption) (*Sink, error) { bucket, err := blob.OpenBucket(ctx, uri) if err != nil { return nil, fmt.Errorf("open bucket: %w", err) } - return &Sink{ - bucket: bucket, - }, nil + + s := &Sink{ + bucket: bucket, + bundleCreationMutex: make(map[string]*sync.Mutex), + } + + var c sinkConfig + if len(options) > 0 { + for _, option := range options { + option(&c) + } + + s.config = c + } + + return s, nil } // relativePath returns a relative path of the bundle-URI bundle inside the @@ -73,6 +114,43 @@ func (s *Sink) getWriter(ctx context.Context, relativePath string) (io.WriteClos return writer, nil } +// GenerateOneAtATime generates a bundle for a repository, but only if there is not already +// one in flight. +func (s *Sink) GenerateOneAtATime(ctx context.Context, repo *localrepo.Repo) error { + bundlePath := s.relativePath(repo, defaultBundle) + + var m *sync.Mutex + var ok bool + + if m, ok = s.bundleCreationMutex[bundlePath]; !ok { + s.bundleCreationMutex[bundlePath] = &sync.Mutex{} + m = s.bundleCreationMutex[bundlePath] + } + + if m.TryLock() { + defer m.Unlock() + errChan := make(chan error) + + go func(ctx context.Context) { + select { + case errChan <- s.Generate(ctx, repo): + case <-ctx.Done(): + errChan <- ctx.Err() + } + }(ctx) + + err := <-errChan + + if s.config.notifyBundleGeneration != nil { + s.config.notifyBundleGeneration(bundlePath, err) + } + } else { + return fmt.Errorf("%w: %s", ErrBundleGenerationInProgress, bundlePath) + } + + return nil +} + // Generate creates a bundle for bundle-URI use into the bucket. 
func (s Sink) Generate(ctx context.Context, repo *localrepo.Repo) (returnErr error) { ref, err := repo.HeadReference(ctx) @@ -132,9 +210,9 @@ func (s Sink) SignedURL(ctx context.Context, repo storage.Repository) (string, e if exists, err := s.bucket.Exists(ctx, relativePath); !exists { if err == nil { - return "", structerr.NewNotFound("no bundle available") + return "", ErrBundleNotFound } - return "", structerr.NewNotFound("no bundle available: %w", err) + return "", fmt.Errorf("%w: %w", ErrBundleNotFound, err) } uri, err := s.bucket.SignedURL(ctx, relativePath, &blob.SignedURLOptions{ diff --git a/internal/bundleuri/sink_test.go b/internal/bundleuri/sink_test.go index 01aa6907af82ae991312a1d7808f9eb53a9e2dff..e3534173bd3d16f4cf12ba5e8a2ae42e66cda526 100644 --- a/internal/bundleuri/sink_test.go +++ b/internal/bundleuri/sink_test.go @@ -1,6 +1,7 @@ package bundleuri import ( + "context" "fmt" "os" "path/filepath" @@ -99,13 +100,13 @@ func TestSink_SignedURL(t *testing.T) { setup: func(t *testing.T, sinkDir string, sink *Sink) { path := filepath.Join(sinkDir, sink.relativePath(repo, "default")) require.NoError(t, os.MkdirAll(filepath.Dir(path), perm.PrivateDir)) - require.NoError(t, os.WriteFile(path, []byte("hello"), perm.SharedFile)) + require.NoError(t, os.WriteFile(path, []byte("hello"), perm.PrivateWriteOnceFile)) }, }, { desc: "fails with missing bundle", setup: func(t *testing.T, sinkDir string, sink *Sink) {}, - expectedErr: structerr.NewNotFound("no bundle available"), + expectedErr: ErrBundleNotFound, }, } { tc := tc @@ -123,9 +124,170 @@ func TestSink_SignedURL(t *testing.T) { if tc.expectedErr == nil { require.NoError(t, err) require.Regexp(t, "http://example\\.com", uri) + } else { + require.ErrorIs(t, err, tc.expectedErr) + } + }) + } +} + +func TestSink_GenerateOneAtATime(t *testing.T) { + t.Parallel() + + cfg := testcfg.Build(t) + ctx := testhelper.Context(t) + + for _, tc := range []struct { + desc string + setup func(t *testing.T, repoPath string) 
+ expectedErr error + }{ + { + desc: "creates bundle successfully", + setup: func(t *testing.T, repoPath string) { + gittest.WriteCommit(t, cfg, repoPath, + gittest.WithTreeEntries(gittest.TreeEntry{Mode: "100644", Path: "README", Content: "much"}), + gittest.WithBranch("main")) + }, + }, + { + desc: "fails with missing HEAD", + setup: func(t *testing.T, repoPath string) {}, + expectedErr: structerr.NewFailedPrecondition("ref %q does not exist: %w", "refs/heads/main", fmt.Errorf("create bundle: %w", localrepo.ErrEmptyBundle)), + }, + } { + tc := tc + + t.Run(tc.desc, func(t *testing.T) { + t.Parallel() + + repoProto, repoPath := gittest.CreateRepository(t, ctx, cfg, gittest.CreateRepositoryConfig{ + SkipCreationViaService: true, + }) + repo := localrepo.NewTestRepo(t, cfg, repoProto) + + tc.setup(t, repoPath) + + doneChan := make(chan struct{}) + errChan := make(chan error) + sinkDir := t.TempDir() + sink, err := NewSink( + ctx, + "file://"+sinkDir, + WithBundleGenerationNotifier( + func(_ string, err error) { + close(doneChan) + errChan <- err + }, + ), + ) + require.NoError(t, err) + + go func() { + err := sink.GenerateOneAtATime(ctx, repo) + require.NoError(t, err) + }() + + <-doneChan + err = <-errChan + + if tc.expectedErr == nil { + require.NoError(t, err) + require.FileExists(t, filepath.Join(sinkDir, sink.relativePath(repo, "default"))) } else { require.Equal(t, err, tc.expectedErr, err) } }) } } + +func TestSink_GenerateOneAtATimeConcurrent(t *testing.T) { + t.Parallel() + + cfg := testcfg.Build(t) + ctx := testhelper.Context(t) + + repoProto, repoPath := gittest.CreateRepository(t, ctx, cfg, gittest.CreateRepositoryConfig{ + SkipCreationViaService: true, + }) + repo := localrepo.NewTestRepo(t, cfg, repoProto) + + gittest.WriteCommit(t, cfg, repoPath, + gittest.WithTreeEntries(gittest.TreeEntry{Mode: "100644", Path: "README", Content: "much"}), + gittest.WithBranch("main")) + + doneChan, startNotifierCh := make(chan struct{}), make(chan struct{}) + errChan 
:= make(chan error) + + sinkDir := t.TempDir() + sink, err := NewSink( + ctx, + "file://"+sinkDir, + WithBundleGenerationNotifier( + func(_ string, err error) { + close(startNotifierCh) + close(doneChan) + errChan <- err + }, + ), + ) + require.NoError(t, err) + + go func() { + err := sink.GenerateOneAtATime(ctx, repo) + require.NoError(t, err) + }() + + <-startNotifierCh + + err = sink.GenerateOneAtATime(ctx, repo) + require.ErrorIs(t, err, ErrBundleGenerationInProgress) + + <-doneChan + err = <-errChan + + require.NoError(t, err) + require.FileExists(t, filepath.Join(sinkDir, sink.relativePath(repo, "default"))) +} + +func TestSink_GenerateOneAtATime_ContextCancelled(t *testing.T) { + t.Parallel() + + cfg := testcfg.Build(t) + ctx := testhelper.Context(t) + ctx, cancel := context.WithCancel(ctx) + + repoProto, repoPath := gittest.CreateRepository(t, ctx, cfg, gittest.CreateRepositoryConfig{ + SkipCreationViaService: true, + }) + repo := localrepo.NewTestRepo(t, cfg, repoProto) + + gittest.WriteCommit(t, cfg, repoPath, + gittest.WithTreeEntries(gittest.TreeEntry{Mode: "100644", Path: "README", Content: "much"}), + gittest.WithBranch("main")) + + errChan := make(chan error) + + sinkDir := t.TempDir() + sink, err := NewSink( + ctx, + "file://"+sinkDir, + WithBundleGenerationNotifier( + func(_ string, err error) { + errChan <- err + }, + ), + ) + require.NoError(t, err) + + cancel() + + go func() { + require.NoError(t, sink.GenerateOneAtATime(ctx, repo)) + }() + + err = <-errChan + + require.ErrorIs(t, err, context.Canceled) + require.NoFileExists(t, filepath.Join(sinkDir, sink.relativePath(repo, "default"))) +} diff --git a/internal/cache/walker_test.go b/internal/cache/walker_test.go index 6c5bc945c3a495cd76b6d2728a8e181958c75b08..3cad56dec7f1785e014afbc0ecc62fc18de9c09c 100644 --- a/internal/cache/walker_test.go +++ b/internal/cache/walker_test.go @@ -116,7 +116,7 @@ func TestCleanWalkEmptyDirs(t *testing.T) { if strings.HasSuffix(tt.path, "/") { 
require.NoError(t, os.MkdirAll(p, perm.PrivateDir)) } else { - require.NoError(t, os.WriteFile(p, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(p, nil, perm.PrivateWriteOnceFile)) if tt.stale { require.NoError(t, os.Chtimes(p, time.Now(), time.Now().Add(-time.Hour))) } diff --git a/internal/cgroups/mock_linux_test.go b/internal/cgroups/mock_linux_test.go index 88f0b444635fe308efac182c4936e18d115d693a..a1fde9b81c4736501956e5004f6f9d09705a5c66 100644 --- a/internal/cgroups/mock_linux_test.go +++ b/internal/cgroups/mock_linux_test.go @@ -22,6 +22,7 @@ package cgroups import ( "fmt" + "io/fs" "os" "path/filepath" "testing" @@ -156,7 +157,7 @@ func (m *mockCgroupV1) setupMockCgroupFiles( for filename, content := range content { controlFilePath := filepath.Join(cgroupPath, filename) - require.NoError(t, os.WriteFile(controlFilePath, []byte(content), perm.SharedFile)) + require.NoError(t, os.WriteFile(controlFilePath, []byte(content), fs.ModePerm)) } for _, shard := range shards { @@ -165,7 +166,7 @@ func (m *mockCgroupV1) setupMockCgroupFiles( for filename, content := range content { shardControlFilePath := filepath.Join(shardPath, filename) - require.NoError(t, os.WriteFile(shardControlFilePath, []byte(content), perm.SharedFile)) + require.NoError(t, os.WriteFile(shardControlFilePath, []byte(content), fs.ModePerm)) } } } @@ -244,12 +245,12 @@ func (m *mockCgroupV2) setupMockCgroupFiles( for filename, content := range content { controlFilePath := filepath.Join(m.root, manager.cfg.HierarchyRoot, filename) - require.NoError(t, os.WriteFile(controlFilePath, []byte(content), perm.SharedFile)) + require.NoError(t, os.WriteFile(controlFilePath, []byte(content), fs.ModePerm)) } for filename, content := range content { controlFilePath := filepath.Join(cgroupPath, filename) - require.NoError(t, os.WriteFile(controlFilePath, []byte(content), perm.SharedFile)) + require.NoError(t, os.WriteFile(controlFilePath, []byte(content), fs.ModePerm)) } for _, shard := range shards 
{ @@ -258,7 +259,7 @@ func (m *mockCgroupV2) setupMockCgroupFiles( for filename, content := range content { shardControlFilePath := filepath.Join(shardPath, filename) - require.NoError(t, os.WriteFile(shardControlFilePath, []byte(content), perm.SharedFile)) + require.NoError(t, os.WriteFile(shardControlFilePath, []byte(content), fs.ModePerm)) } } } diff --git a/internal/cli/gitaly/app.go b/internal/cli/gitaly/app.go index a6976d96ce536dcc22defabc255ef83ac5ab0b5d..02212f94b2b6fa8ffef5aca54f246206449fbf4d 100644 --- a/internal/cli/gitaly/app.go +++ b/internal/cli/gitaly/app.go @@ -33,6 +33,7 @@ func NewApp() *cli.App { newConfigurationCommand(), newHooksCommand(), newBundleURICommand(), + newGitCommand(), }, } } diff --git a/internal/cli/gitaly/serve.go b/internal/cli/gitaly/serve.go index 6e4ccad05a8b12a8700e57758b4d2f7bee967daa..3a7520f0b5270f851a7ce8549de85100ed88e110 100644 --- a/internal/cli/gitaly/serve.go +++ b/internal/cli/gitaly/serve.go @@ -498,7 +498,17 @@ func run(appCtx *cli.Context, cfg config.Cfg, logger log.Logger) error { var bundleURISink *bundleuri.Sink if cfg.BundleURI.GoCloudURL != "" { - bundleURISink, err = bundleuri.NewSink(ctx, cfg.BundleURI.GoCloudURL) + bundleURISink, err = bundleuri.NewSink( + ctx, + cfg.BundleURI.GoCloudURL, + bundleuri.WithBundleGenerationNotifier(func(bundlePath string, err error) { + if err != nil { + logger.WithField("bundle_path", bundlePath). + WithError(err). 
+ Warn("bundle generation failed") + } + }), + ) if err != nil { return fmt.Errorf("create bundle-URI sink: %w", err) } @@ -547,6 +557,7 @@ func run(appCtx *cli.Context, cfg config.Cfg, logger log.Logger) error { BackupSink: backupSink, BackupLocator: backupLocator, BundleURISink: bundleURISink, + InProgressTracker: service.NewInProgressTracker(), }) b.RegisterStarter(starter.New(c, srv, logger)) } diff --git a/internal/cli/gitaly/subcmd_git.go b/internal/cli/gitaly/subcmd_git.go new file mode 100644 index 0000000000000000000000000000000000000000..7c252c3104df53c688b1bd767112195a82d407c5 --- /dev/null +++ b/internal/cli/gitaly/subcmd_git.go @@ -0,0 +1,77 @@ +package gitaly + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + + "github.com/urfave/cli/v2" + "gitlab.com/gitlab-org/gitaly/v16/internal/git" + "gitlab.com/gitlab-org/gitaly/v16/internal/log" +) + +func newGitCommand() *cli.Command { + return &cli.Command{ + Name: "git", + Usage: "execute Git commands using Gitaly's embedded Git", + UsageText: `gitaly git [git-command] [args...] 
+ +Example: gitaly git status`, + Description: "Execute Git commands using the same Git execution environment as Gitaly.", + Action: gitAction, + HideHelpCommand: true, + ArgsUsage: "[git-command] [args...]", + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: flagConfig, + Usage: "path to Gitaly configuration", + Aliases: []string{"c"}, + Required: true, + }, + }, + } +} + +func gitAction(ctx *cli.Context) error { + logger := log.ConfigureCommand() + + if ctx.NArg() < 1 { + if err := cli.ShowSubcommandHelp(ctx); err != nil { + return err + } + return cli.Exit("error: Git command required", 1) + } + + cfg, err := loadConfig(ctx.String(flagConfig)) + if err != nil { + return fmt.Errorf("load config: %w", err) + } + + gitCmdFactory, cleanup, err := git.NewExecCommandFactory(cfg, logger) + if err != nil { + return fmt.Errorf("creating Git command factory: %w", err) + } + defer cleanup() + + gitBinaryPath := gitCmdFactory.GetExecutionEnvironment(ctx.Context).BinaryPath + + cmd := exec.Command(gitBinaryPath, ctx.Args().Slice()...) 
+ cmd.Stdin = os.Stdin + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Env = append(os.Environ(), + fmt.Sprintf("GIT_EXEC_PATH=%s", filepath.Dir(gitBinaryPath)), + fmt.Sprintf("PATH=%s:%s", filepath.Dir(gitBinaryPath), os.Getenv("PATH")), + ) + + err = cmd.Run() + if err != nil { + if exitError, ok := err.(*exec.ExitError); ok { + return cli.Exit("", exitError.ExitCode()) + } + return fmt.Errorf("executing git command: %w", err) + } + + return nil +} diff --git a/internal/cli/gitaly/subcmd_git_test.go b/internal/cli/gitaly/subcmd_git_test.go new file mode 100644 index 0000000000000000000000000000000000000000..07be305f3a472edbaece4ed3833551880c9e1383 --- /dev/null +++ b/internal/cli/gitaly/subcmd_git_test.go @@ -0,0 +1,105 @@ +package gitaly + +import ( + "bytes" + "os" + "os/exec" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + "gitlab.com/gitlab-org/gitaly/v16/internal/git/gittest" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/config" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/service/setup" + "gitlab.com/gitlab-org/gitaly/v16/internal/helper/perm" + "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper" + "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper/testcfg" + "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper/testserver" +) + +func TestGitalyGitCommand(t *testing.T) { + ctx := testhelper.Context(t) + cfg := testcfg.Build(t) + dir := testhelper.TempDir(t) + seedDirWithExecutables := func(t *testing.T, executableNames ...string) { + for _, executableName := range executableNames { + require.NoError(t, os.WriteFile(filepath.Join(dir, executableName), nil, perm.PrivateExecutable)) + } + } + + // Git environments supported by the Git command factory. 
+ seedDirWithExecutables(t, "gitaly-git-v2.45", "gitaly-git-remote-http-v2.45", "gitaly-git-http-backend-v2.45", + "gitaly-git-v2.44", "gitaly-git-remote-http-v2.44", "gitaly-git-http-backend-v2.44") + t.Setenv("GITALY_TESTING_BUNDLED_GIT_PATH", dir) + + // Ensure we're using bundled binaries + cfg.Git.UseBundledBinaries = true + cfg.BinDir = dir + testcfg.BuildGitaly(t, cfg) + cfg.SocketPath = testserver.RunGitalyServer(t, cfg, setup.RegisterAll) + + repo, repoPath := gittest.CreateRepository(t, ctx, cfg) + gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch("main")) + + cfg.SocketPath = testhelper.GetTemporaryGitalySocketFileName(t) + + cfg.Storages = []config.Storage{ + { + Name: repo.StorageName, + Path: filepath.Dir(repoPath), + }, + } + + configPath := testcfg.WriteTemporaryGitalyConfigFile(t, cfg) + + tests := []struct { + name string + args []string + expectedOutput string + expectedError string + }{ + { + name: "git status", + args: []string{"status"}, + expectedOutput: "On branch main", + }, + { + name: "invalid git command", + args: []string{"invalid-command"}, + expectedError: "exit status 1", + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + cmd := exec.Command(cfg.BinaryPath("gitaly"), + "git", + "-c", configPath, + ) + cmd.Args = append(cmd.Args, tt.args...) 
+ cmd.Dir = repoPath // Set the working directory to the repository path + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + t.Logf("Running command: %v in directory: %s", cmd.Args, cmd.Dir) + + err := cmd.Run() + + t.Logf("Stdout: %s", stdout.String()) + t.Logf("Stderr: %s", stderr.String()) + + if tt.expectedError != "" { + require.Error(t, err) + require.Contains(t, err.Error(), tt.expectedError) + } else { + require.NoError(t, err) + require.Contains(t, stdout.String(), tt.expectedOutput) + } + }) + } +} diff --git a/internal/featureflag/ff_autogenerate_bundles.go b/internal/featureflag/ff_autogenerate_bundles.go new file mode 100644 index 0000000000000000000000000000000000000000..61692778f32d355af1a5b5d4fe028d201216b386 --- /dev/null +++ b/internal/featureflag/ff_autogenerate_bundles.go @@ -0,0 +1,9 @@ +package featureflag + +// AutogenerateBundlesForBundleURI enables the use of git's bundle URI feature +var AutogenerateBundlesForBundleURI = NewFeatureFlag( + "autogenerate_bundles_for_bundleuri", + "v17.3.0", + "https://gitlab.com/gitlab-org/gitaly/-/issues/6204", + false, +) diff --git a/internal/git/conflict/parser_test.go b/internal/git/conflict/parser_test.go index 31793e400a5c05689f45308b14c676e01807c57d..4d965d16ceee733654193938461dcd0b8161f6a7 100644 --- a/internal/git/conflict/parser_test.go +++ b/internal/git/conflict/parser_test.go @@ -112,7 +112,7 @@ we can both agree on this line though t.Run(tt.name, func(t *testing.T) { entry := Entry{ Path: tt.path, - Mode: uint(perm.SharedFile), + Mode: uint(perm.PrivateWriteOnceFile), Contents: []byte("something-with-trailing-newline\n"), } diff --git a/internal/git/gittest/http_server.go b/internal/git/gittest/http_server.go index 64757b3026e58f5a08c820840005872d622506f1..f1407d4464fd71f52cbe7b31565ec8d9c2e52778 100644 --- a/internal/git/gittest/http_server.go +++ b/internal/git/gittest/http_server.go @@ -19,7 +19,7 @@ import ( // prepared such that git-http-backend(1) will 
serve it by creating the "git-daemon-export-ok" magic // file. func HTTPServer(tb testing.TB, ctx context.Context, gitCmdFactory git.CommandFactory, repoPath string, middleware func(http.ResponseWriter, *http.Request, http.Handler)) int { - require.NoError(tb, os.WriteFile(filepath.Join(repoPath, "git-daemon-export-ok"), nil, perm.SharedFile)) + require.NoError(tb, os.WriteFile(filepath.Join(repoPath, "git-daemon-export-ok"), nil, perm.PrivateWriteOnceFile)) listener, err := net.Listen("tcp", "127.0.0.1:0") require.NoError(tb, err) diff --git a/internal/git/housekeeping/clean_stale_data_test.go b/internal/git/housekeeping/clean_stale_data_test.go index 254577ff53cd7c76d9b39d9b3af2d7b3d7995959..c2341d7e1b7c6b3b79868be4b11abea15a0cc858 100644 --- a/internal/git/housekeeping/clean_stale_data_test.go +++ b/internal/git/housekeeping/clean_stale_data_test.go @@ -255,7 +255,7 @@ func TestPruneEmptyConfigSections(t *testing.T) { }, } { t.Run(tc.desc, func(t *testing.T) { - require.NoError(t, os.WriteFile(configPath, []byte(tc.configData), perm.SharedFile)) + require.NoError(t, os.WriteFile(configPath, []byte(tc.configData), perm.PrivateWriteOnceFile)) skippedSections, err := PruneEmptyConfigSections(ctx, repo) require.NoError(t, err) @@ -364,7 +364,7 @@ func TestRemoveGitLabFullPathConfig(t *testing.T) { }, } { t.Run(tc.desc, func(t *testing.T) { - require.NoError(t, os.WriteFile(configPath, []byte(tc.configData), perm.SharedFile)) + require.NoError(t, os.WriteFile(configPath, []byte(tc.configData), perm.PrivateWriteOnceFile)) cleanupCount, err := removeGitLabFullPathConfig(ctx, repo, &transaction.MockManager{}) require.NoError(t, err) diff --git a/internal/git/housekeeping/manager/optimize_repository_test.go b/internal/git/housekeeping/manager/optimize_repository_test.go index c25e4e126bcd78e2f16b7a67f4e3cfd759ecad7c..b5aa1a2baa3dea4b8d2cab67d00cffb3b7ddaf90 100644 --- a/internal/git/housekeeping/manager/optimize_repository_test.go +++ 
b/internal/git/housekeeping/manager/optimize_repository_test.go @@ -731,7 +731,7 @@ func TestOptimizeRepository(t *testing.T) { for i := 0; i < housekeeping.LooseObjectLimit+1; i++ { blobPath := filepath.Join(repoPath, "objects", "17", fmt.Sprintf("%d", i)) - require.NoError(t, os.WriteFile(blobPath, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(blobPath, nil, perm.PrivateWriteOnceFile)) require.NoError(t, os.Chtimes(blobPath, almostTwoWeeksAgo, almostTwoWeeksAgo)) } @@ -766,7 +766,7 @@ func TestOptimizeRepository(t *testing.T) { for i := 0; i < housekeeping.LooseObjectLimit+1; i++ { blobPath := filepath.Join(repoPath, "objects", "17", fmt.Sprintf("%d", i)) - require.NoError(t, os.WriteFile(blobPath, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(blobPath, nil, perm.PrivateWriteOnceFile)) require.NoError(t, os.Chtimes(blobPath, moreThanTwoWeeksAgo, moreThanTwoWeeksAgo)) } @@ -1733,7 +1733,7 @@ func TestRepositoryManager_CleanStaleData_reftable(t *testing.T) { path := filepath.Join(repoPath, "reftable", "tables.list.lock") - require.NoError(t, os.WriteFile(path, []byte{}, perm.SharedFile)) + require.NoError(t, os.WriteFile(path, []byte{}, perm.PrivateWriteOnceFile)) filetime := time.Now().Add(-tc.age) require.NoError(t, os.Chtimes(path, filetime, filetime)) @@ -1843,7 +1843,7 @@ func TestRepositoryManager_CleanStaleData_references(t *testing.T) { path := filepath.Join(repoPath, ref.name) require.NoError(t, os.MkdirAll(filepath.Dir(path), perm.PrivateDir)) - require.NoError(t, os.WriteFile(path, bytes.Repeat([]byte{0}, ref.size), perm.SharedFile)) + require.NoError(t, os.WriteFile(path, bytes.Repeat([]byte{0}, ref.size), perm.PrivateWriteOnceFile)) filetime := time.Now().Add(-ref.age) require.NoError(t, os.Chtimes(path, filetime, filetime)) } @@ -2492,7 +2492,7 @@ func TestRepositoryManager_CleanStaleData_unsetConfiguration(t *testing.T) { else = untouched [totally] unrelated = untouched -`), perm.SharedFile)) +`), perm.PrivateWriteOnceFile)) mgr 
:= New(cfg.Prometheus, testhelper.SharedLogger(t), nil, nil) @@ -2588,7 +2588,7 @@ func TestRepositoryManager_CleanStaleData_pruneEmptyConfigSections(t *testing.T) [remote "tmp-03b5e8c765135b343214d471843a062a"] [remote "tmp-f57338181aca1d599669dbb71ce9ce57"] [remote "tmp-8c948ca94832c2725733e48cb2902287"] -`), perm.SharedFile)) +`), perm.PrivateWriteOnceFile)) mgr := New(cfg.Prometheus, testhelper.SharedLogger(t), nil, nil) @@ -2629,7 +2629,7 @@ func TestRepositoryManager_CleanStaleData_removeGitLabFullPathConfig(t *testing. [gitlab] fullpath = foo/bar other = config -`), perm.SharedFile)) +`), perm.PrivateWriteOnceFile)) mgr := New(cfg.Prometheus, testhelper.SharedLogger(t), nil, nil) diff --git a/internal/git/localrepo/refs_external_test.go b/internal/git/localrepo/refs_external_test.go index 4539a280ba93495a466b72792446f822d8c5a864..bf982a54012f1c25a407bc0a065eac44b60e27f6 100644 --- a/internal/git/localrepo/refs_external_test.go +++ b/internal/git/localrepo/refs_external_test.go @@ -179,7 +179,7 @@ func TestRepo_SetDefaultBranch_errors(t *testing.T) { require.NoError(t, updater.Prepare()) t.Cleanup(func() { require.NoError(t, updater.Close()) }) } else { - require.NoError(t, os.WriteFile(filepath.Join(repoPath, "HEAD.lock"), []byte(""), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(repoPath, "HEAD.lock"), []byte(""), perm.PrivateWriteOnceFile)) } err = repo.SetDefaultBranch(ctx, &transaction.MockManager{}, "refs/heads/branch") diff --git a/internal/git/localrepo/snapshot_test.go b/internal/git/localrepo/snapshot_test.go index b7bf639a7cf2f017622c5f9bf6663dce8c927e7e..dec1aba7ccf635d79eba3ad2d2a31a481fec3f44 100644 --- a/internal/git/localrepo/snapshot_test.go +++ b/internal/git/localrepo/snapshot_test.go @@ -95,7 +95,7 @@ doesn't seem to test a realistic scenario.`) ) // The shallow file, used if the repository is a shallow clone, is also included in snapshots. 
- require.NoError(t, os.WriteFile(filepath.Join(repoPath, "shallow"), nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(repoPath, "shallow"), nil, perm.PrivateWriteOnceFile)) // Custom Git hooks are not included in snapshots. require.NoError(t, os.MkdirAll(filepath.Join(repoPath, "hooks"), perm.PrivateDir)) @@ -104,7 +104,7 @@ doesn't seem to test a realistic scenario.`) require.NoError(t, os.WriteFile( filepath.Join(repoPath, "objects/this-should-not-be-included"), nil, - perm.SharedFile, + perm.PrivateWriteOnceFile, )) return setupData{ @@ -136,7 +136,7 @@ doesn't seem to test a realistic scenario.`) require.NoError(t, os.WriteFile( altFile, []byte(fmt.Sprintf("%s\n", altObjectDir)), - perm.SharedFile, + perm.PrivateWriteOnceFile, )) refs := gittest.FilesOrReftables( @@ -198,7 +198,7 @@ doesn't seem to test a realistic scenario.`) require.NoError(t, os.WriteFile( altFile, []byte(fmt.Sprintf("%s\n", altObjectDir)), - perm.SharedFile, + perm.PrivateWriteOnceFile, )) gittest.RequireObjectExists(t, cfg, repoPath, commitID) @@ -245,7 +245,7 @@ doesn't seem to test a realistic scenario.`) require.NoError(t, os.WriteFile( altFile, []byte(fmt.Sprintf("%s\n", relAltObjectDir)), - perm.SharedFile, + perm.PrivateWriteOnceFile, )) gittest.RequireObjectExists(t, cfg, repoPath, commitID) diff --git a/internal/git/objectpool/disconnect_test.go b/internal/git/objectpool/disconnect_test.go index a4eecbef36cffa82e239b56c136ee8aeeba50858..75214047386b6f54cec629aef84410fa32da5e24 100644 --- a/internal/git/objectpool/disconnect_test.go +++ b/internal/git/objectpool/disconnect_test.go @@ -92,7 +92,7 @@ func TestDisconnect(t *testing.T) { altPath, err := repo.InfoAlternatesPath(ctx) require.NoError(t, err) - require.NoError(t, os.WriteFile(altPath, []byte(altContent), perm.SharedFile)) + require.NoError(t, os.WriteFile(altPath, []byte(altContent), perm.PrivateWriteOnceFile)) return repo } @@ -188,7 +188,7 @@ func TestDisconnect(t *testing.T) { altPath, err := 
repo.InfoAlternatesPath(ctx) require.NoError(t, err) - require.NoError(t, os.WriteFile(altPath, []byte(altObjectDir), perm.SharedFile)) + require.NoError(t, os.WriteFile(altPath, []byte(altObjectDir), perm.PrivateWriteOnceFile)) return setupData{ repository: repo, @@ -382,7 +382,7 @@ func TestRemoveAlternatesIfOk(t *testing.T) { altPath, err := repo.InfoAlternatesPath(ctx) require.NoError(t, err) altContent := testhelper.TempDir(t) + "\n" - require.NoError(t, os.WriteFile(altPath, []byte(altContent), perm.SharedFile)) + require.NoError(t, os.WriteFile(altPath, []byte(altContent), perm.PrivateWriteOnceFile)) // Intentionally break the repository so that the consistency check will cause an // error. @@ -412,7 +412,7 @@ func TestRemoveAlternatesIfOk(t *testing.T) { altPath, err := repo.InfoAlternatesPath(ctx) require.NoError(t, err) altContent := testhelper.TempDir(t) + "\n" - require.NoError(t, os.WriteFile(altPath, []byte(altContent), perm.SharedFile)) + require.NoError(t, os.WriteFile(altPath, []byte(altContent), perm.PrivateWriteOnceFile)) // In order to test the scenario where a commit is in a commit graph but not in the // object database, we will first write a new commit, write the commit graph, then diff --git a/internal/git/objectpool/link_test.go b/internal/git/objectpool/link_test.go index 7c0c25b5c2221aa68ad012b82636fb3885867aab..9f782572362ed6be52f37fb32fba69619d43124e 100644 --- a/internal/git/objectpool/link_test.go +++ b/internal/git/objectpool/link_test.go @@ -112,7 +112,7 @@ func TestLink(t *testing.T) { // Link the repository to object pool using the absolute path of the object pool. // The alternates file should be rewritten to use the relative path. 
poolObjectsPath := gittest.RepositoryPath(t, ctx, pool, "objects") - require.NoError(t, os.WriteFile(altPath, []byte(poolObjectsPath), perm.SharedFile)) + require.NoError(t, os.WriteFile(altPath, []byte(poolObjectsPath), perm.PrivateWriteOnceFile)) return setupData{ cfg: cfg, @@ -130,7 +130,7 @@ func TestLink(t *testing.T) { // nothing and completes normally. altPath, err := repo.InfoAlternatesPath(ctx) require.NoError(t, err) - require.NoError(t, os.WriteFile(altPath, []byte(getRelAltPath(t, repo, pool.Repo)), perm.SharedFile)) + require.NoError(t, os.WriteFile(altPath, []byte(getRelAltPath(t, repo, pool.Repo)), perm.PrivateWriteOnceFile)) return setupData{ cfg: cfg, @@ -148,7 +148,7 @@ func TestLink(t *testing.T) { // linking operation fails. altPath, err := repo.InfoAlternatesPath(ctx) require.NoError(t, err) - require.NoError(t, os.WriteFile(altPath, []byte("../different/object/pool"), perm.SharedFile)) + require.NoError(t, os.WriteFile(altPath, []byte("../different/object/pool"), perm.PrivateWriteOnceFile)) return setupData{ cfg: cfg, @@ -195,7 +195,7 @@ func TestLink(t *testing.T) { // to the same object pool. 
altPath, err := repo.InfoAlternatesPath(ctx) require.NoError(t, err) - require.NoError(t, os.WriteFile(altPath, []byte(getRelAltPath(t, repo, pool.Repo)), perm.SharedFile)) + require.NoError(t, os.WriteFile(altPath, []byte(getRelAltPath(t, repo, pool.Repo)), perm.PrivateWriteOnceFile)) return setupData{ cfg: cfg, diff --git a/internal/git/objectpool/pool_test.go b/internal/git/objectpool/pool_test.go index 47fad57215da334d991cd05b9bb8ab0544947726..397bfd6ac57b43e7916cecbe6f59ac3358166e00 100644 --- a/internal/git/objectpool/pool_test.go +++ b/internal/git/objectpool/pool_test.go @@ -114,7 +114,7 @@ func TestFromRepo_failures(t *testing.T) { require.NoError(t, os.MkdirAll(filepath.Join(repoPath, "objects", "info"), perm.PrivateDir)) alternateFilePath := filepath.Join(repoPath, "objects", "info", "alternates") - require.NoError(t, os.WriteFile(alternateFilePath, tc.fileContent, perm.SharedFile)) + require.NoError(t, os.WriteFile(alternateFilePath, tc.fileContent, perm.PrivateWriteOnceFile)) poolFromRepo, err := FromRepo(ctx, logger, locator, pool.gitCmdFactory, nil, nil, nil, repo) require.Equal(t, tc.expectedErr, err) require.Nil(t, poolFromRepo) diff --git a/internal/git/reference.go b/internal/git/reference.go index e342c00630ce484e5b1108e17d8cf12bb2d3c835..c56343d86364e4f2b91873f75b5fd8196a307642 100644 --- a/internal/git/reference.go +++ b/internal/git/reference.go @@ -9,6 +9,23 @@ import ( "strings" ) +// ReferenceUpdate describes the state of a reference's old and new tip in an update. +type ReferenceUpdate struct { + // OldOID is the old OID the reference is expected to point to prior to updating it. + // If the reference does not point to the old value, the reference verification fails. + OldOID ObjectID + // NewOID is the new desired OID to point the reference to. + NewOID ObjectID + // OldTarget is the expected target for a symbolic reference. + OldTarget ReferenceName + // NewTarget stores the desired target for a symbolic reference. 
+ NewTarget ReferenceName +} + +// ReferenceUpdates contains references to update. Reference name is used as the key and the value +// is the expected old tip and the desired new tip. +type ReferenceUpdates map[ReferenceName]ReferenceUpdate + // InternalReferenceType is the type of an internal reference. type InternalReferenceType int diff --git a/internal/git/reftable.go b/internal/git/reftable.go new file mode 100644 index 0000000000000000000000000000000000000000..fc56ea9c68dc313364e3c1f7c826065d8d8c6eab --- /dev/null +++ b/internal/git/reftable.go @@ -0,0 +1,331 @@ +package git + +import ( + "bytes" + "encoding/binary" + "encoding/hex" + "fmt" + "math/big" +) + +type reftableHeader struct { + Name [4]byte + Version uint8 + BlockSize [3]byte + MinUpdateIndex uint64 + MaxUpdateIndex uint64 + // HashID is only present if version is 2 + HashID [4]byte +} + +type reftableFooterBase struct { + Name [4]byte + Version uint8 + BlockSize [3]byte + MinUpdateIndex uint64 + MaxUpdateIndex uint64 +} + +type reftableFooterEnd struct { + RefIndexOffset uint64 + ObjectOffsetAndLen uint64 + ObjectIndexOffset uint64 + LogOffset uint64 + LogIndexPosition uint64 + CR32 uint32 +} + +type reftableFooter struct { + reftableFooterBase + HashID [4]byte + reftableFooterEnd +} + +type reftableBlock struct { + BlockStart uint + FullBlockSize uint + HeaderOffset uint + RestartCount uint16 + RestartStart uint +} + +type reftable struct { + blockSize *uint + headerSize uint + footerSize uint + size uint + src []byte + header *reftableHeader + footer *reftableFooter +} + +// shaFormat maps reftable sha format to Gitaly's hash object. +func (t *reftable) shaFormat() ObjectHash { + if t.footer.Version == 2 && bytes.Equal(t.footer.HashID[:], []byte("s256")) { + return ObjectHashSHA256 + } + return ObjectHashSHA1 +} + +// parseBlockSize parses the table's header for the block size. 
+func (t *reftable) parseBlockSize() uint { + if t.blockSize == nil { + blockSize := uint(big.NewInt(0).SetBytes(t.header.BlockSize[:]).Uint64()) + t.blockSize = &blockSize + } + + return *t.blockSize +} + +// getBlockRange provides the abs block range if the block is smaller +// than the table. +func (t *reftable) getBlockRange(offset, size uint) (uint, uint) { + if offset >= t.size { + return 0, 0 + } + + if offset+size > t.size { + size = t.size - offset + } + + return offset, offset + size +} + +// extractBlockLen extracts the block length from a given location. +func (t *reftable) extractBlockLen(blockStart uint) uint { + return uint(big.NewInt(0).SetBytes(t.src[blockStart+1 : blockStart+4]).Uint64()) +} + +// getVarInt parses a variable int and increases the index. +func (t *reftable) getVarInt(start uint, blockEnd uint) (uint, uint, error) { + var val uint + + val = uint(t.src[start]) & 0x7f + + for (uint(t.src[start]) & 0x80) > 0 { + start++ + if start > blockEnd { + return 0, 0, fmt.Errorf("exceeded block length") + } + + val = ((val + 1) << 7) | (uint(t.src[start]) & 0x7f) + } + + return start + 1, val, nil +} + +// getRefsFromBlock provides the ref updates from a reference block. 
+func (t *reftable) getRefsFromBlock(b *reftableBlock) (ReferenceUpdates, error) { + u := make(map[ReferenceName]ReferenceUpdate) + + prefix := "" + + // Skip the block_type and block_len + idx := b.BlockStart + 4 + + for idx < b.RestartStart { + var prefixLength, suffixLength, updateIndexDelta uint + var err error + + idx, prefixLength, err = t.getVarInt(idx, b.RestartStart) + if err != nil { + return u, fmt.Errorf("getting prefix length: %w", err) + } + + idx, suffixLength, err = t.getVarInt(idx, b.RestartStart) + if err != nil { + return u, fmt.Errorf("getting suffix length: %w", err) + } + + extra := (suffixLength & 0x7) + suffixLength >>= 3 + + refname := prefix[:prefixLength] + string(t.src[idx:idx+suffixLength]) + idx = idx + suffixLength + + idx, updateIndexDelta, err = t.getVarInt(idx, b.FullBlockSize) + if err != nil { + return u, fmt.Errorf("getting update index delta: %w", err) + } + // we don't use this for now + _ = updateIndexDelta + + refUpdate := ReferenceUpdate{} + + switch extra { + case 0: + // Deletion, no value + refUpdate.NewOID = t.shaFormat().ZeroOID + case 1: + // Regular reference + hashSize := t.shaFormat().Hash().Size() + refUpdate.NewOID = ObjectID(hex.EncodeToString(t.src[idx : idx+uint(hashSize)])) + + idx += uint(hashSize) + case 2: + // Peeled Tag + hashSize := t.shaFormat().Hash().Size() + refUpdate.NewOID = ObjectID(hex.EncodeToString(t.src[idx : idx+uint(hashSize)])) + + idx += uint(hashSize) + + // For now we don't need the peeledOID, but we still need + // to skip the index. 
+ // peeledOID := ObjectID(bytesToHex(t.src[idx : idx+uint(hashSize)])) + idx += uint(hashSize) + case 3: + // Symref + var size uint + idx, size, err = t.getVarInt(idx, b.FullBlockSize) + if err != nil { + return u, fmt.Errorf("getting symref size: %w", err) + } + + refUpdate.NewTarget = ReferenceName(t.src[idx : idx+size]) + idx = idx + size + } + + u[ReferenceName(refname)] = refUpdate + prefix = refname + } + + return u, nil +} + +// parseRefBlock parses a block and if it is a ref block, provides +// all the reference updates. +func (t *reftable) parseRefBlock(headerOffset, blockStart, blockEnd uint) (ReferenceUpdates, error) { + currentBS := t.extractBlockLen(blockStart + headerOffset) + + fullBlockSize := t.parseBlockSize() + if fullBlockSize == 0 { + fullBlockSize = currentBS + } else if currentBS < fullBlockSize && currentBS < (blockEnd-blockStart) && t.src[blockStart+currentBS] != 0 { + fullBlockSize = currentBS + } + + b := &reftableBlock{ + BlockStart: blockStart + headerOffset, + FullBlockSize: fullBlockSize, + } + + if err := binary.Read(bytes.NewBuffer(t.src[blockStart+currentBS-2:]), binary.BigEndian, &b.RestartCount); err != nil { + return nil, fmt.Errorf("reading restart count: %w", err) + } + + b.RestartStart = blockStart + currentBS - 2 - 3*uint(b.RestartCount) + + return t.getRefsFromBlock(b) +} + +// IterateRefs provides all the refs present in a table. +func (t *reftable) IterateRefs() (ReferenceUpdates, error) { + if t.footer == nil { + return nil, fmt.Errorf("table not instantiated") + } + + offset := uint(0) + allUpdates := make(map[ReferenceName]ReferenceUpdate) + + for offset < t.size { + headerOffset := uint(0) + if offset == 0 { + headerOffset = t.headerSize + } + + blockStart, blockEnd := t.getBlockRange(offset, t.parseBlockSize()) + if blockStart == 0 && blockEnd == 0 { + break + } + + // If we run out of ref blocks, we can stop the iteration. 
+ if t.src[blockStart+headerOffset] != 'r' { + return nil, nil + } + + u, err := t.parseRefBlock(headerOffset, blockStart, blockEnd) + if err != nil { + return nil, fmt.Errorf("parsing block: %w", err) + } + + if u == nil { + break + } + + for ref, val := range u { + allUpdates[ref] = val + } + + offset = blockEnd + } + + return allUpdates, nil +} + +// NewReftable instantiates a new reftable from the given reftable content. +func NewReftable(content []byte) (*reftable, error) { + t := &reftable{src: content} + block := t.src[0:28] + + var h reftableHeader + if err := binary.Read(bytes.NewBuffer(block), binary.BigEndian, &h); err != nil { + return nil, fmt.Errorf("reading header: %w", err) + } + + if !bytes.Equal(h.Name[:], []byte("REFT")) { + return nil, fmt.Errorf("unexpected header name: %s", h.Name) + } + + if h.Version != 1 && h.Version != 2 { + return nil, fmt.Errorf("unexpected reftable version: %d", h.Version) + } + + t.header = &h + + t.footerSize = uint(68) + t.headerSize = uint(24) + if h.Version == 2 { + t.footerSize = 72 + t.headerSize = 28 + } + t.size = uint(len(t.src)) - t.footerSize + + block = t.src[t.size:len(t.src)] + + var f reftableFooter + if err := binary.Read(bytes.NewBuffer(block), binary.BigEndian, &f.reftableFooterBase); err != nil { + return nil, fmt.Errorf("reading footer: %w", err) + } + + if f.Name != h.Name || + f.Version != h.Version || + !bytes.Equal(f.BlockSize[:], h.BlockSize[:]) || + f.MinUpdateIndex != h.MinUpdateIndex || + f.MaxUpdateIndex != h.MaxUpdateIndex { + return nil, fmt.Errorf("footer doesn't match header") + } + + if h.Version == 2 { + if err := binary.Read(bytes.NewBuffer(block[t.headerSize:]), binary.BigEndian, &f.HashID); err != nil { + return nil, fmt.Errorf("reading hash ID: %w", err) + } + + if f.HashID != h.HashID { + return nil, fmt.Errorf("footer doesn't match header") + } + + if err := binary.Read(bytes.NewBuffer(block[t.headerSize+4:]), binary.BigEndian, &f.reftableFooterEnd); err != nil { + return nil, 
fmt.Errorf("reading footer: %w", err) + } + } else { + if err := binary.Read(bytes.NewBuffer(block[t.headerSize:]), binary.BigEndian, &f.reftableFooterEnd); err != nil { + return nil, fmt.Errorf("reading footer: %w", err) + } + } + + // TODO: CRC32 validation of the data + // https://gitlab.com/gitlab-org/git/-/blob/master/reftable/reader.c#L143 + t.footer = &f + + return t, nil +} diff --git a/internal/git/reftable_test.go b/internal/git/reftable_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3bebc3ba4d87591b44b09072a0c9a58e226377c7 --- /dev/null +++ b/internal/git/reftable_test.go @@ -0,0 +1,224 @@ +package git_test + +import ( + "fmt" + "io" + "log" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/require" + "gitlab.com/gitlab-org/gitaly/v16/internal/git" + "gitlab.com/gitlab-org/gitaly/v16/internal/git/gittest" + "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper" + "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper/testcfg" +) + +func getReftables(repoPath string) []string { + tables := []string{} + + reftablePath := filepath.Join(repoPath, "reftable") + + files, err := os.ReadDir(reftablePath) + if err != nil { + log.Fatal(err) + } + + for _, file := range files { + if filepath.Base(file.Name()) == "tables.list" { + continue + } + + tables = append(tables, filepath.Join(reftablePath, file.Name())) + } + + return tables +} + +func TestParseReftable(t *testing.T) { + t.Parallel() + + if !testhelper.IsReftableEnabled() { + t.Skip("tests are reftable specific") + } + + ctx := testhelper.Context(t) + cfg := testcfg.Build(t) + + tableName := [4]byte{} + n, err := strings.NewReader("REFT").Read(tableName[:]) + require.NoError(t, err) + require.Equal(t, 4, n) + + type setupData struct { + repoPath string + updates git.ReferenceUpdates + } + + for _, tc := range []struct { + name string + setup func() setupData + expectedErr error + }{ + { + name: "single ref", + setup: func() setupData { + _, 
repoPath := gittest.CreateRepository(t, ctx, cfg, gittest.CreateRepositoryConfig{ + SkipCreationViaService: true, + }) + + mainCommit := gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch("main")) + + return setupData{ + repoPath: repoPath, + updates: git.ReferenceUpdates{ + "HEAD": {NewTarget: "refs/heads/main"}, + "refs/heads/main": {NewOID: mainCommit}, + }, + } + }, + }, + { + name: "single ref + annotated tag", + setup: func() setupData { + _, repoPath := gittest.CreateRepository(t, ctx, cfg, gittest.CreateRepositoryConfig{ + SkipCreationViaService: true, + }) + + mainCommit := gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch("main")) + annotatedTag := gittest.WriteTag(t, cfg, repoPath, "v2.0.0", mainCommit.Revision(), gittest.WriteTagConfig{ + Message: "annotated tag", + }) + + return setupData{ + repoPath: repoPath, + updates: git.ReferenceUpdates{ + "HEAD": {NewTarget: "refs/heads/main"}, + "refs/heads/main": {NewOID: mainCommit}, + "refs/tags/v2.0.0": {NewOID: annotatedTag}, + }, + } + }, + }, + { + name: "two refs without prefix compression", + setup: func() setupData { + _, repoPath := gittest.CreateRepository(t, ctx, cfg, gittest.CreateRepositoryConfig{ + SkipCreationViaService: true, + }) + + mainCommit := gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch("main")) + rootRefCommit := gittest.WriteCommit(t, cfg, repoPath, gittest.WithReference("ROOTREF")) + + return setupData{ + repoPath: repoPath, + updates: git.ReferenceUpdates{ + "HEAD": {NewTarget: "refs/heads/main"}, + "refs/heads/main": {NewOID: mainCommit}, + "ROOTREF": {NewOID: rootRefCommit}, + }, + } + }, + }, + { + name: "two refs with prefix compression", + setup: func() setupData { + _, repoPath := gittest.CreateRepository(t, ctx, cfg, gittest.CreateRepositoryConfig{ + SkipCreationViaService: true, + }) + + mainCommit := gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch("main")) + masterCommit := gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch("master")) 
+ + return setupData{ + repoPath: repoPath, + updates: git.ReferenceUpdates{ + "HEAD": {NewTarget: "refs/heads/main"}, + "refs/heads/main": {NewOID: mainCommit}, + "refs/heads/master": {NewOID: masterCommit}, + }, + } + }, + }, + { + name: "multiple refs with different commit IDs", + setup: func() setupData { + _, repoPath := gittest.CreateRepository(t, ctx, cfg, gittest.CreateRepositoryConfig{ + SkipCreationViaService: true, + }) + + mainCommit := gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch("main")) + masterCommit := gittest.WriteCommit(t, cfg, repoPath, gittest.WithParents(mainCommit), gittest.WithBranch("master")) + + return setupData{ + repoPath: repoPath, + updates: git.ReferenceUpdates{ + "HEAD": {NewTarget: "refs/heads/main"}, + "refs/heads/main": {NewOID: mainCommit}, + "refs/heads/master": {NewOID: masterCommit}, + }, + } + }, + }, + { + name: "multiple blocks in table", + setup: func() setupData { + _, repoPath := gittest.CreateRepository(t, ctx, cfg, gittest.CreateRepositoryConfig{ + SkipCreationViaService: true, + }) + + updates := make(map[git.ReferenceName]git.ReferenceUpdate) + + updates["HEAD"] = git.ReferenceUpdate{NewTarget: "refs/heads/main"} + + for i := 0; i < 200; i++ { + branch := fmt.Sprintf("branch%d", i) + updates[git.ReferenceName(fmt.Sprintf("refs/heads/%s", branch))] = git.ReferenceUpdate{ + NewOID: gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch(branch)), + } + } + + return setupData{ + repoPath: repoPath, + updates: updates, + } + }, + }, + } { + tc := tc + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + setup := tc.setup() + + repoPath := setup.repoPath + + // pack-refs so there is only one table + gittest.Exec(t, cfg, "-C", repoPath, "pack-refs") + reftablePath := getReftables(repoPath)[0] + + file, err := os.Open(reftablePath) + require.NoError(t, err) + defer file.Close() + + buf, err := io.ReadAll(file) + require.NoError(t, err) + + table, err := git.NewReftable(buf) + require.NoError(t, err) + + u, 
err := table.IterateRefs() + require.NoError(t, err) + + require.Equal(t, len(setup.updates), len(u)) + for ref, expectedUpdate := range setup.updates { + update, ok := u[ref] + require.True(t, ok) + require.Equal(t, expectedUpdate, update) + } + }) + } +} diff --git a/internal/git/remoterepo/repository_test.go b/internal/git/remoterepo/repository_test.go index 513a1d1cf110cb8757a809241d63a617923d9011..678f5405c4207d3b1c27eb5e814cba03c0bf015a 100644 --- a/internal/git/remoterepo/repository_test.go +++ b/internal/git/remoterepo/repository_test.go @@ -117,7 +117,7 @@ func TestRepository_ObjectHash(t *testing.T) { "[extensions]", "objectFormat = blake2b", }, "\n"), - ), perm.SharedFile)) + ), perm.PrivateWriteOnceFile)) repo, err := remoterepo.New(ctx, repoProto, pool) require.NoError(t, err) diff --git a/internal/git/stats/repository_info_test.go b/internal/git/stats/repository_info_test.go index 910d2282fbb49d8b493091baa21c31dd5a15cc84..f9aa26878a4cc62bb38036c3d0355271f63fa6fd 100644 --- a/internal/git/stats/repository_info_test.go +++ b/internal/git/stats/repository_info_test.go @@ -1029,7 +1029,7 @@ func TestReferencesInfoForRepository(t *testing.T) { // We just write some random garbage -- we don't verify contents // anyway, but just the size. And testing like that is at least // deterministic as we don't have to special-case hash sizes. - require.NoError(t, os.WriteFile(filepath.Join(repoPath, "packed-refs"), []byte("content"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(repoPath, "packed-refs"), []byte("content"), perm.PrivateWriteOnceFile)) }, expectedInfo: ReferencesInfo{ ReferenceBackendName: gittest.DefaultReferenceBackend.Name, @@ -1050,7 +1050,7 @@ func TestReferencesInfoForRepository(t *testing.T) { // We just write some random garbage -- we don't verify contents // anyway, but just the size. And testing like that is at least // deterministic as we don't have to special-case hash sizes. 
- require.NoError(t, os.WriteFile(filepath.Join(repoPath, "packed-refs"), []byte("content"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(repoPath, "packed-refs"), []byte("content"), perm.PrivateWriteOnceFile)) }, expectedInfo: ReferencesInfo{ ReferenceBackendName: gittest.DefaultReferenceBackend.Name, @@ -1102,7 +1102,7 @@ func TestCountLooseObjects(t *testing.T) { differentShard := filepath.Join(repoPath, "objects", "a0") require.NoError(t, os.MkdirAll(differentShard, perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(differentShard, "123456"), []byte("foobar"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(differentShard, "123456"), []byte("foobar"), perm.PrivateWriteOnceFile)) requireLooseObjectsInfo(t, repo, time.Now(), LooseObjectsInfo{ Count: 1, @@ -1118,7 +1118,7 @@ func TestCountLooseObjects(t *testing.T) { for i, shard := range []string{"00", "17", "32", "ff"} { shardPath := filepath.Join(repoPath, "objects", shard) require.NoError(t, os.MkdirAll(shardPath, perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(shardPath, "123456"), make([]byte, i), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(shardPath, "123456"), make([]byte, i), perm.PrivateWriteOnceFile)) } requireLooseObjectsInfo(t, repo, time.Now(), LooseObjectsInfo{ @@ -1173,8 +1173,8 @@ func TestCountLooseObjects(t *testing.T) { shard := filepath.Join(repoPath, "objects", "17") require.NoError(t, os.MkdirAll(shard, perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(shard, "012345"), []byte("valid"), perm.SharedFile)) - require.NoError(t, os.WriteFile(filepath.Join(shard, "garbage"), []byte("garbage"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(shard, "012345"), []byte("valid"), perm.PrivateWriteOnceFile)) + require.NoError(t, os.WriteFile(filepath.Join(shard, "garbage"), []byte("garbage"), perm.PrivateWriteOnceFile)) requireLooseObjectsInfo(t, repo, time.Now(), 
LooseObjectsInfo{ Count: 1, @@ -1213,7 +1213,7 @@ func BenchmarkCountLooseObjects(b *testing.B) { objectPath := filepath.Join(repoPath, "objects", "17", "12345") require.NoError(b, os.Mkdir(filepath.Dir(objectPath), perm.PrivateDir)) - require.NoError(b, os.WriteFile(objectPath, nil, perm.SharedFile)) + require.NoError(b, os.WriteFile(objectPath, nil, perm.PrivateWriteOnceFile)) b.ResetTimer() for i := 0; i < b.N; i++ { @@ -1228,7 +1228,7 @@ func BenchmarkCountLooseObjects(b *testing.B) { for i := 0; i < 256; i++ { objectPath := filepath.Join(repoPath, "objects", fmt.Sprintf("%02x", i), "12345") require.NoError(b, os.Mkdir(filepath.Dir(objectPath), perm.PrivateDir)) - require.NoError(b, os.WriteFile(objectPath, nil, perm.SharedFile)) + require.NoError(b, os.WriteFile(objectPath, nil, perm.PrivateWriteOnceFile)) } b.ResetTimer() @@ -1257,7 +1257,7 @@ func BenchmarkCountLooseObjects(b *testing.B) { for j := 0; j < looseObjectCount; j++ { objectPath := filepath.Join(shardPath, fmt.Sprintf("%d", j)) - require.NoError(b, os.WriteFile(objectPath, nil, perm.SharedFile)) + require.NoError(b, os.WriteFile(objectPath, nil, perm.PrivateWriteOnceFile)) } } @@ -1277,7 +1277,7 @@ func BenchmarkCountLooseObjects(b *testing.B) { for j := 0; j < 1000; j++ { objectPath := filepath.Join(shardPath, fmt.Sprintf("%d", j)) - require.NoError(b, os.WriteFile(objectPath, nil, perm.SharedFile)) + require.NoError(b, os.WriteFile(objectPath, nil, perm.PrivateWriteOnceFile)) } } @@ -1311,7 +1311,7 @@ func TestPackfileInfoForRepository(t *testing.T) { seedRepository: func(t *testing.T, repoPath string) { packfileDir := filepath.Join(repoPath, "objects", "pack") require.NoError(t, os.MkdirAll(packfileDir, perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.pack"), []byte("foobar"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.pack"), []byte("foobar"), perm.PrivateWriteOnceFile)) }, expectedInfo: PackfilesInfo{ Count: 
1, @@ -1323,8 +1323,8 @@ func TestPackfileInfoForRepository(t *testing.T) { seedRepository: func(t *testing.T, repoPath string) { packfileDir := filepath.Join(repoPath, "objects", "pack") require.NoError(t, os.MkdirAll(packfileDir, perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.pack"), []byte("foobar"), perm.SharedFile)) - require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.keep"), []byte("foobar"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.pack"), []byte("foobar"), perm.PrivateWriteOnceFile)) + require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.keep"), []byte("foobar"), perm.PrivateWriteOnceFile)) }, expectedInfo: PackfilesInfo{ Count: 1, @@ -1338,8 +1338,8 @@ func TestPackfileInfoForRepository(t *testing.T) { seedRepository: func(t *testing.T, repoPath string) { packfileDir := filepath.Join(repoPath, "objects", "pack") require.NoError(t, os.MkdirAll(packfileDir, perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.pack"), []byte("foobar"), perm.SharedFile)) - require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.mtimes"), []byte("foobar"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.pack"), []byte("foobar"), perm.PrivateWriteOnceFile)) + require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.mtimes"), []byte("foobar"), perm.PrivateWriteOnceFile)) }, expectedInfo: PackfilesInfo{ Count: 1, @@ -1353,8 +1353,8 @@ func TestPackfileInfoForRepository(t *testing.T) { seedRepository: func(t *testing.T, repoPath string) { packfileDir := filepath.Join(repoPath, "objects", "pack") require.NoError(t, os.MkdirAll(packfileDir, perm.PrivateDir)) - require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.pack"), []byte("foobar"), perm.SharedFile)) - require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-bar.pack"), 
[]byte("123"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-foo.pack"), []byte("foobar"), perm.PrivateWriteOnceFile)) + require.NoError(t, os.WriteFile(filepath.Join(packfileDir, "pack-bar.pack"), []byte("123"), perm.PrivateWriteOnceFile)) }, expectedInfo: PackfilesInfo{ Count: 2, @@ -1425,7 +1425,7 @@ func TestPackfileInfoForRepository(t *testing.T) { gittest.WriteCommit(t, cfg, repoPath, gittest.WithMessage("second"), gittest.WithBranch("second")) gittest.Exec(t, cfg, "-c", "pack.writeReverseIndex=true", "-C", repoPath, "repack", "-db", "--write-midx") - require.NoError(t, os.WriteFile(filepath.Join(repoPath, "objects", "pack", "garbage"), []byte("1"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(repoPath, "objects", "pack", "garbage"), []byte("1"), perm.PrivateWriteOnceFile)) }, expectedInfo: PackfilesInfo{ Count: 2, @@ -1750,7 +1750,7 @@ func TestBitmapInfoForPath(t *testing.T) { desc: "header is too short", setup: func(t *testing.T) string { bitmapPath := filepath.Join(testhelper.TempDir(t), "bitmap") - require.NoError(t, os.WriteFile(bitmapPath, []byte{0, 0, 0}, perm.SharedFile)) + require.NoError(t, os.WriteFile(bitmapPath, []byte{0, 0, 0}, perm.PrivateWriteOnceFile)) return bitmapPath }, expectedErr: fmt.Errorf("reading bitmap header: %w", io.ErrUnexpectedEOF), @@ -1761,7 +1761,7 @@ func TestBitmapInfoForPath(t *testing.T) { bitmapPath := filepath.Join(testhelper.TempDir(t), "bitmap") require.NoError(t, os.WriteFile(bitmapPath, []byte{ 'B', 'I', 'T', 'O', 0, 0, 0, 0, - }, perm.SharedFile)) + }, perm.PrivateWriteOnceFile)) return bitmapPath }, expectedErr: fmt.Errorf("invalid bitmap signature: %q", "BITO"), @@ -1772,7 +1772,7 @@ func TestBitmapInfoForPath(t *testing.T) { bitmapPath := filepath.Join(testhelper.TempDir(t), "bitmap") require.NoError(t, os.WriteFile(bitmapPath, []byte{ 'B', 'I', 'T', 'M', 0, 2, 0, 0, - }, perm.SharedFile)) + }, perm.PrivateWriteOnceFile)) return bitmapPath }, 
expectedErr: fmt.Errorf("unsupported version: 2"), diff --git a/internal/gitaly/config/config.go b/internal/gitaly/config/config.go index 4f4587383835cd7e52a470f0535f2b134e9ae9f0..765bf9b04e963bf9840e4d437cc77b120197bcd9 100644 --- a/internal/gitaly/config/config.go +++ b/internal/gitaly/config/config.go @@ -623,6 +623,8 @@ type BundleURIConfig struct { // GoCloudURL is the blob storage GoCloud URL that will be used to store // Git bundles for Bundle-URI use. GoCloudURL string `toml:"go_cloud_url,omitempty" json:"go_cloud_url,omitempty"` + // Autogeneration controls whether or not bundles for bundle uris are auto generated + Autogeneration bool `toml:"autogeneration,omitempty" json:"autogeneration"` } // Validate runs validation on all fields and returns any errors found. @@ -1227,6 +1229,9 @@ type Raft struct { // InitialMembers contains the list of initial members of the cluster. It's a map of NodeID to // RaftAddr. Due to limitations of the TOML format, it's not possible to set the map key as a uint64. InitialMembers map[string]string `toml:"initial_members" json:"initial_members"` + // ReplicationFactor defines the number of nodes where data of this storage are replicated, + // including the original storage. + ReplicationFactor uint64 `toml:"replication_factor" json:"replication_factor"` // RTTMilliseconds is the maximum round trip between two nodes in the cluster. It's used to // calculate multiple types of timeouts of Raft protocol. RTTMilliseconds uint64 `toml:"rtt_milliseconds" json:"rtt_milliseconds"` @@ -1246,6 +1251,9 @@ const ( // RaftDefaultHeartbeatTicks is the default heartbeat RTT for the Raft cluster. The estimated election // timeout is DefaultRTT * DefaultHeartbeatTicks. RaftDefaultHeartbeatTicks = 2 + // RaftDefaultReplicationFactor is the default number of nodes where data of this storage are + // replicated. By default, the factor is 3, which means 1 main storage + 2 replicated storages. 
+ RaftDefaultReplicationFactor = 3 ) func (r Raft) fulfillDefaults() Raft { @@ -1258,6 +1266,9 @@ func (r Raft) fulfillDefaults() Raft { if r.HeartbeatTicks == 0 { r.HeartbeatTicks = RaftDefaultHeartbeatTicks } + if r.ReplicationFactor == 0 { + r.ReplicationFactor = RaftDefaultReplicationFactor + } return r } @@ -1280,6 +1291,7 @@ func (r Raft) Validate(transactions Transactions) error { Append(cfgerror.NotEmpty(r.ClusterID), "cluster_id"). Append(cfgerror.Comparable(r.NodeID).GreaterThan(0), "node_id"). Append(cfgerror.NotEmpty(r.RaftAddr), "raft_addr"). + Append(cfgerror.Comparable(r.ReplicationFactor).GreaterThan(0), "replication_factor"). Append(cfgerror.Comparable(r.RTTMilliseconds).GreaterThan(0), "rtt_millisecond"). Append(cfgerror.Comparable(r.ElectionTicks).GreaterThan(0), "election_rtt"). Append(cfgerror.Comparable(r.HeartbeatTicks).GreaterThan(0), "heartbeat_rtt") diff --git a/internal/gitaly/config/config_test.go b/internal/gitaly/config/config_test.go index 7fdaedc51192ac5ca679eb6e7e0881f5c2c4e388..0a94f5457343b34cccb7833e9bbfd02fdd6fdc11 100644 --- a/internal/gitaly/config/config_test.go +++ b/internal/gitaly/config/config_test.go @@ -1753,7 +1753,7 @@ func TestSetupRuntimeDirectory(t *testing.T) { t.Run("validation", func(t *testing.T) { dirPath := testhelper.TempDir(t) filePath := filepath.Join(dirPath, "file") - require.NoError(t, os.WriteFile(filePath, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(filePath, nil, perm.PrivateWriteOnceFile)) for _, tc := range []struct { desc string @@ -2098,7 +2098,7 @@ func TestStorage_Validate(t *testing.T) { dirPath := testhelper.TempDir(t) filePath := filepath.Join(dirPath, "file") - require.NoError(t, os.WriteFile(filePath, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(filePath, nil, perm.PrivateWriteOnceFile)) for _, tc := range []struct { name string storage Storage @@ -2140,7 +2140,7 @@ func TestTLS_Validate(t *testing.T) { tmpDir := testhelper.TempDir(t) tmpFile := 
filepath.Join(tmpDir, "file") - require.NoError(t, os.WriteFile(tmpFile, []byte("I am not a certificate"), perm.SharedFile)) + require.NoError(t, os.WriteFile(tmpFile, []byte("I am not a certificate"), perm.PrivateWriteOnceFile)) for _, tc := range []struct { name string @@ -2241,7 +2241,7 @@ func TestGitlabShell_Validate(t *testing.T) { tmpDir := testhelper.TempDir(t) tmpFile := filepath.Join(tmpDir, "file") - require.NoError(t, os.WriteFile(tmpFile, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(tmpFile, nil, perm.PrivateWriteOnceFile)) for _, tc := range []struct { name string @@ -2667,9 +2667,10 @@ func TestRaftConfig_Validate(t *testing.T) { "2": "localhost:3002", "3": "localhost:3003", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 2, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, }, @@ -2685,9 +2686,10 @@ func TestRaftConfig_Validate(t *testing.T) { "2": "localhost:3002", "3": "localhost:3003", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 2, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2709,9 +2711,10 @@ func TestRaftConfig_Validate(t *testing.T) { "2": "localhost:3002", "3": "localhost:3003", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 2, + ReplicationFactor: 3, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2733,9 +2736,10 @@ func TestRaftConfig_Validate(t *testing.T) { "2": "localhost:3002", "3": "localhost:3003", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 2, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: 
cfgerror.ValidationErrors{ @@ -2757,9 +2761,10 @@ func TestRaftConfig_Validate(t *testing.T) { "2": "localhost:3002", "3": "localhost:3003", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 2, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2772,14 +2777,15 @@ func TestRaftConfig_Validate(t *testing.T) { { name: "empty initial members", cfgRaft: Raft{ - Enabled: true, - ClusterID: "4f04a0e2-0db8-4bfa-b846-01b5b4a093fb", - NodeID: 1, - RaftAddr: "localhost:3001", - InitialMembers: map[string]string{}, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 2, + Enabled: true, + ClusterID: "4f04a0e2-0db8-4bfa-b846-01b5b4a093fb", + NodeID: 1, + RaftAddr: "localhost:3001", + InitialMembers: map[string]string{}, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2802,9 +2808,10 @@ func TestRaftConfig_Validate(t *testing.T) { "3": "localhost:3003", "4": "", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 2, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2827,9 +2834,10 @@ func TestRaftConfig_Validate(t *testing.T) { "3": "localhost:3003", "4": "1:2:3", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 2, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2851,9 +2859,10 @@ func TestRaftConfig_Validate(t *testing.T) { "2": "localhost:3002", "3": "localhost:3003", }, - RTTMilliseconds: 0, - ElectionTicks: 20, - HeartbeatTicks: 2, + ReplicationFactor: 5, + RTTMilliseconds: 0, + ElectionTicks: 
20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2875,9 +2884,10 @@ func TestRaftConfig_Validate(t *testing.T) { "2": "localhost:3002", "3": "localhost:3003", }, - RTTMilliseconds: 200, - ElectionTicks: 0, - HeartbeatTicks: 2, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 0, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2899,9 +2909,10 @@ func TestRaftConfig_Validate(t *testing.T) { "2": "localhost:3002", "3": "localhost:3003", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 0, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 0, }, cfgTransactions: Transactions{Enabled: true}, expectedErr: cfgerror.ValidationErrors{ @@ -2923,9 +2934,10 @@ func TestRaftConfig_Validate(t *testing.T) { "2": "localhost:3002", "3": "localhost:3003", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 2, + ReplicationFactor: 5, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, }, cfgTransactions: Transactions{Enabled: false}, expectedErr: cfgerror.ValidationErrors{ @@ -2935,6 +2947,31 @@ func TestRaftConfig_Validate(t *testing.T) { ), }, }, + { + name: "invalid replication factor", + cfgRaft: Raft{ + Enabled: true, + ClusterID: "4f04a0e2-0db8-4bfa-b846-01b5b4a093fb", + NodeID: 1, + RaftAddr: "localhost:3001", + InitialMembers: map[string]string{ + "1": "localhost:3001", + "2": "localhost:3002", + "3": "localhost:3003", + }, + ReplicationFactor: 0, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 2, + }, + cfgTransactions: Transactions{Enabled: true}, + expectedErr: cfgerror.ValidationErrors{ + cfgerror.NewValidationError( + fmt.Errorf("%w: 0 is not greater than 0", cfgerror.ErrNotInRange), + "replication_factor", + ), + }, + }, } { t.Run(tc.name, func(t *testing.T) { err := tc.cfgRaft.Validate(tc.cfgTransactions) @@ -2965,9 +3002,10 @@ 
initial_members = {1 = "localhost:4001", 2 = "localhost:4002", 3 = "localhost:40 "2": "localhost:4002", "3": "localhost:4003", }, - RTTMilliseconds: 200, - ElectionTicks: 20, - HeartbeatTicks: 0, + ReplicationFactor: 3, + RTTMilliseconds: 200, + ElectionTicks: 20, + HeartbeatTicks: 0, }, } require.NoError(t, expectedCfg.Sanitize()) diff --git a/internal/gitaly/hook/manager.go b/internal/gitaly/hook/manager.go index b137b6fca96ffc740388a496e39db3437e309f72..c130d003cecac29f6eb66f3fd261e6eb85863f11 100644 --- a/internal/gitaly/hook/manager.go +++ b/internal/gitaly/hook/manager.go @@ -56,7 +56,7 @@ type Manager interface { // Transaction is the interface of storagemgr.Transaction. It's used for mocking in the tests. type Transaction interface { RecordInitialReferenceValues(context.Context, map[git.ReferenceName]git.Reference) error - UpdateReferences(storagemgr.ReferenceUpdates) + UpdateReferences(git.ReferenceUpdates) Commit(context.Context) error OriginalRepository(*gitalypb.Repository) *gitalypb.Repository RewriteRepository(*gitalypb.Repository) *gitalypb.Repository diff --git a/internal/gitaly/hook/referencetransaction.go b/internal/gitaly/hook/referencetransaction.go index 420b1c444e79fb13b27237db0bf9656a0a99a5f8..7df889829de5dcfcfd2e168ad308b47b1fbdfa2f 100644 --- a/internal/gitaly/hook/referencetransaction.go +++ b/internal/gitaly/hook/referencetransaction.go @@ -10,7 +10,6 @@ import ( "strings" "gitlab.com/gitlab-org/gitaly/v16/internal/git" - "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage/storagemgr" "gitlab.com/gitlab-org/gitaly/v16/internal/transaction/voting" ) @@ -124,11 +123,11 @@ func (m *GitLabHookManager) ReferenceTransactionHook(ctx context.Context, state // parseChanges parses the changes from the reader. All updates to references lacking a 'refs/' prefix are ignored. These // are the various pseudo reference like ORIG_HEAD but also HEAD. 
See the documentation of the reference-transaction hook // for details on the format: https://git-scm.com/docs/githooks#_reference_transaction -func parseChanges(ctx context.Context, objectHash git.ObjectHash, changes io.Reader) (storagemgr.ReferenceUpdates, bool, error) { +func parseChanges(ctx context.Context, objectHash git.ObjectHash, changes io.Reader) (git.ReferenceUpdates, bool, error) { scanner := bufio.NewScanner(changes) defaultBranchUpdated := false - updates := storagemgr.ReferenceUpdates{} + updates := git.ReferenceUpdates{} for scanner.Scan() { line := scanner.Text() components := strings.Split(line, " ") @@ -144,7 +143,7 @@ func parseChanges(ctx context.Context, objectHash git.ObjectHash, changes io.Rea continue } - update := storagemgr.ReferenceUpdate{} + update := git.ReferenceUpdate{} var err error update.OldOID, err = objectHash.FromHex(components[0]) diff --git a/internal/gitaly/linguist/language_stats_test.go b/internal/gitaly/linguist/language_stats_test.go index 0f003054b9413655bad184c55a9095b0ca9e7727..0b5046513df82c4e9fe78a33bf625b8b25647b2a 100644 --- a/internal/gitaly/linguist/language_stats_test.go +++ b/internal/gitaly/linguist/language_stats_test.go @@ -51,7 +51,7 @@ func TestInitLanguageStats(t *testing.T) { { desc: "corrupt cache", run: func(t *testing.T, repo *localrepo.Repo, repoPath string) { - require.NoError(t, os.WriteFile(filepath.Join(repoPath, languageStatsFilename), []byte("garbage"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(repoPath, languageStatsFilename), []byte("garbage"), perm.PrivateWriteOnceFile)) stats, err := initLanguageStats(ctx, repo) require.Errorf(t, err, "new language stats zlib reader: invalid header") diff --git a/internal/gitaly/linguist/linguist_test.go b/internal/gitaly/linguist/linguist_test.go index 73a8dcece251ee73bb0eb26d071c12e71878d47c..bf3750d7184f6a4d2a8b331dfda009a6bdca2040 100644 --- a/internal/gitaly/linguist/linguist_test.go +++ 
b/internal/gitaly/linguist/linguist_test.go @@ -457,7 +457,7 @@ func TestInstance_Stats(t *testing.T) { gittest.TreeEntry{Path: "application.rb", Mode: "100644", Content: strings.Repeat("a", 2943)}, )) - require.NoError(t, os.WriteFile(filepath.Join(repoPath, languageStatsFilename), []byte("garbage"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(repoPath, languageStatsFilename), []byte("garbage"), perm.PrivateWriteOnceFile)) return repoProto, repoPath, commitID }, diff --git a/internal/gitaly/repoutil/custom_hooks_test.go b/internal/gitaly/repoutil/custom_hooks_test.go index 1e8aa4c8ef70dd987dcd9457d4921212d7e010a1..3e187d86f5f5c36a39ade9ccd90d991c17c0e6e3 100644 --- a/internal/gitaly/repoutil/custom_hooks_test.go +++ b/internal/gitaly/repoutil/custom_hooks_test.go @@ -348,10 +348,10 @@ func TestNewDirectoryVote(t *testing.T) { { desc: "generated hash matches with changed file mode", files: []testFile{ - {name: "pre-commit.sample", content: "foo", mode: perm.SharedFile}, + {name: "pre-commit.sample", content: "foo", mode: perm.PrivateWriteOnceFile}, {name: "pre-push.sample", content: "bar", mode: perm.PrivateExecutable}, }, - expectedHash: "c81ab4e8cca863a4e8d24c080d3daefcf5f0f8aa", + expectedHash: "ad20a4fea20e9049bb70e084e757fcc5d2cf2cc7", }, } { t.Run(tc.desc, func(t *testing.T) { diff --git a/internal/gitaly/repoutil/remove_test.go b/internal/gitaly/repoutil/remove_test.go index 087cdc8a3c12aacf72f46274523a25569be70870..13ab9ecce509a9af375e2c7d57ce070a4cb51d96 100644 --- a/internal/gitaly/repoutil/remove_test.go +++ b/internal/gitaly/repoutil/remove_test.go @@ -50,7 +50,7 @@ func TestRemove(t *testing.T) { // Simulate a concurrent RPC holding the repository lock. 
lockPath := repoPath + ".lock" - require.NoError(t, os.WriteFile(lockPath, []byte{}, perm.SharedFile)) + require.NoError(t, os.WriteFile(lockPath, []byte{}, perm.PrivateWriteOnceFile)) tb.Cleanup(func() { require.NoError(t, os.RemoveAll(lockPath)) }) diff --git a/internal/gitaly/service/dependencies.go b/internal/gitaly/service/dependencies.go index 4fc6f952f9af6bdf70ea21787b48fda7f91ff3f8..73f45e525b32a70db4daca009b52d19233f321bb 100644 --- a/internal/gitaly/service/dependencies.go +++ b/internal/gitaly/service/dependencies.go @@ -48,6 +48,7 @@ type Dependencies struct { BackupLocator backup.Locator BundleURISink *bundleuri.Sink ProcReceiveRegistry *gitalyhook.ProcReceiveRegistry + InProgressTracker *InProgressTracker } // GetLogger returns the logger. @@ -164,3 +165,8 @@ func (dc *Dependencies) GetBundleURISink() *bundleuri.Sink { func (dc *Dependencies) GetProcReceiveRegistry() *gitalyhook.ProcReceiveRegistry { return dc.ProcReceiveRegistry } + +// GetInProgressTracker returns the InProgressTracker. 
+func (dc *Dependencies) GetInProgressTracker() *InProgressTracker { + return dc.InProgressTracker +} diff --git a/internal/gitaly/service/hook/reference_transaction_test.go b/internal/gitaly/service/hook/reference_transaction_test.go index 3805fa03959686a40552ac41187ef9814895be13..e62b681bbc549a564fefbedfb9baa1f24a2dc401 100644 --- a/internal/gitaly/service/hook/reference_transaction_test.go +++ b/internal/gitaly/service/hook/reference_transaction_test.go @@ -13,7 +13,6 @@ import ( "gitlab.com/gitlab-org/gitaly/v16/internal/git/gittest" "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/hook" "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage" - "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage/storagemgr" "gitlab.com/gitlab-org/gitaly/v16/internal/grpc/backchannel" "gitlab.com/gitlab-org/gitaly/v16/internal/structerr" "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper" @@ -35,12 +34,12 @@ func (m mockTransactionRegistry) Get(id storage.TransactionID) (hook.Transaction type mockTransaction struct { hook.Transaction - updateReferencesFunc func(storagemgr.ReferenceUpdates) + updateReferencesFunc func(git.ReferenceUpdates) recordInitialReferenceValues func(context.Context, map[git.ReferenceName]git.Reference) error markDefaultBranchUpdated func() } -func (m mockTransaction) UpdateReferences(updates storagemgr.ReferenceUpdates) { +func (m mockTransaction) UpdateReferences(updates git.ReferenceUpdates) { m.updateReferencesFunc(updates) } @@ -99,7 +98,7 @@ func TestReferenceTransactionHook(t *testing.T) { expectedErr error expectedResponse *gitalypb.ReferenceTransactionHookResponse expectedReftxHash []byte - expectedReferenceUpdates storagemgr.ReferenceUpdates + expectedReferenceUpdates git.ReferenceUpdates expectedInitialValues map[git.ReferenceName]git.Reference expectedDefaultBranchUpdated bool }{ @@ -167,7 +166,7 @@ func TestReferenceTransactionHook(t *testing.T) { }, }, expectedReftxHash: stdin, - expectedReferenceUpdates: 
storagemgr.ReferenceUpdates{ + expectedReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": { OldOID: gittest.DefaultObjectHash.ZeroOID, NewOID: gittest.DefaultObjectHash.EmptyTreeOID, @@ -268,13 +267,13 @@ func TestReferenceTransactionHook(t *testing.T) { }, nil } - var actualReferenceUpdates storagemgr.ReferenceUpdates + var actualReferenceUpdates git.ReferenceUpdates var actualInitialValues map[git.ReferenceName]git.Reference var defaultBranchUpdated bool txRegistry := mockTransactionRegistry{ getFunc: func(storage.TransactionID) (hook.Transaction, error) { return mockTransaction{ - updateReferencesFunc: func(updates storagemgr.ReferenceUpdates) { + updateReferencesFunc: func(updates git.ReferenceUpdates) { actualReferenceUpdates = updates }, recordInitialReferenceValues: func(_ context.Context, initialValues map[git.ReferenceName]git.Reference) error { diff --git a/internal/gitaly/service/in_progress_tracker.go b/internal/gitaly/service/in_progress_tracker.go new file mode 100644 index 0000000000000000000000000000000000000000..bd1aa7052e2ff1e03771b1dcd77b5cb339a4b77d --- /dev/null +++ b/internal/gitaly/service/in_progress_tracker.go @@ -0,0 +1,42 @@ +package service + +import ( + "sync" +) + +// InProgressTracker can be used to keep track of processes that are in flight +type InProgressTracker struct { + inProgress map[string]int + l sync.RWMutex +} + +// NewInProgressTracker instantiates a new InProgressTracker. +func NewInProgressTracker() *InProgressTracker { + return &InProgressTracker{ + inProgress: make(map[string]int), + } +} + +// GetInProgress gets the number of inflight processes for a given key. +func (p *InProgressTracker) GetInProgress(key string) int { + p.l.RLock() + defer p.l.RUnlock() + + return p.inProgress[key] +} + +// IncrementInProgress increments the number of inflight processes for a given key. 
+func (p *InProgressTracker) IncrementInProgress(key string) { + p.l.Lock() + defer p.l.Unlock() + + p.inProgress[key]++ +} + +// DecrementInProgress decrements the number of inflight processes for a given key. +func (p *InProgressTracker) DecrementInProgress(key string) { + p.l.Lock() + defer p.l.Unlock() + + p.inProgress[key]-- +} diff --git a/internal/gitaly/service/in_progress_tracker_test.go b/internal/gitaly/service/in_progress_tracker_test.go new file mode 100644 index 0000000000000000000000000000000000000000..99178863c613e3396a47108bd9deebe1f35a1c4a --- /dev/null +++ b/internal/gitaly/service/in_progress_tracker_test.go @@ -0,0 +1,67 @@ +package service_test + +import ( + "sync" + "testing" + + "github.com/stretchr/testify/require" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/service" +) + +func TestInProgressTracker(t *testing.T) { + key := "key1" + + testCases := []struct { + desc string + expectedInProgress int + actions func(*service.InProgressTracker) + }{ + { + desc: "one in flight", + expectedInProgress: 1, + actions: func(t *service.InProgressTracker) { + t.IncrementInProgress(key) + t.IncrementInProgress(key) + t.DecrementInProgress(key) + }, + }, + { + desc: "two in flight with concurrent writes", + expectedInProgress: 2, + actions: func(t *service.InProgressTracker) { + var wg sync.WaitGroup + + wg.Add(4) + go func() { + t.IncrementInProgress(key) + wg.Done() + }() + go func() { + t.IncrementInProgress(key) + wg.Done() + }() + + go func() { + t.IncrementInProgress(key) + wg.Done() + }() + + go func() { + t.DecrementInProgress(key) + wg.Done() + }() + + wg.Wait() + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + tracker := service.NewInProgressTracker() + + tc.actions(tracker) + require.Equal(t, tc.expectedInProgress, tracker.GetInProgress(key)) + }) + } +} diff --git a/internal/gitaly/service/objectpool/alternates_test.go b/internal/gitaly/service/objectpool/alternates_test.go index 
2dfa3dbccc80bedc3e5a63b75f6cb47561ff6295..fc482fcf8aaaf763a0245dcc7ce26c2d7aaf33ce 100644 --- a/internal/gitaly/service/objectpool/alternates_test.go +++ b/internal/gitaly/service/objectpool/alternates_test.go @@ -93,7 +93,7 @@ func TestDisconnectGitAlternatesUnexpectedAlternates(t *testing.T) { altPath, err := repo.InfoAlternatesPath(ctx) require.NoError(t, err) - require.NoError(t, os.WriteFile(altPath, []byte(tc.altContent), perm.SharedFile)) + require.NoError(t, os.WriteFile(altPath, []byte(tc.altContent), perm.PrivateWriteOnceFile)) _, err = client.DisconnectGitAlternates(ctx, &gitalypb.DisconnectGitAlternatesRequest{Repository: repoProto}) require.Error(t, err) diff --git a/internal/gitaly/service/objectpool/create_test.go b/internal/gitaly/service/objectpool/create_test.go index 49452decdd9d1e3b0df2cb344a33aac545c9abf3..d0f497ee6b424d1c0e21afae09638e0411f4f105 100644 --- a/internal/gitaly/service/objectpool/create_test.go +++ b/internal/gitaly/service/objectpool/create_test.go @@ -144,7 +144,7 @@ func TestCreate_unsuccessful(t *testing.T) { lockedRelativePath := gittest.NewObjectPoolName(t) lockedFullPath := filepath.Join(cfg.Storages[0].Path, lockedRelativePath+".lock") require.NoError(t, os.MkdirAll(filepath.Dir(lockedFullPath), perm.PrivateDir)) - require.NoError(t, os.WriteFile(lockedFullPath, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(lockedFullPath, nil, perm.PrivateWriteOnceFile)) // Create a preexisting object pool. 
preexistingPool := &gitalypb.ObjectPool{ diff --git a/internal/gitaly/service/objectpool/get_test.go b/internal/gitaly/service/objectpool/get_test.go index 6ef44351badce68b021eaacaf3037e56856dab58..f05276c120eff0879600a89176a343a784ef5c88 100644 --- a/internal/gitaly/service/objectpool/get_test.go +++ b/internal/gitaly/service/objectpool/get_test.go @@ -55,7 +55,7 @@ func TestGetObjectPoolBadFile(t *testing.T) { alternatesFilePath := filepath.Join(repoPath, "objects", "info", "alternates") require.NoError(t, os.MkdirAll(filepath.Dir(alternatesFilePath), perm.PrivateDir)) - require.NoError(t, os.WriteFile(alternatesFilePath, []byte("not-a-directory"), perm.SharedFile)) + require.NoError(t, os.WriteFile(alternatesFilePath, []byte("not-a-directory"), perm.PrivateWriteOnceFile)) resp, err := client.GetObjectPool(ctx, &gitalypb.GetObjectPoolRequest{ Repository: repo, diff --git a/internal/gitaly/service/objectpool/link_test.go b/internal/gitaly/service/objectpool/link_test.go index 031c73957ec8c8e4ebdabc3928082e2ca6d46bca..3294b5216a7451a54c4616f34fd1bf7176dc2fcb 100644 --- a/internal/gitaly/service/objectpool/link_test.go +++ b/internal/gitaly/service/objectpool/link_test.go @@ -186,7 +186,7 @@ func TestLink_noClobber(t *testing.T) { require.NoFileExists(t, alternatesFile) contentBefore := "mock/objects\n" - require.NoError(t, os.WriteFile(alternatesFile, []byte(contentBefore), perm.SharedFile)) + require.NoError(t, os.WriteFile(alternatesFile, []byte(contentBefore), perm.PrivateWriteOnceFile)) request := &gitalypb.LinkRepositoryToObjectPoolRequest{ Repository: repoProto, diff --git a/internal/gitaly/service/repository/create_fork_test.go b/internal/gitaly/service/repository/create_fork_test.go index f1ff59f8dc08f15504137f3ba40ab8d78db6bca8..492ed63209ae256fe772e6844f7e157bd55d0566 100644 --- a/internal/gitaly/service/repository/create_fork_test.go +++ b/internal/gitaly/service/repository/create_fork_test.go @@ -310,7 +310,7 @@ func TestCreateFork_targetExists(t 
*testing.T) { require.NoError(t, os.WriteFile( filepath.Join(targetPath, "config"), nil, - perm.SharedFile, + perm.PrivateWriteOnceFile, )) }, expectedErr: func() error { @@ -325,7 +325,7 @@ func TestCreateFork_targetExists(t *testing.T) { desc: "target file", seed: func(t *testing.T, targetPath string) { require.NoError(t, os.MkdirAll(filepath.Dir(targetPath), perm.PrivateDir)) - require.NoError(t, os.WriteFile(targetPath, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(targetPath, nil, perm.PrivateWriteOnceFile)) }, expectedErr: func() error { if testhelper.IsWALEnabled() { diff --git a/internal/gitaly/service/repository/create_repository_from_url_test.go b/internal/gitaly/service/repository/create_repository_from_url_test.go index 9fdf60af3d9035da83d476e6d12e34b4fba771c5..3720ac23ce39b5974c654b5f211f895fca47cce1 100644 --- a/internal/gitaly/service/repository/create_repository_from_url_test.go +++ b/internal/gitaly/service/repository/create_repository_from_url_test.go @@ -142,7 +142,7 @@ testing of this scenario should be left to the relevant package. require.NoError(t, os.MkdirAll(importedRepoPath, perm.PrivateDir)) } else { require.NoError(t, os.MkdirAll(filepath.Dir(importedRepoPath), perm.PrivateDir)) - require.NoError(t, os.WriteFile(importedRepoPath, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(importedRepoPath, nil, perm.PrivateWriteOnceFile)) } t.Cleanup(func() { require.NoError(t, os.RemoveAll(importedRepoPath)) }) diff --git a/internal/gitaly/service/repository/fetch_remote_test.go b/internal/gitaly/service/repository/fetch_remote_test.go index f4ed58261a3404255bedbc7b790552c2041714ab..be7964795acb02bfb893f31772282ff86a161c3c 100644 --- a/internal/gitaly/service/repository/fetch_remote_test.go +++ b/internal/gitaly/service/repository/fetch_remote_test.go @@ -1127,7 +1127,7 @@ func TestFetchRemote_pooledRepository(t *testing.T) { // Create the pooled repository and link it to its pool. This is the // repository we're fetching into. 
pooledRepoProto, pooledRepoPath := gittest.CreateRepository(t, ctx, cfg) - require.NoError(t, os.WriteFile(filepath.Join(pooledRepoPath, "objects", "info", "alternates"), []byte(filepath.Join(poolRepoPath, "objects")), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(pooledRepoPath, "objects", "info", "alternates"), []byte(filepath.Join(poolRepoPath, "objects")), perm.PrivateWriteOnceFile)) // And then finally create a third repository that emulates the remote side // we're fetching from. We need to create at least one reference so that Git diff --git a/internal/gitaly/service/repository/fsck_test.go b/internal/gitaly/service/repository/fsck_test.go index 283be0d5e236db96f0c9f3f8c21dfa232d64aa20..365ef9a3ecc05d46e49af6a9613ff85c2f863d25 100644 --- a/internal/gitaly/service/repository/fsck_test.go +++ b/internal/gitaly/service/repository/fsck_test.go @@ -77,7 +77,7 @@ func TestFsck(t *testing.T) { // This makes the repo severely broken so that `git` does not // identify it as a proper repository anymore. 
require.NoError(t, os.RemoveAll(filepath.Join(repoPath, "objects"))) - require.NoError(t, os.WriteFile(filepath.Join(repoPath, "objects"), nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(repoPath, "objects"), nil, perm.PrivateWriteOnceFile)) setupData := setupData{ repo: repo, diff --git a/internal/gitaly/service/repository/info_attributes_test.go b/internal/gitaly/service/repository/info_attributes_test.go index 75a4e6a4a507547809d284805e09c24b52490cc8..3af71bd576d04194cdfbd0fa9d6bc27abd67bfd1 100644 --- a/internal/gitaly/service/repository/info_attributes_test.go +++ b/internal/gitaly/service/repository/info_attributes_test.go @@ -30,7 +30,7 @@ func TestGetInfoAttributesExisting(t *testing.T) { buffSize := streamio.WriteBufferSize + 1 data := bytes.Repeat([]byte("*.pbxproj binary\n"), buffSize) attrsPath := filepath.Join(infoPath, "attributes") - err := os.WriteFile(attrsPath, data, perm.SharedFile) + err := os.WriteFile(attrsPath, data, perm.PrivateWriteOnceFile) require.NoError(t, err) gitattributesContent := "*.go diff=go text\n*.md text\n*.jpg -text" diff --git a/internal/gitaly/service/repository/object_format_test.go b/internal/gitaly/service/repository/object_format_test.go index d11a7e9cf5dd6bf466da1696885925ef104a2524..1efc6acd7cce412001a06c24d0479ef5a1d55091 100644 --- a/internal/gitaly/service/repository/object_format_test.go +++ b/internal/gitaly/service/repository/object_format_test.go @@ -142,7 +142,7 @@ func TestObjectFormat(t *testing.T) { "[extensions]", "objectFormat = blake2b", }, "\n"), - ), perm.SharedFile)) + ), perm.PrivateWriteOnceFile)) return setupData{ request: &gitalypb.ObjectFormatRequest{ diff --git a/internal/gitaly/service/repository/remove_test.go b/internal/gitaly/service/repository/remove_test.go index 05d3f876e5be18aafbdd819fe68c6f30e7bfb008..4f12587271acfc1cc508d07e178e150904a594d8 100644 --- a/internal/gitaly/service/repository/remove_test.go +++ b/internal/gitaly/service/repository/remove_test.go @@ 
-83,7 +83,7 @@ logic will be removed once transaction managements is always enabled.`) // Simulate a concurrent RPC holding the repository lock. lockPath := repoPath + ".lock" - require.NoError(t, os.WriteFile(lockPath, []byte{}, perm.SharedFile)) + require.NoError(t, os.WriteFile(lockPath, []byte{}, perm.PrivateWriteOnceFile)) defer func() { require.NoError(t, os.RemoveAll(lockPath)) }() _, err := client.RemoveRepository(ctx, &gitalypb.RemoveRepositoryRequest{Repository: repo}) diff --git a/internal/gitaly/service/repository/replicate.go b/internal/gitaly/service/repository/replicate.go index 64da0ac8dbe9bcc33c8b43de6e487059c9621b8b..f810f780a678e52024930ced805cc8ccb01f6d04 100644 --- a/internal/gitaly/service/repository/replicate.go +++ b/internal/gitaly/service/repository/replicate.go @@ -352,7 +352,7 @@ func (s *server) syncGitconfig(ctx context.Context, source, target *gitalypb.Rep } configPath := filepath.Join(repoPath, "config") - if err := s.writeFile(ctx, configPath, perm.SharedFile, streamio.NewReader(func() ([]byte, error) { + if err := s.writeFile(ctx, configPath, perm.PrivateWriteOnceFile, streamio.NewReader(func() ([]byte, error) { resp, err := stream.Recv() return resp.GetData(), err })); err != nil { diff --git a/internal/gitaly/service/repository/replicate_test.go b/internal/gitaly/service/repository/replicate_test.go index f67ab3d5e7f37a148eb99eb4324a3f0b7512cdf8..eaf6b3c6914ae1d9aa87cd62e1edc52ea84bba69 100644 --- a/internal/gitaly/service/repository/replicate_test.go +++ b/internal/gitaly/service/repository/replicate_test.go @@ -105,7 +105,7 @@ func TestReplicateRepository(t *testing.T) { attrFilePath := filepath.Join(sourcePath, "info", "attributes") require.NoError(t, os.MkdirAll(filepath.Dir(attrFilePath), perm.PrivateDir)) attributesData := []byte("*.pbxproj binary\n") - require.NoError(t, os.WriteFile(attrFilePath, attributesData, perm.SharedFile)) + require.NoError(t, os.WriteFile(attrFilePath, attributesData, perm.PrivateWriteOnceFile)) 
return setupData{ source: source, diff --git a/internal/gitaly/service/repository/snapshot_test.go b/internal/gitaly/service/repository/snapshot_test.go index c2761f1039511784a5fa649ba9606554fa0cbe66..0edbe895b41f2675312840238b7615313878a9f8 100644 --- a/internal/gitaly/service/repository/snapshot_test.go +++ b/internal/gitaly/service/repository/snapshot_test.go @@ -139,7 +139,7 @@ func TestGetSnapshot(t *testing.T) { ) // The shallow file, used if the repository is a shallow clone, is also included in snapshots. - require.NoError(t, os.WriteFile(filepath.Join(repoPath, "shallow"), nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(repoPath, "shallow"), nil, perm.PrivateWriteOnceFile)) // Custom Git hooks are not included in snapshots. require.NoError(t, os.MkdirAll(filepath.Join(repoPath, "hooks"), perm.PrivateDir)) @@ -148,7 +148,7 @@ func TestGetSnapshot(t *testing.T) { require.NoError(t, os.WriteFile( filepath.Join(repoPath, "objects/this-should-not-be-included"), nil, - perm.SharedFile, + perm.PrivateWriteOnceFile, )) return setupData{ @@ -227,7 +227,7 @@ func TestGetSnapshot(t *testing.T) { require.NoError(t, os.WriteFile( altFile, []byte(fmt.Sprintf("%s\n", altObjectDir)), - perm.SharedFile, + perm.PrivateWriteOnceFile, )) gittest.RequireObjectExists(t, cfg, repoPath, commitID) diff --git a/internal/gitaly/service/server/info.go b/internal/gitaly/service/server/info.go index 8be3a772546d7f43552ce204e6d46d1bae9eff7f..1e330240ff2ffe7db72bca199a7c2201628d082c 100644 --- a/internal/gitaly/service/server/info.go +++ b/internal/gitaly/service/server/info.go @@ -3,11 +3,10 @@ package server import ( "context" "os" - "path/filepath" "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage/mode/permission" "gitlab.com/gitlab-org/gitaly/v16/internal/helper/fstype" - "gitlab.com/gitlab-org/gitaly/v16/internal/helper/perm" "gitlab.com/gitlab-org/gitaly/v16/internal/structerr" 
"gitlab.com/gitlab-org/gitaly/v16/internal/version" "gitlab.com/gitlab-org/gitaly/v16/proto/go/gitalypb" @@ -46,19 +45,14 @@ func (s *server) ServerInfo(ctx context.Context, in *gitalypb.ServerInfoRequest) }, nil } -func shardCheck(shardPath string) (readable bool, writeable bool) { - if _, err := os.Stat(shardPath); err == nil { - readable = true +func shardCheck(shardPath string) (bool, bool) { + info, err := os.Stat(shardPath) + if err != nil { + return false, false } - // the path uses a `+` to avoid naming collisions - testPath := filepath.Join(shardPath, "+testWrite") - - content := []byte("testWrite") - if err := os.WriteFile(testPath, content, perm.SharedFile); err == nil { - writeable = true - } - _ = os.Remove(testPath) + readable := info.Mode()&(permission.OwnerRead|permission.OwnerExecute) == permission.OwnerRead|permission.OwnerExecute + writable := info.Mode()&permission.OwnerWrite == permission.OwnerWrite - return + return readable, writable } diff --git a/internal/gitaly/service/smarthttp/inforefs_test.go b/internal/gitaly/service/smarthttp/inforefs_test.go index f7cc7e4d7ce05c3299eb8e36555c04121ff31486..13c395fa6ba4009469de77a361d9ddca9ab91c10 100644 --- a/internal/gitaly/service/smarthttp/inforefs_test.go +++ b/internal/gitaly/service/smarthttp/inforefs_test.go @@ -676,7 +676,7 @@ func withInfoRefCache(cache infoRefCache) ServerOpt { func replaceCachedResponse(tb testing.TB, ctx context.Context, cache *cache.DiskCache, req *gitalypb.InfoRefsRequest, newContents string) { path := pathToCachedResponse(tb, ctx, cache, req) - require.NoError(tb, os.WriteFile(path, []byte(newContents), perm.SharedFile)) + require.NoError(tb, os.WriteFile(path, []byte(newContents), perm.PrivateWriteOnceFile)) } func setInfoRefsUploadPackMethod(ctx context.Context) context.Context { diff --git a/internal/gitaly/service/smarthttp/server.go b/internal/gitaly/service/smarthttp/server.go index 528deeb82eda9a4f2efd78dd624d2237d0802c17..ffd455aa27579d6a4fe34f92a8b886186ba4edaa 
100644 --- a/internal/gitaly/service/smarthttp/server.go +++ b/internal/gitaly/service/smarthttp/server.go @@ -34,6 +34,10 @@ type server struct { backupLocator backup.Locator backupSink backup.Sink bundleURISink *bundleuri.Sink + inflightTracker *service.InProgressTracker + generateBundles bool + partitionMgr *storagemgr.PartitionManager + transactionRegistry *storagemgr.TransactionRegistry } // NewServer creates a new instance of a grpc SmartHTTPServer @@ -52,10 +56,14 @@ func NewServer(deps *service.Dependencies, serverOpts ...ServerOpt) gitalypb.Sma prometheus.CounterOpts{}, []string{"git_negotiation_feature"}, ), - infoRefCache: newInfoRefCache(deps.GetLogger(), deps.GetDiskCache()), - backupLocator: deps.GetBackupLocator(), - backupSink: deps.GetBackupSink(), - bundleURISink: deps.GetBundleURISink(), + infoRefCache: newInfoRefCache(deps.GetLogger(), deps.GetDiskCache()), + backupLocator: deps.GetBackupLocator(), + backupSink: deps.GetBackupSink(), + bundleURISink: deps.GetBundleURISink(), + inflightTracker: deps.GetInProgressTracker(), + generateBundles: deps.GetCfg().BundleURI.Autogeneration, + partitionMgr: deps.GetPartitionManager(), + transactionRegistry: deps.GetTransactionRegistry(), } for _, serverOpt := range serverOpts { diff --git a/internal/gitaly/service/smarthttp/upload_pack.go b/internal/gitaly/service/smarthttp/upload_pack.go index 05b5dedb7b59346707fa116468e4be90bcdd72f8..ee1173fb47c342e9eb4b77af82436f0db4a1363e 100644 --- a/internal/gitaly/service/smarthttp/upload_pack.go +++ b/internal/gitaly/service/smarthttp/upload_pack.go @@ -1,22 +1,35 @@ package smarthttp import ( + "bytes" "context" "crypto/sha1" "errors" "fmt" "io" + "time" + "github.com/grpc-ecosystem/go-grpc-middleware/logging/logrus/ctxlogrus" "gitlab.com/gitlab-org/gitaly/v16/internal/bundleuri" "gitlab.com/gitlab-org/gitaly/v16/internal/command" + "gitlab.com/gitlab-org/gitaly/v16/internal/featureflag" "gitlab.com/gitlab-org/gitaly/v16/internal/git" + 
"gitlab.com/gitlab-org/gitaly/v16/internal/git/localrepo" "gitlab.com/gitlab-org/gitaly/v16/internal/git/stats" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage/storagectx" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage/storagemgr" "gitlab.com/gitlab-org/gitaly/v16/internal/grpc/sidechannel" "gitlab.com/gitlab-org/gitaly/v16/internal/log" "gitlab.com/gitlab-org/gitaly/v16/internal/structerr" "gitlab.com/gitlab-org/gitaly/v16/proto/go/gitalypb" ) +const ( + concurrentUploadPackThreshold = 5 + bundleGenerationTimeout = 24 * time.Hour +) + func (s *server) PostUploadPackWithSidechannel(ctx context.Context, req *gitalypb.PostUploadPackWithSidechannelRequest) (*gitalypb.PostUploadPackWithSidechannelResponse, error) { repoPath, gitConfig, err := s.validateUploadPackRequest(ctx, req) if err != nil { @@ -117,21 +130,85 @@ func (s *server) runUploadPack(ctx context.Context, req *gitalypb.PostUploadPack gitConfig = append(gitConfig, bundleuri.CapabilitiesGitConfig(ctx)...) 
+ txID := storage.ExtractTransactionID(ctx) + + var originalRepo *gitalypb.Repository + + if txID != 0 { + currentTx, err := s.transactionRegistry.Get(txID) + if err != nil { + return nil, structerr.NewInternal("error getting transaction: %w", err) + } + originalRepo = currentTx.OriginalRepository(req.GetRepository()) + } else { + originalRepo = req.GetRepository() + } + + key := originalRepo.GetGlRepository() + uploadPackConfig, err := bundleuri.UploadPackGitConfig(ctx, s.bundleURISink, req.GetRepository()) if err != nil { - log.AddFields(ctx, log.Fields{"bundle_uri_error": err}) + if errors.Is(err, bundleuri.ErrBundleNotFound) && + featureflag.AutogenerateBundlesForBundleURI.IsEnabled(ctx) && + s.generateBundles && + s.inflightTracker.GetInProgress(key) > concurrentUploadPackThreshold { + + go func() { + ctx, cancel := context.WithTimeout(context.Background(), bundleGenerationTimeout) + defer cancel() + + tx, err := s.partitionMgr.Begin( + ctx, + originalRepo.GetStorageName(), + originalRepo.GetRelativePath(), + 0, + storagemgr.TransactionOptions{ + ReadOnly: true, + }, + ) + if err != nil { + ctxlogrus.Extract(ctx).WithError(err).Error("failed starting transaction") + } + + ctx = storagectx.ContextWithTransaction(ctx, tx) + + if err := s.bundleURISink.GenerateOneAtATime(ctx, localrepo.New( + s.logger, + s.locator, + s.gitCmdFactory, + s.catfileCache, + originalRepo)); err != nil { + ctxlogrus.Extract(ctx).WithError(err).Error("generate bundle") + if err := tx.Rollback(); err != nil && !errors.Is(err, storagemgr.ErrTransactionAlreadyCommitted) { + ctxlogrus.Extract(ctx).WithError(err).Error("failed rolling back transaction") + } + } + + if err := tx.Commit(ctx); err != nil && !errors.Is(err, storagemgr.ErrTransactionAlreadyCommitted) { + ctxlogrus.Extract(ctx).WithError(err).Error("committing transaction") + } + }() + } else if !errors.Is(err, bundleuri.ErrSinkMissing) { + log.AddFields(ctx, log.Fields{"bundle_uri_error": err}) + } } else { gitConfig = 
append(gitConfig, uploadPackConfig...) } + var stderr bytes.Buffer + commandOpts := []git.CmdOpt{ git.WithStdin(stdin), + git.WithStderr(&stderr), git.WithSetupStdout(), git.WithGitProtocol(s.logger, req), git.WithConfig(gitConfig...), git.WithPackObjectsHookEnv(req.GetRepository(), "http"), } + s.inflightTracker.IncrementInProgress(key) + defer s.inflightTracker.DecrementInProgress(key) + cmd, err := s.gitCmdFactory.New(ctx, req.GetRepository(), git.Command{ Name: "upload-pack", Flags: []git.Option{git.Flag{Name: "--stateless-rpc"}}, @@ -160,5 +237,6 @@ func (s *server) runUploadPack(ctx context.Context, req *gitalypb.PostUploadPack } s.logger.WithField("request_sha", fmt.Sprintf("%x", h.Sum(nil))).WithField("response_bytes", respBytes).InfoContext(ctx, "request details") + return nil, nil } diff --git a/internal/gitaly/service/smarthttp/upload_pack_test.go b/internal/gitaly/service/smarthttp/upload_pack_test.go index 944268aee543b4938740fb062642fd6205900423..8c91384c5fda4d3b91d27aee062071f725e66646 100644 --- a/internal/gitaly/service/smarthttp/upload_pack_test.go +++ b/internal/gitaly/service/smarthttp/upload_pack_test.go @@ -21,12 +21,17 @@ import ( "gitlab.com/gitlab-org/gitaly/v16/internal/bundleuri" "gitlab.com/gitlab-org/gitaly/v16/internal/featureflag" "gitlab.com/gitlab-org/gitaly/v16/internal/git" + "gitlab.com/gitlab-org/gitaly/v16/internal/git/catfile" "gitlab.com/gitlab-org/gitaly/v16/internal/git/gittest" "gitlab.com/gitlab-org/gitaly/v16/internal/git/localrepo" "gitlab.com/gitlab-org/gitaly/v16/internal/git/pktline" "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/config" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/service" "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage/keyvalue" + "gitlab.com/gitlab-org/gitaly/v16/internal/gitaly/storage/storagemgr" "gitlab.com/gitlab-org/gitaly/v16/internal/grpc/sidechannel" + "gitlab.com/gitlab-org/gitaly/v16/internal/helper" 
"gitlab.com/gitlab-org/gitaly/v16/internal/structerr" "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper" "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper/testcfg" @@ -380,6 +385,7 @@ func TestServer_PostUploadPackWithBundleURI(t *testing.T) { ctx := testhelper.Context(t) ctx = featureflag.ContextWithFeatureFlag(ctx, featureflag.BundleURI, true) + ctx = featureflag.ContextWithFeatureFlag(ctx, featureflag.AutogenerateBundlesForBundleURI, false) tempDir := testhelper.TempDir(t) keyFile, err := os.Create(filepath.Join(tempDir, "secret.key")) @@ -500,6 +506,232 @@ func TestServer_PostUploadPackWithBundleURI(t *testing.T) { } } +func TestServer_PostUploadPackAutogenerateBundles(t *testing.T) { + t.Parallel() + + ctx := testhelper.Context(t) + ctx = featureflag.ContextWithFeatureFlag(ctx, featureflag.AutogenerateBundlesForBundleURI, true) + ctx = featureflag.ContextWithFeatureFlag(ctx, featureflag.BundleURI, true) + + tempDir := testhelper.TempDir(t) + keyFile, err := os.Create(filepath.Join(tempDir, "secret.key")) + require.NoError(t, err) + _, err = keyFile.WriteString("super-secret-key") + require.NoError(t, err) + require.NoError(t, keyFile.Close()) + + testCases := []struct { + desc string + sinkDir string + setup func( + t *testing.T, + ctx context.Context, + cfg config.Cfg, + sink *bundleuri.Sink, + tracker *service.InProgressTracker, + repoProto *gitalypb.Repository, + repoPath string, + ) + expectBundleGenerated bool + verifyBundle func(*testing.T, config.Cfg, string, git.ObjectID) + }{ + { + desc: "autogeneration successful", + setup: func( + t *testing.T, + ctx context.Context, + cfg config.Cfg, + sink *bundleuri.Sink, + tracker *service.InProgressTracker, + repoProto *gitalypb.Repository, + repoPath string, + ) { + gittest.WriteCommit(t, cfg, repoPath, + gittest.WithTreeEntries(gittest.TreeEntry{Mode: "100644", Path: "README", Content: "much"}), + gittest.WithBranch("main")) + + key := repoProto.GetGlRepository() + tracker.IncrementInProgress(key) + 
tracker.IncrementInProgress(key) + tracker.IncrementInProgress(key) + tracker.IncrementInProgress(key) + tracker.IncrementInProgress(key) + tracker.IncrementInProgress(key) + }, + expectBundleGenerated: true, + verifyBundle: func(t *testing.T, cfg config.Cfg, bundlePath string, commit git.ObjectID) { + tempDir := t.TempDir() + objectFormat := gittest.DefaultObjectHash.Format + gittest.Exec(t, cfg, "init", "--object-format="+objectFormat, tempDir) + gittest.Exec(t, cfg, "-C", tempDir, "bundle", "unbundle", bundlePath) + // A new bundle is expected to be created containing the new commit + gittest.RequireObjectExists(t, cfg, tempDir, commit) + }, + }, + { + desc: "bundle already exists", + setup: func( + t *testing.T, + ctx context.Context, + cfg config.Cfg, + sink *bundleuri.Sink, + tracker *service.InProgressTracker, + repoProto *gitalypb.Repository, + repoPath string, + ) { + gittest.WriteCommit(t, cfg, repoPath, + gittest.WithTreeEntries(gittest.TreeEntry{Mode: "100644", Path: "README", Content: "much"}), + gittest.WithBranch("main")) + key := repoProto.GetGlRepository() + tracker.IncrementInProgress(key) + tracker.IncrementInProgress(key) + tracker.IncrementInProgress(key) + tracker.IncrementInProgress(key) + tracker.IncrementInProgress(key) + tracker.IncrementInProgress(key) + + repo := localrepo.NewTestRepo(t, cfg, repoProto) + require.NoError(t, sink.Generate(ctx, repo)) + }, + expectBundleGenerated: false, + verifyBundle: func(t *testing.T, cfg config.Cfg, bundlePath string, commit git.ObjectID) { + tempDir := t.TempDir() + objectFormat := gittest.DefaultObjectHash.Format + gittest.Exec(t, cfg, "init", "--object-format="+objectFormat, tempDir) + gittest.Exec(t, cfg, "-C", tempDir, "bundle", "unbundle", bundlePath) + // No new bundle is expected to be created since one already existed. 
+ gittest.RequireObjectNotExists(t, cfg, tempDir, commit) + }, + }, + { + desc: "no concurrent upload packs in flight", + setup: func( + t *testing.T, + ctx context.Context, + cfg config.Cfg, + sink *bundleuri.Sink, + tracker *service.InProgressTracker, + repoProto *gitalypb.Repository, + repoPath string, + ) { + gittest.WriteCommit(t, cfg, repoPath, + gittest.WithTreeEntries(gittest.TreeEntry{Mode: "100644", Path: "README", Content: "much"}), + gittest.WithBranch("main")) + }, + expectBundleGenerated: false, + verifyBundle: func(t *testing.T, cfg config.Cfg, bundlePath string, commit git.ObjectID) { + tempDir := t.TempDir() + gittest.Exec(t, cfg, "init", tempDir) + gittest.Exec(t, cfg, "-C", tempDir, "bundle", "unbundle", bundlePath) + // No new bundle is expected to have been created because there are no + // inflight upload pack calls. + gittest.RequireObjectNotExists(t, cfg, tempDir, commit) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + doneChan := make(chan struct{}) + errChan := make(chan error) + + var bundlePath string + + bundleGeneratedNotifier := func(path string, err error) { + bundlePath = path + + close(doneChan) + errChan <- err + } + + tracker := service.NewInProgressTracker() + + sinkDir := t.TempDir() + sink, err := bundleuri.NewSink(ctx, "file://"+sinkDir, bundleuri.WithBundleGenerationNotifier(bundleGeneratedNotifier)) + require.NoError(t, err) + + cfg := testcfg.Build(t) + logger := testhelper.NewLogger(t) + + cfg.BundleURI.Autogeneration = true + + gitCmdFactory := gittest.NewCommandFactory(t, cfg) + locator := config.NewLocator(cfg) + catfileCache := catfile.NewCache(cfg) + t.Cleanup(catfileCache.Stop) + + dbMgr, err := keyvalue.NewDBManager( + cfg.Storages, + keyvalue.NewBadgerStore, + helper.NewNullTickerFactory(), + logger, + ) + require.NoError(t, err) + t.Cleanup(dbMgr.Close) + + partitionManager, err := storagemgr.NewPartitionManager( + ctx, + cfg.Storages, + gitCmdFactory, + 
localrepo.NewFactory(logger, locator, gitCmdFactory, catfileCache), + logger, + dbMgr, + cfg.Prometheus, + nil, + ) + require.NoError(t, err) + t.Cleanup(partitionManager.Close) + + server := startSmartHTTPServerWithOptions(t, cfg, nil, []testserver.GitalyServerOpt{ + testserver.WithBundleURISink(sink), + testserver.WithLogger(logger), + testserver.WithInProgressTracker(tracker), + testserver.WithTransactionRegistry(storagemgr.NewTransactionRegistry()), + testserver.WithPartitionManager(partitionManager), + testserver.WithGitCommandFactory(gitCmdFactory), + }) + + cfg.SocketPath = server.Address() + + repoProto, repoPath := gittest.CreateRepository(t, ctx, cfg) + oldCommit := gittest.WriteCommit(t, cfg, repoPath) + newCommit := gittest.WriteCommit(t, cfg, repoPath, gittest.WithBranch("master"), gittest.WithParents(oldCommit)) + + if tc.setup != nil { + tc.setup(t, ctx, cfg, sink, tracker, repoProto, repoPath) + } + + commitInUpdatedBundle := gittest.WriteCommit(t, cfg, repoPath, + gittest.WithTreeEntries(gittest.TreeEntry{Mode: "100644", Path: "CHANGELOG", Content: "nothing changed"}), + gittest.WithBranch("main")) + + // UploadPack request is a "want" packet line followed by a packet flush, then many "have" packets followed by a packet flush. 
+ // This is explained a bit in https://git-scm.com/book/en/v2/Git-Internals-Transfer-Protocols#_downloading_data + requestBuffer := &bytes.Buffer{} + gittest.WritePktlineString(t, requestBuffer, fmt.Sprintf("want %s %s\n", newCommit, clientCapabilities)) + gittest.WritePktlineFlush(t, requestBuffer) + gittest.WritePktlineString(t, requestBuffer, fmt.Sprintf("have %s\n", oldCommit)) + gittest.WritePktlineFlush(t, requestBuffer) + + req := &gitalypb.PostUploadPackWithSidechannelRequest{Repository: repoProto} + responseBuffer, err := makePostUploadPackWithSidechannelRequest(t, ctx, cfg.SocketPath, cfg.Auth.Token, req, requestBuffer) + require.NoError(t, err) + + pack, _, _ := extractPackDataFromResponse(t, responseBuffer) + require.NotEmpty(t, pack, "Expected to find a pack file in response, found none") + + if tc.expectBundleGenerated { + <-doneChan + err := <-errChan + require.NoError(t, err) + tc.verifyBundle(t, cfg, filepath.Join(sinkDir, bundlePath), commitInUpdatedBundle) + } else { + require.Empty(t, bundlePath) + } + }) + } +} + func testServerPostUploadPackWithSideChannelValidation(t *testing.T, ctx context.Context, makeRequest requestMaker, opts ...testcfg.Option) { cfg := testcfg.Build(t, opts...) serverSocketPath := runSmartHTTPServer(t, cfg) diff --git a/internal/gitaly/service/ssh/receive_pack_test.go b/internal/gitaly/service/ssh/receive_pack_test.go index 604613c7729562e89c4766c32fe239149422fd53..d3de1390eeff7cf0280de39fc37cbe9037953f20 100644 --- a/internal/gitaly/service/ssh/receive_pack_test.go +++ b/internal/gitaly/service/ssh/receive_pack_test.go @@ -294,7 +294,7 @@ func TestReceivePack_invalidGitconfig(t *testing.T) { // Remove the config file first as files are read-only with transactions. 
configPath := filepath.Join(remoteRepoPath, "config") require.NoError(t, os.Remove(configPath)) - require.NoError(t, os.WriteFile(configPath, []byte("x x x foobar"), perm.SharedFile)) + require.NoError(t, os.WriteFile(configPath, []byte("x x x foobar"), perm.PrivateWriteOnceFile)) remoteRepo.GlProjectPath = "something" lHead, rHead, err := setupRepoAndPush(t, ctx, cfg, &gitalypb.SSHReceivePackRequest{ diff --git a/internal/gitaly/service/ssh/upload_pack_test.go b/internal/gitaly/service/ssh/upload_pack_test.go index f5d6069b56c01a8a36fc9a52ef3b94753e328da1..b8fd592838cd89ff11b145b511c04c74bf6ec2c3 100644 --- a/internal/gitaly/service/ssh/upload_pack_test.go +++ b/internal/gitaly/service/ssh/upload_pack_test.go @@ -835,7 +835,7 @@ func testUploadPackGitFailure(t *testing.T, ctx context.Context) { // Remove the config file first as files are read-only with transactions. configPath := filepath.Join(repoPath, "config") require.NoError(t, os.Remove(configPath)) - require.NoError(t, os.WriteFile(configPath, []byte("Not a valid gitconfig"), perm.SharedFile)) + require.NoError(t, os.WriteFile(configPath, []byte("Not a valid gitconfig"), perm.PrivateWriteOnceFile)) stream, err := client.SSHUploadPack(ctx) require.NoError(t, err) diff --git a/internal/gitaly/service/testhelper_test.go b/internal/gitaly/service/testhelper_test.go new file mode 100644 index 0000000000000000000000000000000000000000..43ac8d85bfd2896d10a3903f2c03d42044eac1db --- /dev/null +++ b/internal/gitaly/service/testhelper_test.go @@ -0,0 +1,11 @@ +package service_test + +import ( + "testing" + + "gitlab.com/gitlab-org/gitaly/v16/internal/testhelper" +) + +func TestMain(m *testing.M) { + testhelper.Run(m) +} diff --git a/internal/gitaly/storage/raft/manager.go b/internal/gitaly/storage/raft/manager.go index fff76b445e68f065fd4d62b7d6ed8615c10a331c..10513d234890072a64aa1cfc9f547e82e10859ec 100644 --- a/internal/gitaly/storage/raft/manager.go +++ b/internal/gitaly/storage/raft/manager.go @@ -245,19 +245,23 @@ 
func (m *Manager) Start() (returnedErr error) { // Register storage ID if not exist. Similarly, this operation is handled by the metadata group. // It will be handled by the metadata authority in the future. for storageName, storageMgr := range m.storageManagers { - if err := storageMgr.loadStorageID(m.ctx); err != nil { - return fmt.Errorf("loading storage ID: %w", err) + if err := storageMgr.loadStorageInfo(m.ctx); err != nil { + return fmt.Errorf("loading persisted storage info: %w", err) } - if storageMgr.id == 0 { - id, err := m.metadataGroup.RegisterStorage(storageName) + if storageMgr.persistedInfo == nil || storageMgr.persistedInfo.GetStorageId() == 0 { + storageInfo, err := m.metadataGroup.RegisterStorage(storageName) if err != nil { return fmt.Errorf("registering storage ID: %w", err) } - if err := storageMgr.saveStorageID(m.ctx, id); err != nil { + if err := storageMgr.saveStorageInfo(m.ctx, storageInfo); err != nil { return fmt.Errorf("saving storage ID: %w", err) } } - m.logger.WithFields(log.Fields{"storage_name": storageName, "storage_id": storageMgr.id}).Info("storage joined the cluster") + m.logger.WithFields(log.Fields{ + "storage_name": storageName, + "storage_id": storageMgr.persistedInfo.GetStorageId(), + "replication_factor": storageMgr.persistedInfo.GetReplicationFactor(), + }).Info("storage joined the cluster") } m.logger.Info("Raft cluster has started") diff --git a/internal/gitaly/storage/raft/manager_test.go b/internal/gitaly/storage/raft/manager_test.go index a7c9aade502f3599c5cc30323e1fa3dd295eeab5..70a49649078512fc3412e975e071b73cf1a9bebd 100644 --- a/internal/gitaly/storage/raft/manager_test.go +++ b/internal/gitaly/storage/raft/manager_test.go @@ -22,6 +22,21 @@ func managerTestConfig(bootstrap bool) ManagerConfig { func TestManager_Start(t *testing.T) { t.Parallel() + replicaGroups := func(i raftID, n uint64) []uint64 { + wrap := func(i raftID) uint64 { + if i.ToUint64() <= n { + return i.ToUint64() + } + return i.ToUint64() % n + } 
+ if n == 1 { + return []uint64{} + } else if n == 2 { + return []uint64{wrap(i + 1)} + } + return []uint64{wrap(i + 1), wrap(i + 2)} + } + startManager := func(t *testing.T) nodeStarter { return func(cluster *testRaftCluster, node raftID) (*testNode, error) { ctx := testhelper.Context(t) @@ -46,7 +61,7 @@ func TestManager_Start(t *testing.T) { resetManager := func(t *testing.T, m *Manager) { m.metadataGroup = nil for _, storageMgr := range m.storageManagers { - storageMgr.clearStorageID() + storageMgr.clearStorageInfo() storageMgr.nodeHost.Close() nodeHost, err := dragonboat.NewNodeHost(storageMgr.nodeHost.NodeHostConfig()) require.NoError(t, err) @@ -72,7 +87,7 @@ func TestManager_Start(t *testing.T) { ClusterId: cluster.clusterID, NextStorageId: 2, Storages: map[uint64]*gitalypb.Storage{ - 1: {StorageId: 1, Name: "storage-1"}, + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1}, }, }, clusterInfo) }) @@ -99,6 +114,12 @@ func TestManager_Start(t *testing.T) { fanOut(numNode, func(node raftID) { require.NoError(t, cluster.nodes[node].manager.Start()) + + storage := cluster.nodes[node].manager.firstStorage + require.Equal(t, storage.id.ToUint64(), storage.persistedInfo.StorageId) + require.Equal(t, storage.name, storage.persistedInfo.Name) + require.Equal(t, uint64(3), storage.persistedInfo.ReplicationFactor) + require.Equal(t, node.ToUint64(), storage.persistedInfo.NodeId) }) var expectedIDs, allocatedIDs []raftID @@ -117,9 +138,14 @@ func TestManager_Start(t *testing.T) { require.Equal(t, cluster.clusterID, clusterInfo.ClusterId) require.Equal(t, uint64(numNode+1), clusterInfo.NextStorageId) - require.Equal(t, &gitalypb.Storage{ - StorageId: storage.id.ToUint64(), Name: storage.name, - }, clusterInfo.Storages[storage.id.ToUint64()]) + expectedInfo := &gitalypb.Storage{ + StorageId: storage.id.ToUint64(), + Name: storage.name, + ReplicationFactor: 3, + NodeId: node.ToUint64(), + ReplicaGroups: replicaGroups(storage.id, uint64(numNode)), + } + 
testhelper.ProtoEqual(t, expectedInfo, clusterInfo.Storages[storage.id.ToUint64()]) }) }) }(numNode) @@ -137,6 +163,12 @@ func TestManager_Start(t *testing.T) { fanOut(2, func(node raftID) { require.NoError(t, cluster.nodes[node].manager.Start()) require.Equal(t, true, cluster.nodes[node].manager.Ready()) + + storage := cluster.nodes[node].manager.firstStorage + require.Equal(t, storage.id.ToUint64(), storage.persistedInfo.StorageId) + require.Equal(t, storage.name, storage.persistedInfo.Name) + require.Equal(t, uint64(3), storage.persistedInfo.ReplicationFactor) + require.Equal(t, node.ToUint64(), storage.persistedInfo.NodeId) }) // The quorum is reached @@ -150,13 +182,19 @@ func TestManager_Start(t *testing.T) { storage := mgr.firstStorage clusterInfo, err := mgr.ClusterInfo() + fmt.Printf("%+v %+v\n", node, clusterInfo) require.NoError(t, err) require.Equal(t, cluster.clusterID, clusterInfo.ClusterId) require.Equal(t, uint64(3), clusterInfo.NextStorageId) - require.Equal(t, &gitalypb.Storage{ - StorageId: storage.id.ToUint64(), Name: storage.name, - }, clusterInfo.Storages[storage.id.ToUint64()]) + expectedInfo := &gitalypb.Storage{ + StorageId: storage.id.ToUint64(), + Name: storage.name, + ReplicationFactor: 3, + NodeId: node.ToUint64(), + ReplicaGroups: replicaGroups(storage.id, 2), + } + testhelper.ProtoEqual(t, expectedInfo, clusterInfo.Storages[storage.id.ToUint64()]) }) // Now the third node joins. It does not matter whether the third node bootstraps the cluster. 
@@ -172,9 +210,14 @@ func TestManager_Start(t *testing.T) { require.Equal(t, cluster.clusterID, clusterInfo.ClusterId) require.Equal(t, uint64(4), clusterInfo.NextStorageId) - require.Equal(t, &gitalypb.Storage{ - StorageId: storage.id.ToUint64(), Name: storage.name, - }, clusterInfo.Storages[storage.id.ToUint64()]) + expectedInfo := &gitalypb.Storage{ + StorageId: storage.id.ToUint64(), + Name: storage.name, + ReplicationFactor: 3, + NodeId: node.ToUint64(), + ReplicaGroups: replicaGroups(storage.id, 3), + } + testhelper.ProtoEqual(t, expectedInfo, clusterInfo.Storages[storage.id.ToUint64()]) }) }) }(bootstrap) @@ -226,6 +269,12 @@ func TestManager_Start(t *testing.T) { require.EqualError(t, cluster.nodes[node].manager.Start(), "registering storage ID: storage \"storage-2\" already registered") } else { require.NoError(t, cluster.nodes[node].manager.Start()) + + storage := cluster.nodes[node].manager.firstStorage + require.Equal(t, storage.id.ToUint64(), storage.persistedInfo.StorageId) + require.Equal(t, storage.name, storage.persistedInfo.Name) + require.Equal(t, uint64(3), storage.persistedInfo.ReplicationFactor) + require.Equal(t, node.ToUint64(), storage.persistedInfo.NodeId) } if node != duplicatedNode { @@ -253,9 +302,14 @@ func TestManager_Start(t *testing.T) { require.Equal(t, cluster.clusterID, clusterInfo.ClusterId) require.Equal(t, uint64(3), clusterInfo.NextStorageId) - require.Equal(t, &gitalypb.Storage{ - StorageId: storage.id.ToUint64(), Name: storage.name, - }, clusterInfo.Storages[storage.id.ToUint64()]) + expectedInfo := &gitalypb.Storage{ + StorageId: storage.id.ToUint64(), + Name: storage.name, + ReplicationFactor: 3, + NodeId: node.ToUint64(), + ReplicaGroups: replicaGroups(storage.id, 2), + } + testhelper.ProtoEqual(t, expectedInfo, clusterInfo.Storages[storage.id.ToUint64()]) } }) }) @@ -268,6 +322,12 @@ func TestManager_Start(t *testing.T) { fanOut(3, func(node raftID) { require.NoError(t, cluster.nodes[node].manager.Start()) + + storage 
:= cluster.nodes[node].manager.firstStorage + require.Equal(t, storage.id.ToUint64(), storage.persistedInfo.StorageId) + require.Equal(t, storage.name, storage.persistedInfo.Name) + require.Equal(t, uint64(3), storage.persistedInfo.ReplicationFactor) + require.Equal(t, node.ToUint64(), storage.persistedInfo.NodeId) }) for _, node := range cluster.nodes { @@ -286,9 +346,14 @@ func TestManager_Start(t *testing.T) { require.Equal(t, cluster.clusterID, clusterInfo.ClusterId) require.Equal(t, uint64(4), clusterInfo.NextStorageId) - require.Equal(t, &gitalypb.Storage{ - StorageId: mgr.firstStorage.id.ToUint64(), Name: mgr.firstStorage.name, - }, clusterInfo.Storages[mgr.firstStorage.id.ToUint64()]) + expectedInfo := &gitalypb.Storage{ + StorageId: mgr.firstStorage.id.ToUint64(), + Name: mgr.firstStorage.name, + ReplicationFactor: 3, + NodeId: node.ToUint64(), + ReplicaGroups: replicaGroups(mgr.firstStorage.id, 3), + } + testhelper.ProtoEqual(t, expectedInfo, clusterInfo.Storages[mgr.firstStorage.id.ToUint64()]) }) }) @@ -300,6 +365,12 @@ func TestManager_Start(t *testing.T) { fanOut(3, func(node raftID) { require.NoError(t, cluster.nodes[node].manager.Start()) + + storage := cluster.nodes[node].manager.firstStorage + require.Equal(t, storage.id.ToUint64(), storage.persistedInfo.StorageId) + require.Equal(t, storage.name, storage.persistedInfo.Name) + require.Equal(t, uint64(3), storage.persistedInfo.ReplicationFactor) + require.Equal(t, node.ToUint64(), storage.persistedInfo.NodeId) }) for _, node := range cluster.nodes { @@ -319,9 +390,14 @@ func TestManager_Start(t *testing.T) { require.Equal(t, cluster.clusterID, clusterInfo.ClusterId) require.Equal(t, uint64(4), clusterInfo.NextStorageId) - require.Equal(t, &gitalypb.Storage{ - StorageId: mgr.firstStorage.id.ToUint64(), Name: mgr.firstStorage.name, - }, clusterInfo.Storages[mgr.firstStorage.id.ToUint64()]) + expectedInfo := &gitalypb.Storage{ + StorageId: mgr.firstStorage.id.ToUint64(), + Name: mgr.firstStorage.name, 
+ ReplicationFactor: 3, + NodeId: node.ToUint64(), + ReplicaGroups: replicaGroups(mgr.firstStorage.id, 3), + } + testhelper.ProtoEqual(t, expectedInfo, clusterInfo.Storages[mgr.firstStorage.id.ToUint64()]) }) }) diff --git a/internal/gitaly/storage/raft/metadata_group.go b/internal/gitaly/storage/raft/metadata_group.go index 405a37aaf83d5618bcc084b0a652d92c7eb59fdb..d0395667aeaeac187f5a9e7cd0e06c292a0cd529 100644 --- a/internal/gitaly/storage/raft/metadata_group.go +++ b/internal/gitaly/storage/raft/metadata_group.go @@ -157,35 +157,35 @@ func (g *metadataRaftGroup) tryBootstrap() (*gitalypb.Cluster, error) { // RegisterStorage requests the metadata group to allocate a unique ID for a new storage. The caller // is expected to persist the newly allocated ID. This ID is used for future interactions with the // Raft cluster. The storage name must be unique cluster-wide. -func (g *metadataRaftGroup) RegisterStorage(storageName string) (raftID, error) { +func (g *metadataRaftGroup) RegisterStorage(storageName string) (*gitalypb.Storage, error) { storageName = strings.TrimSpace(storageName) cluster, err := g.ClusterInfo() if err != nil { - return 0, err + return nil, err } for _, storage := range cluster.Storages { if storage.GetName() == storageName { - return 0, fmt.Errorf("storage %q already registered", storageName) + return nil, fmt.Errorf("storage %q already registered", storageName) } } - result, response, err := g.requestRegisterStorage(storageName) + result, response, err := g.requestRegisterStorage(storageName, g.clusterConfig) if err != nil { - return 0, fmt.Errorf("registering storage: %w", err) + return nil, fmt.Errorf("registering storage: %w", err) } switch result { case resultRegisterStorageSuccessful: - return raftID(response.GetStorage().GetStorageId()), nil + return response.GetStorage(), nil case resultStorageAlreadyRegistered: // There's a chance that storage is registered by another node while firing this request. 
We // have no choice but reject this request. - return 0, fmt.Errorf("storage %q already registered", storageName) + return nil, fmt.Errorf("storage %q already registered", storageName) case resultRegisterStorageClusterNotBootstrappedYet: // Extremely rare occasion. This case occurs when the cluster information is wiped out of // the metadata group when the register storage request is in-flight. - return 0, fmt.Errorf("cluster has not been bootstrapped") + return nil, fmt.Errorf("cluster has not been bootstrapped") default: - return 0, fmt.Errorf("unsupported update result: %d", result) + return nil, fmt.Errorf("unsupported update result: %d", result) } } @@ -236,7 +236,7 @@ func (g *metadataRaftGroup) requestBootstrapCluster() (updateResult, *gitalypb.B return requester.SyncWrite(g.ctx, &gitalypb.BootstrapClusterRequest{ClusterId: g.clusterConfig.ClusterID}) } -func (g *metadataRaftGroup) requestRegisterStorage(storageName string) (updateResult, *gitalypb.RegisterStorageResponse, error) { +func (g *metadataRaftGroup) requestRegisterStorage(storageName string, clusterCfg config.Raft) (updateResult, *gitalypb.RegisterStorageResponse, error) { requester := NewRequester[*gitalypb.RegisterStorageRequest, *gitalypb.RegisterStorageResponse]( g.nodeHost, g.groupID, g.logger, requestOption{ retry: defaultRetry, @@ -244,7 +244,11 @@ func (g *metadataRaftGroup) requestRegisterStorage(storageName string) (updateRe exponential: g.backoffProfile, }, ) - return requester.SyncWrite(g.ctx, &gitalypb.RegisterStorageRequest{StorageName: storageName}) + return requester.SyncWrite(g.ctx, &gitalypb.RegisterStorageRequest{ + StorageName: storageName, + ReplicationFactor: clusterCfg.ReplicationFactor, + NodeId: clusterCfg.NodeID, + }) } func (g *metadataRaftGroup) getLeaderState() (*gitalypb.LeaderState, error) { diff --git a/internal/gitaly/storage/raft/metadata_group_test.go b/internal/gitaly/storage/raft/metadata_group_test.go index 
266983104f0ee17de267ad0cae59a4c7c0453ed7..c0939cdc0bd3ba7db93d88ccff6878fbef6d9e51 100644 --- a/internal/gitaly/storage/raft/metadata_group_test.go +++ b/internal/gitaly/storage/raft/metadata_group_test.go @@ -279,10 +279,21 @@ func TestMetadataGroup_RegisterStorage(t *testing.T) { ptnMgr := setupTestPartitionManager(t, cfg) groups := bootstrapCluster(t, cluster, ptnMgr) + expectedReplicaGroups := [][]uint64{ + nil, // When node-1 is registered, there is no other storages + {1}, // When node-2 is registered, only node-1 is eligible. + {1, 2}, // When node-3 is registered, both node-1 and node-2 are eligible. + } for i := raftID(1); i <= 3; i++ { - id, err := groups[i].RegisterStorage(fmt.Sprintf("storage-%d", 2*i)) + info, err := groups[i].RegisterStorage(fmt.Sprintf("storage-%d", 2*i)) require.NoError(t, err) - require.Equal(t, i, id) + require.Equal(t, &gitalypb.Storage{ + StorageId: uint64(i), + Name: fmt.Sprintf("storage-%d", 2*i), + ReplicationFactor: 3, + NodeId: i.ToUint64(), + ReplicaGroups: expectedReplicaGroups[i-1], + }, info) } for i := raftID(1); i <= 3; i++ { @@ -292,9 +303,9 @@ func TestMetadataGroup_RegisterStorage(t *testing.T) { ClusterId: cluster.clusterID, NextStorageId: 4, Storages: map[uint64]*gitalypb.Storage{ - 1: {StorageId: 1, Name: "storage-2"}, - 2: {StorageId: 2, Name: "storage-4"}, - 3: {StorageId: 3, Name: "storage-6"}, + 1: {StorageId: 1, Name: "storage-2", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}}, + 2: {StorageId: 2, Name: "storage-4", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{3, 1}}, + 3: {StorageId: 3, Name: "storage-6", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{1, 2}}, }, }, clusterInfo) } @@ -310,9 +321,14 @@ func TestMetadataGroup_RegisterStorage(t *testing.T) { ptnMgr := setupTestPartitionManager(t, cfg) groups := bootstrapCluster(t, cluster, ptnMgr) - id, err := groups[1].RegisterStorage("storage-1") + info, err := groups[1].RegisterStorage("storage-1") require.NoError(t, 
err) - require.Equal(t, raftID(1), id) + require.Equal(t, &gitalypb.Storage{ + StorageId: 1, + Name: "storage-1", + ReplicationFactor: 3, + NodeId: 1, + }, info) _, err = groups[2].RegisterStorage("storage-1") require.EqualError(t, err, "storage \"storage-1\" already registered") @@ -327,7 +343,7 @@ func TestMetadataGroup_RegisterStorage(t *testing.T) { ClusterId: cluster.clusterID, NextStorageId: 2, Storages: map[uint64]*gitalypb.Storage{ - 1: {StorageId: 1, Name: "storage-1"}, + 1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1}, }, }, clusterInfo) } diff --git a/internal/gitaly/storage/raft/metadata_statemachine.go b/internal/gitaly/storage/raft/metadata_statemachine.go index 2c826e3c90ad78f306d9d358c4a4ac7753c90afa..9a0c3a14bbcf782c318b9b2994f146a6a489dd5c 100644 --- a/internal/gitaly/storage/raft/metadata_statemachine.go +++ b/internal/gitaly/storage/raft/metadata_statemachine.go @@ -14,10 +14,11 @@ import ( ) type metadataStateMachine struct { - ctx context.Context - groupID raftID - replicaID raftID - accessDB dbAccessor + ctx context.Context + groupID raftID + replicaID raftID + accessDB dbAccessor + replicaPlacement replicaPlacement } const ( @@ -210,11 +211,14 @@ func (s *metadataStateMachine) handleRegisterStorageRequest(req *gitalypb.Regist } newStorage := &gitalypb.Storage{ - StorageId: cluster.NextStorageId, - Name: req.StorageName, + StorageId: cluster.NextStorageId, + Name: req.GetStorageName(), + ReplicationFactor: req.GetReplicationFactor(), + NodeId: req.GetNodeId(), } cluster.Storages[cluster.NextStorageId] = newStorage cluster.NextStorageId++ + s.replicaPlacement.apply(cluster.Storages) response, err := anyProtoMarshal(&gitalypb.RegisterStorageResponse{Storage: newStorage}) if err != nil { @@ -294,9 +298,10 @@ var _ = Statemachine(&metadataStateMachine{}) func newMetadataStatemachine(ctx context.Context, groupID raftID, replicaID raftID, accessDB dbAccessor) *metadataStateMachine { return &metadataStateMachine{ - ctx: ctx, - 
groupID: groupID, - replicaID: replicaID, - accessDB: accessDB, + ctx: ctx, + groupID: groupID, + replicaID: replicaID, + accessDB: accessDB, + replicaPlacement: newDefaultReplicaPlacement(), } } diff --git a/internal/gitaly/storage/raft/metadata_statemachine_test.go b/internal/gitaly/storage/raft/metadata_statemachine_test.go index 26eaf2a3f085e9a8e4348b99cd0adbdd4c04fed1..4c390dbdf1fd715afcaf27f9441ee40ccf465a07 100644 --- a/internal/gitaly/storage/raft/metadata_statemachine_test.go +++ b/internal/gitaly/storage/raft/metadata_statemachine_test.go @@ -216,7 +216,7 @@ func TestMetadataStateMachine_Update(t *testing.T) { t.Run("register a new storage", func(t *testing.T) { t.Parallel() - cfg := testcfg.Build(t, testcfg.WithStorages("storage-1", "storage-2")) + cfg := testcfg.Build(t, testcfg.WithStorages("storage-1", "storage-2", "storage-3", "storage-4", "storage-5")) ctx := testhelper.Context(t) ptnMgr := setupTestPartitionManager(t, cfg) @@ -228,17 +228,26 @@ func TestMetadataStateMachine_Update(t *testing.T) { requireLastApplied(t, sm, 1) result, err := sm.Update([]statemachine.Entry{ - {Index: 2, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name})}, - {Index: 3, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[1].Name})}, + {Index: 2, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name, ReplicationFactor: 3, NodeId: 1})}, + {Index: 3, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[1].Name, ReplicationFactor: 5, NodeId: 2})}, + {Index: 4, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[2].Name, ReplicationFactor: 3, NodeId: 3})}, + {Index: 5, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[3].Name, ReplicationFactor: 3, NodeId: 4})}, + {Index: 6, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[4].Name, ReplicationFactor: 3, 
NodeId: 5})}, }) require.NoError(t, err) - require.Equal(t, []statemachine.Entry{ + + // Remember, storage registration is supposed to be distributed and async. At the time first + // update finishes, the second might not have arrived. So, an update's returned data + // consists of the changes of the time that update is processed only. + testhelper.ProtoEqual(t, []statemachine.Entry{ {Index: 2, Result: statemachine.Result{ Value: uint64(resultRegisterStorageSuccessful), Data: wrapSMMessage(t, &gitalypb.RegisterStorageResponse{ Storage: &gitalypb.Storage{ - StorageId: 1, - Name: cfg.Storages[0].Name, + StorageId: 1, + Name: cfg.Storages[0].Name, + ReplicationFactor: 3, + NodeId: 1, }, }), }}, @@ -246,25 +255,92 @@ func TestMetadataStateMachine_Update(t *testing.T) { Value: uint64(resultRegisterStorageSuccessful), Data: wrapSMMessage(t, &gitalypb.RegisterStorageResponse{ Storage: &gitalypb.Storage{ - StorageId: 2, - Name: cfg.Storages[1].Name, + StorageId: 2, + Name: cfg.Storages[1].Name, + ReplicationFactor: 5, + NodeId: 2, + ReplicaGroups: []uint64{1}, + }, + }), + }}, + {Index: 4, Result: statemachine.Result{ + Value: uint64(resultRegisterStorageSuccessful), + Data: wrapSMMessage(t, &gitalypb.RegisterStorageResponse{ + Storage: &gitalypb.Storage{ + StorageId: 3, + Name: cfg.Storages[2].Name, + ReplicationFactor: 3, + NodeId: 3, + ReplicaGroups: []uint64{1, 2}, + }, + }), + }}, + {Index: 5, Result: statemachine.Result{ + Value: uint64(resultRegisterStorageSuccessful), + Data: wrapSMMessage(t, &gitalypb.RegisterStorageResponse{ + Storage: &gitalypb.Storage{ + StorageId: 4, + Name: cfg.Storages[3].Name, + ReplicationFactor: 3, + NodeId: 4, + ReplicaGroups: []uint64{1, 2}, + }, + }), + }}, + {Index: 6, Result: statemachine.Result{ + Value: uint64(resultRegisterStorageSuccessful), + Data: wrapSMMessage(t, &gitalypb.RegisterStorageResponse{ + Storage: &gitalypb.Storage{ + StorageId: 5, + Name: cfg.Storages[4].Name, + ReplicationFactor: 3, + NodeId: 5, + ReplicaGroups: 
[]uint64{1, 2}, }, }), }}, }, result) - requireLastApplied(t, sm, 3) + requireLastApplied(t, sm, 6) + // The final state of the statemachine does have latest replica groups. requireClusterState(t, sm, &gitalypb.Cluster{ ClusterId: "1234", - NextStorageId: 3, + NextStorageId: 6, Storages: map[uint64]*gitalypb.Storage{ 1: { - StorageId: 1, - Name: cfg.Storages[0].Name, + StorageId: 1, + Name: cfg.Storages[0].Name, + ReplicationFactor: 3, + NodeId: 1, + ReplicaGroups: []uint64{2, 3}, }, 2: { - StorageId: 2, - Name: cfg.Storages[1].Name, + StorageId: 2, + Name: cfg.Storages[1].Name, + ReplicationFactor: 5, + NodeId: 2, + ReplicaGroups: []uint64{3, 4, 5, 1}, + }, + 3: { + StorageId: 3, + Name: cfg.Storages[2].Name, + ReplicationFactor: 3, + NodeId: 3, + ReplicaGroups: []uint64{4, 5}, + }, + 4: { + StorageId: 4, + Name: cfg.Storages[3].Name, + ReplicationFactor: 3, + NodeId: 4, + ReplicaGroups: []uint64{5, 1}, + }, + 5: { + StorageId: 5, + Name: cfg.Storages[4].Name, + ReplicationFactor: 3, + NodeId: 5, + ReplicaGroups: []uint64{1, 2}, }, }, }) @@ -285,8 +361,8 @@ func TestMetadataStateMachine_Update(t *testing.T) { requireLastApplied(t, sm, 1) result, err := sm.Update([]statemachine.Entry{ - {Index: 2, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name})}, - {Index: 3, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name})}, + {Index: 2, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name, ReplicationFactor: 3, NodeId: 1})}, + {Index: 3, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name, ReplicationFactor: 5, NodeId: 2})}, }) require.NoError(t, err) require.Equal(t, []statemachine.Entry{ @@ -294,8 +370,10 @@ func TestMetadataStateMachine_Update(t *testing.T) { Value: uint64(resultRegisterStorageSuccessful), Data: wrapSMMessage(t, &gitalypb.RegisterStorageResponse{ Storage: &gitalypb.Storage{ - StorageId: 1, - Name: 
cfg.Storages[0].Name, + StorageId: 1, + Name: cfg.Storages[0].Name, + ReplicationFactor: 3, + NodeId: 1, }, }), }}, @@ -310,8 +388,10 @@ func TestMetadataStateMachine_Update(t *testing.T) { NextStorageId: 2, Storages: map[uint64]*gitalypb.Storage{ 1: { - StorageId: 1, - Name: cfg.Storages[0].Name, + StorageId: 1, + Name: cfg.Storages[0].Name, + ReplicationFactor: 3, + NodeId: 1, }, }, }) @@ -329,7 +409,7 @@ func TestMetadataStateMachine_Update(t *testing.T) { require.NoError(t, err) result, err := sm.Update([]statemachine.Entry{ - {Index: 1, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name})}, + {Index: 1, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name, ReplicationFactor: 3, NodeId: 1})}, }) require.NoError(t, err) require.Equal(t, []statemachine.Entry{ @@ -459,8 +539,8 @@ func TestMetadataStateMachine_Lookup(t *testing.T) { bootstrapCluster(t, sm) _, err = sm.Update([]statemachine.Entry{ - {Index: 2, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name})}, - {Index: 3, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[1].Name})}, + {Index: 2, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[0].Name, ReplicationFactor: 3, NodeId: 1})}, + {Index: 3, Cmd: wrapSMMessage(t, &gitalypb.RegisterStorageRequest{StorageName: cfg.Storages[1].Name, ReplicationFactor: 5, NodeId: 2})}, }) require.NoError(t, err) @@ -471,8 +551,8 @@ func TestMetadataStateMachine_Lookup(t *testing.T) { ClusterId: "1234", NextStorageId: 3, Storages: map[uint64]*gitalypb.Storage{ - 1: {StorageId: 1, Name: cfg.Storages[0].Name}, - 2: {StorageId: 2, Name: cfg.Storages[1].Name}, + 1: {StorageId: 1, Name: cfg.Storages[0].Name, ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2}}, + 2: {StorageId: 2, Name: cfg.Storages[1].Name, ReplicationFactor: 5, NodeId: 2, ReplicaGroups: []uint64{1}}, }, }}, response) }) diff --git 
a/internal/gitaly/storage/raft/replica_placement.go b/internal/gitaly/storage/raft/replica_placement.go new file mode 100644 index 0000000000000000000000000000000000000000..a3b118dc4b5dc8434115b8d8d8100f5b6a02dd2c --- /dev/null +++ b/internal/gitaly/storage/raft/replica_placement.go @@ -0,0 +1,79 @@ +package raft + +import ( + "slices" + + "gitlab.com/gitlab-org/gitaly/v16/proto/go/gitalypb" +) + +// replicaPlacement is an interface for a function that appoint replicas for +// all storages of a Raft cluster. +type replicaPlacement interface { + apply(map[uint64]*gitalypb.Storage) +} + +// simpleRingReplicaPlacement implements a deterministic replica placement. It implements a simple +// ring-based placement strategy where each storage is assigned replicas based on the next storage +// nodes in the ring, wrapping around if necessary. This approach ensures a balanced distribution of +// replicas across the available storage nodes. Storages are allowed to have different replication +// factors, the default value is 3. +// +// For example, consider a scenario with 5 storages: 1, 2, 3, 4, 5. The replica placement will be: +// - Storage 1 (authority), replicas on storage 2 and storage 3 +// - Storage 2 (authority), replicas on storage 3 and storage 4 +// - Storage 3 (authority): replicas on storage 4 and storage 5 +// - Storage 4 (authority): replicas on storage 5 and storage 1 +// - Storage 5 (authority): replicas on storage 1 and storage 2 +// +// If the replication factor is more than the number of nodes, the strategy does its best to fill in +// the gaps. For example, with a replication factor of 5 and there are 3 storages, the replica +// placement will be: +// - Storage 1 (authority): replicas on storage 2 and storage 3 +// - Storage 2 (authority): replicas on storage 3 and storage 1 +// - Storage 3 (authority): replicas on storage 1 and storage 2 +// +// This strategy also takes storage residence into account. 
It means storages residing on the same +// nodes don't replicate to each other. +// +// The storages are not necessarily contiguous. Replication factor of 0 or 1 means that the storage +// does not replicate. +type simpleRingReplicaPlacement struct{} + +func (*simpleRingReplicaPlacement) apply(storages map[uint64]*gitalypb.Storage) { + var ids []uint64 + for id := range storages { + ids = append(ids, id) + } + slices.Sort(ids) + + for i := range ids { + // Reset replica groups. + storage := storages[ids[i]] + storage.ReplicaGroups = []uint64{} + j := i + for k := storage.ReplicationFactor - 1; k >= 1; k-- { + for { + j = (j + 1) % len(ids) + // Ensure the other storage is not on the same node. + if j == i || storages[ids[j]].GetNodeId() != storage.GetNodeId() { + break + } + } + // Reach the examining storage. It means there are less eligible nodes than needed. + if j == i { + break + } + storage.ReplicaGroups = append(storage.ReplicaGroups, ids[j]) + } + } +} + +func newSimpleRingReplicaPlacement() replicaPlacement { + return &simpleRingReplicaPlacement{} +} + +// newDefaultReplicaPlacement defines a factory that returns the default replica placements strategy +// used for determining replica groups. At the moment, Gitaly supports a simple ring-based placement +// strategy. When we involve replica placement strategy in the future, all members of the metadata +// Raft group must sync up to ensure they have the same replica placement strategy. 
// newDefaultReplicaPlacement defines a factory that returns the default replica placements strategy
// used for determining replica groups. At the moment, Gitaly supports a simple ring-based placement
// strategy. When more placement strategies exist in the future, all members of the metadata
// Raft group must sync up to ensure they have the same replica placement strategy.
var newDefaultReplicaPlacement = newSimpleRingReplicaPlacement

// ---- internal/gitaly/storage/raft/replica_placement_test.go (new file) ----

package raft

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/require"
	"gitlab.com/gitlab-org/gitaly/v16/proto/go/gitalypb"
)

// TestSimpleRingReplicaPlacement exercises the ring-based placement strategy
// across cluster sizes, non-contiguous storage IDs, storages sharing a node,
// mixed replication factors, and incremental storage addition/removal. Each
// subtest feeds a storage map through apply() and asserts the exact replica
// groups produced.
func TestSimpleRingReplicaPlacement(t *testing.T) {
	t.Parallel()

	t.Run("0 storage", func(t *testing.T) {
		t.Parallel()

		storages := map[uint64]*gitalypb.Storage{}
		newSimpleRingReplicaPlacement().apply(storages)
		require.Equal(t, map[uint64]*gitalypb.Storage{}, storages)
	})

	t.Run("1 storage", func(t *testing.T) {
		t.Parallel()

		storages := map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1},
		}
		newSimpleRingReplicaPlacement().apply(storages)
		// A lone storage has nobody to replicate to; groups reset to empty.
		require.Equal(t, map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{}},
		}, storages)
	})

	t.Run("2 storages", func(t *testing.T) {
		t.Parallel()

		storages := map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2},
		}
		newSimpleRingReplicaPlacement().apply(storages)
		require.Equal(t, map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2}},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{1}},
		}, storages)
	})

	t.Run("3 storages", func(t *testing.T) {
		t.Parallel()

		storages := map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2},
			3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3},
		}
		newSimpleRingReplicaPlacement().apply(storages)
		require.Equal(t, map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{3, 1}},
			3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{1, 2}},
		}, storages)
	})

	t.Run("more than 3 storages", func(t *testing.T) {
		t.Parallel()

		storages := map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2},
			3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3},
			4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 4},
			5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 5},
		}
		newSimpleRingReplicaPlacement().apply(storages)
		require.Equal(t, map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{3, 4}},
			3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{4, 5}},
			4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 4, ReplicaGroups: []uint64{5, 1}},
			5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 5, ReplicaGroups: []uint64{1, 2}},
		}, storages)
	})

	t.Run("more than 3 storages in random order", func(t *testing.T) {
		t.Parallel()

		// Map literal order must not matter: apply() sorts IDs internally.
		storages := map[uint64]*gitalypb.Storage{
			4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 4},
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1},
			5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 5},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2},
			3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3},
		}
		newSimpleRingReplicaPlacement().apply(storages)
		require.Equal(t, map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{3, 4}},
			3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{4, 5}},
			4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 4, ReplicaGroups: []uint64{5, 1}},
			5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 5, ReplicaGroups: []uint64{1, 2}},
		}, storages)
	})

	t.Run("discontinued eligible storages", func(t *testing.T) {
		t.Parallel()

		// Storage IDs are not contiguous (4 is missing); the ring skips gaps.
		storages := map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2},
			3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3},
			5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 5},
			6: {StorageId: 6, Name: "storage-6", ReplicationFactor: 3, NodeId: 6},
		}
		newSimpleRingReplicaPlacement().apply(storages)
		require.Equal(t, map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{3, 5}},
			3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{5, 6}},
			5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 5, ReplicaGroups: []uint64{6, 1}},
			6: {StorageId: 6, Name: "storage-6", ReplicationFactor: 3, NodeId: 6, ReplicaGroups: []uint64{1, 2}},
		}, storages)
	})

	t.Run("storages residing on the same node", func(t *testing.T) {
		t.Parallel()

		// Storages 1-2 share node 1 and 3-4 share node 2; a storage must
		// never pick a replica co-located on its own node.
		storages := map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 1},
			3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 2},
			4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 2},
			5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 3},
		}
		newSimpleRingReplicaPlacement().apply(storages)
		require.Equal(t, map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{3, 4}},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{3, 4}},
			3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{5, 1}},
			4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{5, 1}},
			5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{1, 2}},
		}, storages)
	})

	t.Run("storages have different replication factors", func(t *testing.T) {
		t.Parallel()

		storages := map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 5, NodeId: 2},
			3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3},
			4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 4},
			5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 1, NodeId: 5},
			6: {StorageId: 6, Name: "storage-6", ReplicationFactor: 1, NodeId: 6},
			7: {StorageId: 7, Name: "storage-7", ReplicationFactor: 3, NodeId: 7},
		}
		newSimpleRingReplicaPlacement().apply(storages)
		require.Equal(t, map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 5, NodeId: 2, ReplicaGroups: []uint64{3, 4, 5, 6}},
			3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{4, 5}},
			4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 4, ReplicaGroups: []uint64{5, 6}},
			// Replication factor 1 means no replication at all.
			5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 1, NodeId: 5, ReplicaGroups: []uint64{}},
			6: {StorageId: 6, Name: "storage-6", ReplicationFactor: 1, NodeId: 6, ReplicaGroups: []uint64{}},
			7: {StorageId: 7, Name: "storage-7", ReplicationFactor: 3, NodeId: 7, ReplicaGroups: []uint64{1, 2}},
		}, storages)
	})

	t.Run("fixup existing replica groups", func(t *testing.T) {
		t.Parallel()

		storages := map[uint64]*gitalypb.Storage{
			// Result of assigning replica groups when there are 2 storages.
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2}},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{1}},
			// This one is new.
			3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3},
		}
		newSimpleRingReplicaPlacement().apply(storages)
		require.Equal(t, map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{3, 1}},
			3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{1, 2}},
		}, storages)
	})

	t.Run("add new storages", func(t *testing.T) {
		t.Parallel()

		// Grow the cluster one storage at a time, re-running placement after
		// each addition, and assert only the final stable layout.
		storages := map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1},
		}
		for i := uint64(2); i <= 7; i++ {
			storages[i] = &gitalypb.Storage{
				StorageId:         i,
				Name:              fmt.Sprintf("storage-%d", i),
				ReplicationFactor: 3,
				NodeId:            i,
			}
			newSimpleRingReplicaPlacement().apply(storages)
		}
		require.Equal(t, map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{3, 4}},
			3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{4, 5}},
			4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 4, ReplicaGroups: []uint64{5, 6}},
			5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 5, ReplicaGroups: []uint64{6, 7}},
			6: {StorageId: 6, Name: "storage-6", ReplicationFactor: 3, NodeId: 6, ReplicaGroups: []uint64{7, 1}},
			7: {StorageId: 7, Name: "storage-7", ReplicationFactor: 3, NodeId: 7, ReplicaGroups: []uint64{1, 2}},
		}, storages)
	})

	t.Run("remove storages", func(t *testing.T) {
		t.Parallel()

		// Shrink a 7-storage cluster down to 3, re-running placement after
		// each removal; the survivors must converge to the 3-storage ring.
		storages := map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{3, 4}},
			3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{4, 5}},
			4: {StorageId: 4, Name: "storage-4", ReplicationFactor: 3, NodeId: 4, ReplicaGroups: []uint64{5, 6}},
			5: {StorageId: 5, Name: "storage-5", ReplicationFactor: 3, NodeId: 5, ReplicaGroups: []uint64{6, 7}},
			6: {StorageId: 6, Name: "storage-6", ReplicationFactor: 3, NodeId: 6, ReplicaGroups: []uint64{7, 1}},
			7: {StorageId: 7, Name: "storage-7", ReplicationFactor: 3, NodeId: 7, ReplicaGroups: []uint64{1, 2}},
		}
		for i := uint64(4); i <= 7; i++ {
			delete(storages, i)
			newSimpleRingReplicaPlacement().apply(storages)
		}
		require.Equal(t, map[uint64]*gitalypb.Storage{
			1: {StorageId: 1, Name: "storage-1", ReplicationFactor: 3, NodeId: 1, ReplicaGroups: []uint64{2, 3}},
			2: {StorageId: 2, Name: "storage-2", ReplicationFactor: 3, NodeId: 2, ReplicaGroups: []uint64{3, 1}},
			3: {StorageId: 3, Name: "storage-3", ReplicationFactor: 3, NodeId: 3, ReplicaGroups: []uint64{1, 2}},
		}, storages)
	})
}
type storageManager struct { - id raftID - name string - ptnMgr *storagemgr.PartitionManager - nodeHost *dragonboat.NodeHost + id raftID + name string + ptnMgr *storagemgr.PartitionManager + nodeHost *dragonboat.NodeHost + persistedInfo *gitalypb.Storage } // newStorageManager returns an instance of storage manager. @@ -35,10 +38,10 @@ func newStorageManager(name string, ptnMgr *storagemgr.PartitionManager, nodeHos // Close closes the storage manager. func (m *storageManager) Close() { m.nodeHost.Close() } -func (m *storageManager) loadStorageID(ctx context.Context) error { +func (m *storageManager) loadStorageInfo(ctx context.Context) error { db := m.dbForStorage() - return db(ctx, true, func(txn keyvalue.ReadWriter) error { - item, err := txn.Get([]byte("storage_id")) + return db(ctx, false, func(txn keyvalue.ReadWriter) error { + item, err := txn.Get([]byte("storage")) if err != nil { if errors.Is(err, badger.ErrKeyNotFound) { return nil @@ -46,32 +49,45 @@ func (m *storageManager) loadStorageID(ctx context.Context) error { return err } return item.Value(func(value []byte) error { - m.id.UnmarshalBinary(value) + var persistedInfo gitalypb.Storage + if err := proto.Unmarshal(value, &persistedInfo); err != nil { + return err + } + m.persistedInfo = &persistedInfo + m.id = raftID(m.persistedInfo.StorageId) return nil }) }) } -func (m *storageManager) saveStorageID(ctx context.Context, id raftID) error { +func (m *storageManager) saveStorageInfo(ctx context.Context, storage *gitalypb.Storage) error { db := m.dbForStorage() return db(ctx, false, func(txn keyvalue.ReadWriter) error { - _, err := txn.Get([]byte("storage_id")) + _, err := txn.Get([]byte("storage")) if err == nil { - return fmt.Errorf("storage ID already exists") + return fmt.Errorf("storage already exists") } else if !errors.Is(err, badger.ErrKeyNotFound) { return err } - if err := txn.Set([]byte("storage_id"), id.MarshalBinary()); err != nil { + marshaled, err := proto.Marshal(storage) + if err != nil 
{ + return err + } + if err := txn.Set([]byte("storage"), marshaled); err != nil { return err } - m.id = id + m.persistedInfo = storage + m.id = raftID(m.persistedInfo.StorageId) return nil }) } -// clearStorageID clears the storage ID inside the in-memory storage of the storage manager. It does -// not clean the underlying storage ID. -func (m *storageManager) clearStorageID() { m.id = 0 } +// clearStorageInfo clears the storage info inside the in-memory storage of the storage manager. It +// does not clean the persisted info the DB. +func (m *storageManager) clearStorageInfo() { + m.id = 0 + m.persistedInfo = nil +} func (m *storageManager) dbForStorage() dbAccessor { return func(ctx context.Context, readOnly bool, fn func(keyvalue.ReadWriter) error) error { diff --git a/internal/gitaly/storage/raft/testhelper_test.go b/internal/gitaly/storage/raft/testhelper_test.go index 36b559a7c95a0270bb5b14bd6db50e10f41635d1..455d2bb5d529d438dd6daaa39eae98257fdf42b0 100644 --- a/internal/gitaly/storage/raft/testhelper_test.go +++ b/internal/gitaly/storage/raft/testhelper_test.go @@ -218,14 +218,15 @@ func (c *testRaftCluster) createRaftConfig(node raftID) config.Raft { initialMembers[fmt.Sprintf("%d", node)] = addr } return config.Raft{ - Enabled: true, - ClusterID: c.clusterID, - NodeID: node.ToUint64(), - RaftAddr: c.initialMembers[node.ToUint64()], - InitialMembers: initialMembers, - RTTMilliseconds: config.RaftDefaultRTT, - ElectionTicks: config.RaftDefaultElectionTicks, - HeartbeatTicks: config.RaftDefaultHeartbeatTicks, + Enabled: true, + ClusterID: c.clusterID, + NodeID: node.ToUint64(), + RaftAddr: c.initialMembers[node.ToUint64()], + InitialMembers: initialMembers, + ReplicationFactor: 3, + RTTMilliseconds: config.RaftDefaultRTT, + ElectionTicks: config.RaftDefaultElectionTicks, + HeartbeatTicks: config.RaftDefaultHeartbeatTicks, } } diff --git a/internal/gitaly/storage/storagemgr/apply_operations_test.go b/internal/gitaly/storage/storagemgr/apply_operations_test.go 
index 159f2bcfba62f75b211b499ebf86b4db7b514d28..20661864dc4993d449ada3e2a34b6947683cab85 100644 --- a/internal/gitaly/storage/storagemgr/apply_operations_test.go +++ b/internal/gitaly/storage/storagemgr/apply_operations_test.go @@ -35,7 +35,7 @@ func TestApplyOperations(t *testing.T) { "parent": {Mode: fs.ModeDir | perm.PrivateDir}, "parent/relative-path": {Mode: fs.ModeDir | perm.PrivateDir}, "parent/relative-path/private-file": {Mode: perm.PrivateWriteOnceFile, Data: []byte("private")}, - "parent/relative-path/shared-file": {Mode: perm.SharedFile, Data: []byte("shared")}, + "parent/relative-path/shared-file": {Mode: perm.PrivateWriteOnceFile, Data: []byte("shared")}, "parent/relative-path/empty-dir": {Mode: fs.ModeDir | perm.PrivateDir}, "parent/relative-path/removed-dir": {Mode: fs.ModeDir | perm.PrivateDir}, "parent/relative-path/dir-with-removed-file": {Mode: fs.ModeDir | perm.PrivateDir}, diff --git a/internal/gitaly/storage/storagemgr/testhelper_test.go b/internal/gitaly/storage/storagemgr/testhelper_test.go index f3615cabff0ed14e1a36de022c6e71f24747194f..3c44c203f3bafac4d8a73ffbe25a854e4bae16da 100644 --- a/internal/gitaly/storage/storagemgr/testhelper_test.go +++ b/internal/gitaly/storage/storagemgr/testhelper_test.go @@ -718,7 +718,7 @@ type Commit struct { // SkipVerificationFailures sets the verification failure handling for this commit. SkipVerificationFailures bool // ReferenceUpdates are the reference updates to commit. - ReferenceUpdates ReferenceUpdates + ReferenceUpdates git.ReferenceUpdates // QuarantinedPacks are the packs to include in the quarantine directory of the transaction. QuarantinedPacks [][]byte // DefaultBranchUpdate is the default branch update to commit. @@ -748,7 +748,7 @@ type UpdateReferences struct { // TransactionID identifies the transaction to update references on. TransactionID int // ReferenceUpdates are the reference updates to make. 
- ReferenceUpdates ReferenceUpdates + ReferenceUpdates git.ReferenceUpdates } // SetKey calls SetKey on a transaction. @@ -908,7 +908,7 @@ type transactionTestCase struct { expectedState StateAssertion } -func performReferenceUpdates(t *testing.T, ctx context.Context, tx *Transaction, rewrittenRepo git.RepositoryExecutor, updates ReferenceUpdates) { +func performReferenceUpdates(t *testing.T, ctx context.Context, tx *Transaction, rewrittenRepo git.RepositoryExecutor, updates git.ReferenceUpdates) { tx.UpdateReferences(updates) updater, err := updateref.New(ctx, rewrittenRepo) @@ -1453,7 +1453,7 @@ func checkManagerError(t *testing.T, ctx context.Context, managerErrChannel chan t.Helper() testTransaction := &Transaction{ - referenceUpdates: []ReferenceUpdates{{"sentinel": {}}}, + referenceUpdates: []git.ReferenceUpdates{{"sentinel": {}}}, result: make(chan error, 1), finish: func() error { return nil }, } diff --git a/internal/gitaly/storage/storagemgr/transaction_manager.go b/internal/gitaly/storage/storagemgr/transaction_manager.go index feb004314cab243294f5d9ac8c17c34beb82bb6e..6e386871d2faf900cba95f5f3249230b0f51f8c8 100644 --- a/internal/gitaly/storage/storagemgr/transaction_manager.go +++ b/internal/gitaly/storage/storagemgr/transaction_manager.go @@ -135,19 +135,6 @@ func (err ReferenceVerificationError) Error() string { return fmt.Sprintf("expected %q to point to %q but it pointed to %q", err.ReferenceName, err.ExpectedOldOID, err.ActualOldOID) } -// ReferenceUpdate describes the state of a reference's old and new tip in an update. -type ReferenceUpdate struct { - // OldOID is the old OID the reference is expected to point to prior to updating it. - // If the reference does not point to the old value, the reference verification fails. - OldOID git.ObjectID - // NewOID is the new desired OID to point the reference to. - NewOID git.ObjectID - // OldTarget is the expected target for a symbolic reference. 
- OldTarget git.ReferenceName - // NewTarget stores the desired target for a symbolic reference. - NewTarget git.ReferenceName -} - // repositoryCreation models a repository creation in a transaction. type repositoryCreation struct { // objectHash defines the object format the repository is created with. @@ -193,10 +180,6 @@ type writeCommitGraphs struct { config housekeepingcfg.WriteCommitGraphConfig } -// ReferenceUpdates contains references to update. Reference name is used as the key and the value -// is the expected old tip and the desired new tip. -type ReferenceUpdates map[git.ReferenceName]ReferenceUpdate - type transactionState int const ( @@ -291,7 +274,7 @@ type Transaction struct { walEntry *wal.Entry skipVerificationFailures bool initialReferenceValues map[git.ReferenceName]git.Reference - referenceUpdates []ReferenceUpdates + referenceUpdates []git.ReferenceUpdates defaultBranchUpdated bool customHooksUpdated bool repositoryCreation *repositoryCreation @@ -677,8 +660,8 @@ func (txn *Transaction) RecordInitialReferenceValues(ctx context.Context, initia // committed as 'oid-1 -> oid-3'. The old OIDs of the intermediate states are not verified when // committing the write to the actual repository and are discarded from the final committed log // entry. -func (txn *Transaction) UpdateReferences(updates ReferenceUpdates) { - u := ReferenceUpdates{} +func (txn *Transaction) UpdateReferences(updates git.ReferenceUpdates) { + u := git.ReferenceUpdates{} for reference, update := range updates { oldOID := update.OldOID @@ -704,7 +687,7 @@ func (txn *Transaction) UpdateReferences(updates ReferenceUpdates) { } } - u[reference] = ReferenceUpdate{ + u[reference] = git.ReferenceUpdate{ OldOID: oldOID, NewOID: update.NewOID, OldTarget: oldTarget, @@ -717,14 +700,14 @@ func (txn *Transaction) UpdateReferences(updates ReferenceUpdates) { } // flattenReferenceTransactions flattens the recorded reference transactions by dropping -// all intermediate states. 
The returned ReferenceUpdates contains the reference changes +// all intermediate states. The returned git.ReferenceUpdates contains the reference changes // with the OldOID set to the reference's value at the beginning of the transaction, and the // NewOID set to the reference's final value after all of the changes. -func (txn *Transaction) flattenReferenceTransactions() ReferenceUpdates { - flattenedUpdates := ReferenceUpdates{} +func (txn *Transaction) flattenReferenceTransactions() git.ReferenceUpdates { + flattenedUpdates := git.ReferenceUpdates{} for _, updates := range txn.referenceUpdates { for reference, update := range updates { - u := ReferenceUpdate{ + u := git.ReferenceUpdate{ OldOID: update.OldOID, NewOID: update.NewOID, OldTarget: update.OldTarget, @@ -1367,19 +1350,19 @@ func (mgr *TransactionManager) stageRepositoryCreation(ctx context.Context, tran return fmt.Errorf("get references: %w", err) } - referenceUpdates := make(ReferenceUpdates, len(references)) + referenceUpdates := make(git.ReferenceUpdates, len(references)) for _, ref := range references { if ref.IsSymbolic { return fmt.Errorf("unexpected symbolic ref: %v", ref) } - referenceUpdates[ref.Name] = ReferenceUpdate{ + referenceUpdates[ref.Name] = git.ReferenceUpdate{ OldOID: objectHash.ZeroOID, NewOID: git.ObjectID(ref.Target), } } - transaction.referenceUpdates = []ReferenceUpdates{referenceUpdates} + transaction.referenceUpdates = []git.ReferenceUpdates{referenceUpdates} var customHooks bytes.Buffer if err := repoutil.GetCustomHooks(ctx, mgr.logger, diff --git a/internal/gitaly/storage/storagemgr/transaction_manager_alternate_test.go b/internal/gitaly/storage/storagemgr/transaction_manager_alternate_test.go index 01abfc474a594463a6615f089fe2587e282c299b..d3fa3505b218738dc4205f7b32eb776f4f4fb238 100644 --- a/internal/gitaly/storage/storagemgr/transaction_manager_alternate_test.go +++ b/internal/gitaly/storage/storagemgr/transaction_manager_alternate_test.go @@ -398,7 +398,7 @@ func 
generateAlternateTests(t *testing.T, setup testTransactionSetup) []transact }, Commit{ TransactionID: 5, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, }, @@ -1134,7 +1134,7 @@ func generateAlternateTests(t *testing.T, setup testTransactionSetup) []transact }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1211,7 +1211,7 @@ func generateAlternateTests(t *testing.T, setup testTransactionSetup) []transact }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.Second.Pack}, diff --git a/internal/gitaly/storage/storagemgr/transaction_manager_consumer_test.go b/internal/gitaly/storage/storagemgr/transaction_manager_consumer_test.go index e50748178b3a8d7dd477c49eb7fd0ed900ee6bdb..57aa4bac82abc537f9d37161d9b7f3c6f9834a79 100644 --- a/internal/gitaly/storage/storagemgr/transaction_manager_consumer_test.go +++ b/internal/gitaly/storage/storagemgr/transaction_manager_consumer_test.go @@ -32,7 +32,7 @@ func generateConsumerTests(t *testing.T, setup testTransactionSetup) []transacti }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -75,7 +75,7 @@ func generateConsumerTests(t *testing.T, setup testTransactionSetup) []transacti }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -118,7 +118,7 @@ func 
generateConsumerTests(t *testing.T, setup testTransactionSetup) []transacti }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -129,7 +129,7 @@ func generateConsumerTests(t *testing.T, setup testTransactionSetup) []transacti }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -172,7 +172,7 @@ func generateConsumerTests(t *testing.T, setup testTransactionSetup) []transacti }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -228,7 +228,7 @@ func generateConsumerTests(t *testing.T, setup testTransactionSetup) []transacti }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -242,7 +242,7 @@ func generateConsumerTests(t *testing.T, setup testTransactionSetup) []transacti }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/other": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, }, @@ -261,7 +261,7 @@ func generateConsumerTests(t *testing.T, setup testTransactionSetup) []transacti }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/third": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Third.OID}, }, }, @@ -314,7 +314,7 @@ func generateConsumerTests(t *testing.T, setup testTransactionSetup) []transacti }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: 
git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, diff --git a/internal/gitaly/storage/storagemgr/transaction_manager_default_branch_test.go b/internal/gitaly/storage/storagemgr/transaction_manager_default_branch_test.go index bc9be0362eea10d6f6084803caf1f32b2ee9e14d..36d1e838cce8711544f0729959741b3d0f983322 100644 --- a/internal/gitaly/storage/storagemgr/transaction_manager_default_branch_test.go +++ b/internal/gitaly/storage/storagemgr/transaction_manager_default_branch_test.go @@ -19,7 +19,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -63,7 +63,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -74,7 +74,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, @@ -108,7 +108,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran RelativePath: setup.RelativePath, }, Commit{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, DefaultBranchUpdate: &DefaultBranchUpdate{ @@ 
-142,7 +142,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/branch2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -154,7 +154,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch2": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, DefaultBranchUpdate: &DefaultBranchUpdate{ @@ -188,7 +188,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -200,7 +200,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, DefaultBranchUpdate: &DefaultBranchUpdate{ @@ -242,7 +242,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/branch2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -262,7 +262,7 @@ func generateDefaultBranchTests(t *testing.T, setup testTransactionSetup) []tran }, Commit{ TransactionID: 2, - 
ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, diff --git a/internal/gitaly/storage/storagemgr/transaction_manager_hook_test.go b/internal/gitaly/storage/storagemgr/transaction_manager_hook_test.go index 337eed7c9e031772e3e6cf3854f3fe47ebdb0314..c486fd2c0e4171406ca0e5ef5de60831b866be12 100644 --- a/internal/gitaly/storage/storagemgr/transaction_manager_hook_test.go +++ b/internal/gitaly/storage/storagemgr/transaction_manager_hook_test.go @@ -210,13 +210,13 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, ExpectedError: ReferenceVerificationError{ @@ -233,7 +233,7 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Third.OID}, }, }, @@ -264,7 +264,7 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -278,7 +278,7 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: 
setup.Commits.Second.OID}, }, }, @@ -313,13 +313,13 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, ExpectedError: ReferenceVerificationError{ @@ -338,7 +338,7 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -376,7 +376,7 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, ExpectedError: ErrTransactionProcessingStopped, @@ -392,7 +392,7 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -430,7 +430,7 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, ExpectedError: ErrTransactionProcessingStopped, @@ -446,7 +446,7 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ 
TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -484,7 +484,7 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, ExpectedError: ErrTransactionProcessingStopped, @@ -505,13 +505,13 @@ func generateCustomHooksTests(t *testing.T, setup testTransactionSetup) []transa }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Third.OID}, }, ExpectedError: ReferenceVerificationError{ diff --git a/internal/gitaly/storage/storagemgr/transaction_manager_housekeeping_test.go b/internal/gitaly/storage/storagemgr/transaction_manager_housekeeping_test.go index 5fd9951a6d979eab7f616d21d75b257d60191e3e..3f34523054b752088cb35742f12392a686ba826c 100644 --- a/internal/gitaly/storage/storagemgr/transaction_manager_housekeeping_test.go +++ b/internal/gitaly/storage/storagemgr/transaction_manager_housekeeping_test.go @@ -78,7 +78,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -140,7 +140,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + 
ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, "refs/heads/branch-3": {OldOID: gittest.DefaultObjectHash.ZeroOID, NewOID: setup.Commits.Diverging.OID}, }, @@ -191,7 +191,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/keep-around/1": {OldOID: gittest.DefaultObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/merge-requests/1": {OldOID: gittest.DefaultObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, "refs/very/deep/nested/ref": {OldOID: gittest.DefaultObjectHash.ZeroOID, NewOID: setup.Commits.Third.OID}, @@ -252,7 +252,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-3": {OldOID: gittest.DefaultObjectHash.ZeroOID, NewOID: setup.Commits.Diverging.OID}, "refs/keep-around/1": {OldOID: gittest.DefaultObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -310,7 +310,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-3": {OldOID: gittest.DefaultObjectHash.ZeroOID, NewOID: setup.Commits.Diverging.OID}, "refs/keep-around/1": {OldOID: gittest.DefaultObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -361,7 +361,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, "refs/heads/branch-1": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.Third.OID}, 
"refs/heads/branch-2": {OldOID: setup.Commits.Third.OID, NewOID: setup.Commits.Diverging.OID}, @@ -421,7 +421,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, "refs/heads/branch-1": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.Third.OID}, "refs/heads/branch-2": {OldOID: setup.Commits.Third.OID, NewOID: setup.Commits.Diverging.OID}, @@ -474,7 +474,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.Commits.Second.OID, NewOID: gittest.DefaultObjectHash.ZeroOID}, "refs/tags/v1.0.0": {OldOID: lightweightTag, NewOID: gittest.DefaultObjectHash.ZeroOID}, }, @@ -530,7 +530,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -611,7 +611,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -630,7 +630,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.Commits.First.OID, NewOID: gittest.DefaultObjectHash.ZeroOID}, }, }, @@ -690,7 +690,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx 
context.Context, testPa }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.Commits.Second.OID, NewOID: gittest.DefaultObjectHash.ZeroOID}, "refs/tags/v1.0.0": {OldOID: lightweightTag, NewOID: gittest.DefaultObjectHash.ZeroOID}, }, @@ -726,7 +726,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/empty-dir/parent/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -820,7 +820,7 @@ func generateHousekeepingPackRefsTests(t *testing.T, ctx context.Context, testPa }, RunPackRefs{}, Commit{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, ExpectedError: errHousekeepingConflictOtherUpdates, @@ -1700,7 +1700,7 @@ func generateHousekeepingRepackingStrategyTests(t *testing.T, ctx context.Contex }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Third.OID, NewOID: setup.Commits.Second.OID}, }, ExpectedError: errHousekeepingConflictOtherUpdates, @@ -1787,7 +1787,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{ @@ -1854,7 +1854,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, 
QuarantinedPacks: [][]byte{ @@ -1885,7 +1885,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.Third.OID}, }, QuarantinedPacks: [][]byte{ @@ -1942,7 +1942,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{ @@ -1970,7 +1970,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.Third.OID}, }, QuarantinedPacks: [][]byte{ @@ -2029,7 +2029,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{ @@ -2057,7 +2057,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.First.OID}, }, }, @@ -2106,7 +2106,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, QuarantinedPacks: [][]byte{ @@ 
-2131,7 +2131,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{ @@ -2145,7 +2145,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.Third.OID}, }, QuarantinedPacks: [][]byte{ @@ -2159,7 +2159,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 5, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Third.OID, NewOID: setup.Commits.Diverging.OID}, }, QuarantinedPacks: [][]byte{ @@ -2231,7 +2231,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, @@ -2247,7 +2247,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.Commits.Second.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -2269,7 +2269,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, 
}, @@ -2318,7 +2318,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, @@ -2334,7 +2334,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.Commits.Second.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -2356,7 +2356,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -2404,7 +2404,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, @@ -2420,7 +2420,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.Commits.Second.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -2442,7 +2442,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: 
setup.Commits.Second.OID}, }, }, @@ -2496,7 +2496,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, IncludeObjects: []git.ObjectID{setup.Commits.Diverging.OID}, @@ -2517,13 +2517,13 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, UpdateReferences{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Diverging.OID}, }, }, UpdateReferences{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-2": {OldOID: setup.Commits.Diverging.OID, NewOID: setup.Commits.First.OID}, }, }, @@ -2602,7 +2602,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{ @@ -2776,7 +2776,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Third.OID}, }, QuarantinedPacks: [][]byte{ @@ -2791,7 +2791,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.Commits.Third.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -2934,7 +2934,7 @@ func generateHousekeepingRepackingConcurrentTests(t 
*testing.T, ctx context.Cont }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Third.OID}, }, QuarantinedPacks: [][]byte{ @@ -2949,7 +2949,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.Commits.Third.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -3060,7 +3060,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{ @@ -3074,7 +3074,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.Third.OID}, }, QuarantinedPacks: [][]byte{ @@ -3223,7 +3223,7 @@ func generateHousekeepingRepackingConcurrentTests(t *testing.T, ctx context.Cont }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, "refs/heads/branch-2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Third.OID}, }, @@ -3496,7 +3496,7 @@ func generateHousekeepingCommitGraphsTests(t *testing.T, ctx context.Context, se }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{ @@ -3653,7 +3653,7 @@ func 
generateHousekeepingCommitGraphsTests(t *testing.T, ctx context.Context, se }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{ @@ -3700,7 +3700,7 @@ func generateHousekeepingCommitGraphsTests(t *testing.T, ctx context.Context, se }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Third.OID}, }, QuarantinedPacks: [][]byte{ diff --git a/internal/gitaly/storage/storagemgr/transaction_manager_refs_test.go b/internal/gitaly/storage/storagemgr/transaction_manager_refs_test.go index bfa379520d03cf45b4ddcee5d63f68dc3869f8db..9f43bd4ab603e0589a4a2d122bfd53eb9dd6273d 100644 --- a/internal/gitaly/storage/storagemgr/transaction_manager_refs_test.go +++ b/internal/gitaly/storage/storagemgr/transaction_manager_refs_test.go @@ -27,7 +27,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t RelativePath: setup.RelativePath, }, Commit{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/non-\xE5-utf8-directory/non-\xE5-utf8-file": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -61,7 +61,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/non-\xE5-utf8-directory/non-\xE5-utf8-file": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -72,7 +72,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/non-\xE5-utf8-directory/non-\xE5-utf8-file": 
{OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -91,7 +91,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t RelativePath: setup.RelativePath, }, Commit{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -120,7 +120,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t RelativePath: setup.RelativePath, }, Commit{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "not-in-refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, ExpectedError: InvalidReferenceFormatError{ @@ -143,13 +143,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/parent": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/parent/child": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, ExpectedError: updateref.FileDirectoryConflictError{ @@ -187,14 +187,14 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/parent": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 1, SkipVerificationFailures: true, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/parent/child": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -233,14 +233,14 @@ func 
generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/parent/child": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, Commit{ // This is a no-op and thus is dropped. TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/parent": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -270,7 +270,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/parent": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -281,13 +281,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, UpdateReferences{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/parent": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/parent/child": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -315,7 +315,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t RelativePath: setup.RelativePath, }, Commit{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/tags/v1.0.0": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.ObjectHash.EmptyTreeOID}, }, }, @@ -370,7 +370,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, 
ExpectedError: localrepo.InvalidObjectError(setup.Commits.First.OID), @@ -397,7 +397,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, }, @@ -413,14 +413,14 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 2, SkipVerificationFailures: true, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.Third.OID}, "refs/heads/non-conflicting": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, @@ -457,13 +457,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, "refs/heads/non-conflicting": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, @@ -505,13 +505,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + 
ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, ExpectedError: ReferenceVerificationError{ @@ -548,7 +548,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -559,7 +559,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -590,7 +590,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, "refs/heads/non-conflicting": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -607,14 +607,14 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 2, SkipVerificationFailures: true, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.Third.OID}, "refs/heads/non-conflicting": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Third.OID}, }, @@ -647,7 +647,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + 
ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, "refs/heads/non-conflicting": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -664,13 +664,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.Third.OID}, "refs/heads/non-conflicting": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Third.OID}, }, @@ -709,7 +709,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -725,13 +725,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, ExpectedError: ReferenceVerificationError{ @@ -758,7 +758,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -769,7 
+769,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.First.OID}, }, }, @@ -800,7 +800,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -811,7 +811,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -832,7 +832,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/branch-2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -860,13 +860,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-2": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -896,7 +896,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: 
git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/subdir/branch-2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -913,13 +913,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/subdir/branch-2": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -945,7 +945,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-packed": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/sentinel-packed": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -968,7 +968,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-loose": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/sentinel-loose": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -985,13 +985,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-packed": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, Commit{ TransactionID: 5, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ 
"refs/heads/branch-loose": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -1030,7 +1030,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t RelativePath: setup.RelativePath, }, Commit{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/symbolic": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -1058,7 +1058,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1069,7 +1069,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/symbolic": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -1106,7 +1106,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1117,7 +1117,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/symbolic": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -1150,7 +1150,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, "refs/heads/non-conflicting": 
{OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -1167,14 +1167,14 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 2, SkipVerificationFailures: true, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.ObjectHash.ZeroOID}, "refs/heads/non-conflicting": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, @@ -1206,7 +1206,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, "refs/heads/non-conflicting": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, @@ -1223,13 +1223,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.Commits.First.OID}, }, }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.Second.OID, NewOID: setup.ObjectHash.ZeroOID}, "refs/heads/non-conflicting": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, @@ -1268,7 +1268,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: 
setup.Commits.First.OID}, }, }, @@ -1284,13 +1284,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, ExpectedError: ReferenceVerificationError{ @@ -1315,7 +1315,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t RelativePath: setup.RelativePath, }, Commit{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -1334,12 +1334,12 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t RelativePath: setup.RelativePath, }, UpdateReferences{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, UpdateReferences{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Second.OID}, }, }, @@ -1374,19 +1374,19 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, UpdateReferences{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, }, UpdateReferences{ TransactionID: 1, - 
ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ // The old oid should be ignored since there's already a recorded initial value for the // reference. "refs/heads/main": {NewOID: setup.Commits.Third.OID}, @@ -1451,7 +1451,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, UpdateReferences{ // The old oid is ignored as the references old value was already recorded. - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {NewOID: setup.Commits.First.OID}, }, }, @@ -1482,7 +1482,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1500,7 +1500,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t UpdateReferences{ TransactionID: 2, // The old oid is ignored as the references old value was already recorded. - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {NewOID: setup.Commits.Second.OID}, }, }, @@ -1533,7 +1533,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1551,7 +1551,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t UpdateReferences{ TransactionID: 2, // The old oid is ignored as the references old value was already recorded. 
- ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {NewOID: setup.Commits.Second.OID}, }, }, @@ -1584,7 +1584,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1602,7 +1602,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t UpdateReferences{ TransactionID: 2, // The old oid is ignored as the references old value was already recorded. - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {NewOID: setup.Commits.Second.OID}, }, }, @@ -1639,7 +1639,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t RelativePath: setup.RelativePath, }, UpdateReferences{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1675,7 +1675,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1697,7 +1697,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.First.OID}, }, }, @@ -1729,7 +1729,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: 
setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1740,7 +1740,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.First.OID}, }, }, @@ -1771,7 +1771,7 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/remotes/upstream/deleted-branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1798,13 +1798,13 @@ func generateModifyReferencesTests(t *testing.T, setup testTransactionSetup) []t // Delete the branch in the same transaction as we create another one in the `refs/remotes` // directory. The reference deletion creates the `refs/remotes` directory that was removed // by the repacking task. 
- ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/remotes/upstream/deleted-branch": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, UpdateReferences{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/remotes/upstream/created-branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, diff --git a/internal/gitaly/storage/storagemgr/transaction_manager_repo_test.go b/internal/gitaly/storage/storagemgr/transaction_manager_repo_test.go index 74fb0b5d2a7e912dcc7de9dcb6d354f2bf361488..8095a4b962b651fc1ef6e8be7624d7cf0432e06f 100644 --- a/internal/gitaly/storage/storagemgr/transaction_manager_repo_test.go +++ b/internal/gitaly/storage/storagemgr/transaction_manager_repo_test.go @@ -510,7 +510,7 @@ func generateDeleteRepositoryTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, DeleteRepository: true, @@ -611,7 +611,7 @@ func generateDeleteRepositoryTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, ExpectedError: ErrRepositoryNotFound, @@ -762,7 +762,7 @@ func generateDeleteRepositoryTests(t *testing.T, setup testTransactionSetup) []t DefaultBranchUpdate: &DefaultBranchUpdate{ Reference: "refs/heads/branch", }, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, CustomHooksUpdate: &CustomHooksUpdate{ @@ -853,7 +853,7 @@ func generateDeleteRepositoryTests(t *testing.T, setup testTransactionSetup) []t DefaultBranchUpdate: 
&DefaultBranchUpdate{ Reference: "refs/heads/new-head", }, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.First.Pack}, @@ -948,7 +948,7 @@ func generateDeleteRepositoryTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.First.Pack}, diff --git a/internal/gitaly/storage/storagemgr/transaction_manager_test.go b/internal/gitaly/storage/storagemgr/transaction_manager_test.go index fe9495d7ae91906926f67f2b83a435a2cc0ac884..b8f2f6e23c512b54382c9e15cc15d6feb93af198 100644 --- a/internal/gitaly/storage/storagemgr/transaction_manager_test.go +++ b/internal/gitaly/storage/storagemgr/transaction_manager_test.go @@ -310,7 +310,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ Context: ctx, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, ExpectedError: context.Canceled, @@ -369,7 +369,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio RelativePath: setup.RelativePath, }, Commit{ - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, ExpectedError: ErrTransactionProcessingStopped, @@ -454,7 +454,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: 
setup.Commits.First.OID}, }, CustomHooksUpdate: &CustomHooksUpdate{ @@ -515,7 +515,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, ExpectedError: ReferenceVerificationError{ @@ -540,7 +540,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 4, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Third.OID}, }, CustomHooksUpdate: &CustomHooksUpdate{}, @@ -584,7 +584,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.First.Pack}, @@ -628,7 +628,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.First.Pack}, @@ -680,7 +680,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.Second.Pack}, @@ -728,7 +728,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + 
ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/existing": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, "refs/heads/new": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, @@ -768,7 +768,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.First.Pack}, @@ -780,7 +780,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.Commits.Third.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.Third.Pack}, @@ -932,7 +932,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.First.Pack}, @@ -944,7 +944,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, QuarantinedPacks: [][]byte{setup.Commits.Second.Pack}, @@ -975,7 +975,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.First.Pack}, 
@@ -992,7 +992,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.Commits.First.OID, NewOID: setup.ObjectHash.ZeroOID}, }, }, @@ -1000,7 +1000,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio Prune{}, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/dependant": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.Second.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.Second.Pack}, @@ -1175,7 +1175,7 @@ func generateCommonTests(t *testing.T, ctx context.Context, setup testTransactio DefaultBranchUpdate: &DefaultBranchUpdate{ Reference: "refs/heads/new-head", }, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, QuarantinedPacks: [][]byte{setup.Commits.First.Pack}, @@ -1317,7 +1317,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }), Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1339,7 +1339,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }), Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1374,7 +1374,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, 
}, }, @@ -1398,7 +1398,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }), Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1434,7 +1434,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }), Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1522,7 +1522,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1538,7 +1538,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1549,7 +1549,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1613,7 +1613,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 1, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/main": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1629,7 +1629,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 2, - 
ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-1": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1640,7 +1640,7 @@ func generateCommittedEntriesTests(t *testing.T, setup testTransactionSetup) []t }, Commit{ TransactionID: 3, - ReferenceUpdates: ReferenceUpdates{ + ReferenceUpdates: git.ReferenceUpdates{ "refs/heads/branch-2": {OldOID: setup.ObjectHash.ZeroOID, NewOID: setup.Commits.First.OID}, }, }, @@ -1781,11 +1781,11 @@ func BenchmarkTransactionManager(b *testing.B) { commit2 git.ObjectID ) - // getReferenceUpdates builds a ReferenceUpdates with unique branches for the updater. - getReferenceUpdates := func(updaterID int, old, new git.ObjectID) ReferenceUpdates { - referenceUpdates := make(ReferenceUpdates, tc.transactionSize) + // getReferenceUpdates builds a git.ReferenceUpdates with unique branches for the updater. + getReferenceUpdates := func(updaterID int, old, new git.ObjectID) git.ReferenceUpdates { + referenceUpdates := make(git.ReferenceUpdates, tc.transactionSize) for i := 0; i < tc.transactionSize; i++ { - referenceUpdates[git.ReferenceName(fmt.Sprintf("refs/heads/updater-%d-branch-%d", updaterID, i))] = ReferenceUpdate{ + referenceUpdates[git.ReferenceName(fmt.Sprintf("refs/heads/updater-%d-branch-%d", updaterID, i))] = git.ReferenceUpdate{ OldOID: old, NewOID: new, } diff --git a/internal/gitaly/storage/wal/entry_test.go b/internal/gitaly/storage/wal/entry_test.go index 9bb5b0e97ebd77543332092277207994e0778d52..5e0e7b31d7ce5db49877dbed5219f76a5bee2056 100644 --- a/internal/gitaly/storage/wal/entry_test.go +++ b/internal/gitaly/storage/wal/entry_test.go @@ -24,7 +24,7 @@ func setupTestDirectory(t *testing.T, path string) { require.NoError(t, os.WriteFile(filepath.Join(path, "file-1"), []byte("file-1"), perm.PrivateExecutable)) privateSubDir := filepath.Join(filepath.Join(path, "subdir-private")) require.NoError(t, os.Mkdir(privateSubDir, perm.PrivateDir)) - 
require.NoError(t, os.WriteFile(filepath.Join(privateSubDir, "file-2"), []byte("file-2"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(privateSubDir, "file-2"), []byte("file-2"), perm.PrivateWriteOnceFile)) sharedSubDir := filepath.Join(path, "subdir-shared") require.NoError(t, os.Mkdir(sharedSubDir, perm.PrivateDir)) require.NoError(t, os.WriteFile(filepath.Join(sharedSubDir, "file-3"), []byte("file-3"), perm.PrivateWriteOnceFile)) @@ -268,12 +268,12 @@ func TestRecordAlternateUnlink(t *testing.T) { "objects/info": {Mode: fs.ModeDir | perm.PrivateDir}, "objects/3f": {Mode: fs.ModeDir | perm.PrivateDir}, "objects/3f/1": {Mode: perm.PrivateWriteOnceFile}, - "objects/3f/2": {Mode: perm.SharedFile}, + "objects/3f/2": {Mode: perm.PrivateWriteOnceFile}, "objects/4f": {Mode: fs.ModeDir | perm.PrivateDir}, - "objects/4f/3": {Mode: perm.SharedFile}, + "objects/4f/3": {Mode: perm.PrivateWriteOnceFile}, "objects/pack": {Mode: fs.ModeDir | perm.PrivateDir}, "objects/pack/pack.pack": {Mode: perm.PrivateWriteOnceFile}, - "objects/pack/pack.idx": {Mode: perm.SharedFile}, + "objects/pack/pack.idx": {Mode: perm.PrivateWriteOnceFile}, }) } @@ -311,9 +311,9 @@ func TestRecordAlternateUnlink(t *testing.T) { "objects/3f": {Mode: fs.ModeDir | perm.PrivateDir}, "objects/3f/1": {Mode: perm.PrivateWriteOnceFile}, "objects/4f": {Mode: fs.ModeDir | perm.PrivateDir}, - "objects/4f/3": {Mode: perm.SharedFile}, + "objects/4f/3": {Mode: perm.PrivateWriteOnceFile}, "objects/pack": {Mode: fs.ModeDir | perm.PrivateDir}, - "objects/pack/pack.idx": {Mode: perm.SharedFile}, + "objects/pack/pack.idx": {Mode: perm.PrivateWriteOnceFile}, }) }, expectedOperations: func() operations { diff --git a/internal/gitlab/test_server.go b/internal/gitlab/test_server.go index b0a2ec5ee45e16e9986f0f9cb81347c1345e6a45..f9fa31efae6e3791b380a580d6190c66dc821c64 100644 --- a/internal/gitlab/test_server.go +++ b/internal/gitlab/test_server.go @@ -28,7 +28,7 @@ func WriteShellSecretFile(tb testing.TB, 
dir, secretToken string) string { require.NoError(tb, os.MkdirAll(dir, perm.PrivateDir)) filePath := filepath.Join(dir, ".gitlab_shell_secret") - require.NoError(tb, os.WriteFile(filePath, []byte(secretToken), perm.SharedFile)) + require.NoError(tb, os.WriteFile(filePath, []byte(secretToken), perm.PrivateWriteOnceFile)) return filePath } diff --git a/internal/safe/locking_directory_test.go b/internal/safe/locking_directory_test.go index 717c1e498bd2b5bb5639c7c8a50f9ce2a123f698..054684c601926af72f1c7702e8975399fdb61aeb 100644 --- a/internal/safe/locking_directory_test.go +++ b/internal/safe/locking_directory_test.go @@ -29,7 +29,7 @@ func TestLockingDirectory(t *testing.T) { require.NoError(t, os.WriteFile( filepath.Join(path, "somefile"), []byte("data"), - perm.SharedFile), + perm.PrivateWriteOnceFile), ) assert.ErrorIs(t, secondLockingDir.Lock(), safe.ErrFileAlreadyLocked) require.NoError(t, lockingDir.Unlock()) diff --git a/internal/safe/locking_file_writer_test.go b/internal/safe/locking_file_writer_test.go index 2302aca4e520b1458d3874453866b56dd05564fa..627742c9893aa6ac8ea428b38637222f3ee1fff9 100644 --- a/internal/safe/locking_file_writer_test.go +++ b/internal/safe/locking_file_writer_test.go @@ -148,7 +148,7 @@ func TestLockingFileWriter_seedingWithExistingTarget(t *testing.T) { t.Parallel() target := filepath.Join(testhelper.TempDir(t), "file") - require.NoError(t, os.WriteFile(target, []byte("seed"), perm.SharedFile)) + require.NoError(t, os.WriteFile(target, []byte("seed"), perm.PrivateWriteOnceFile)) writer, err := safe.NewLockingFileWriter(target, safe.LockingFileWriterConfig{ SeedContents: true, @@ -166,7 +166,7 @@ func TestLockingFileWriter_modifiesExistingFiles(t *testing.T) { t.Parallel() target := filepath.Join(testhelper.TempDir(t), "file") - require.NoError(t, os.WriteFile(target, []byte("preexisting"), perm.SharedFile)) + require.NoError(t, os.WriteFile(target, []byte("preexisting"), perm.PrivateWriteOnceFile)) writer, err := 
safe.NewLockingFileWriter(target) require.NoError(t, err) @@ -182,7 +182,7 @@ func TestLockingFileWriter_modifiesExistingFilesWithMode(t *testing.T) { t.Parallel() target := filepath.Join(testhelper.TempDir(t), "file") - require.NoError(t, os.WriteFile(target, []byte("preexisting"), perm.SharedFile)) + require.NoError(t, os.WriteFile(target, []byte("preexisting"), perm.PrivateWriteOnceFile)) writer, err := safe.NewLockingFileWriter(target, safe.LockingFileWriterConfig{ FileWriterConfig: safe.FileWriterConfig{FileMode: 0o060}, @@ -205,7 +205,7 @@ func TestLockingFileWriter_concurrentCreation(t *testing.T) { require.NoError(t, err) // Create file concurrently. - require.NoError(t, os.WriteFile(target, []byte("concurrent"), perm.SharedFile)) + require.NoError(t, os.WriteFile(target, []byte("concurrent"), perm.PrivateWriteOnceFile)) require.Equal(t, fmt.Errorf("file concurrently created"), writer.Lock()) @@ -217,7 +217,7 @@ func TestLockingFileWriter_concurrentDeletion(t *testing.T) { target := filepath.Join(testhelper.TempDir(t), "file") - require.NoError(t, os.WriteFile(target, []byte("base"), perm.SharedFile)) + require.NoError(t, os.WriteFile(target, []byte("base"), perm.PrivateWriteOnceFile)) writer, err := safe.NewLockingFileWriter(target) require.NoError(t, err) @@ -234,12 +234,13 @@ func TestLockingFileWriter_concurrentModification(t *testing.T) { target := filepath.Join(testhelper.TempDir(t), "file") - require.NoError(t, os.WriteFile(target, []byte("base"), perm.SharedFile)) + require.NoError(t, os.WriteFile(target, []byte("base"), perm.PrivateWriteOnceFile)) writer, err := safe.NewLockingFileWriter(target) require.NoError(t, err) // Concurrently modify the file. 
- require.NoError(t, os.WriteFile(target, []byte("concurrent"), perm.SharedFile)) + require.NoError(t, os.Remove(target)) + require.NoError(t, os.WriteFile(target, []byte("concurrent"), perm.PrivateWriteOnceFile)) require.Equal(t, fmt.Errorf("file concurrently modified"), writer.Lock()) @@ -272,13 +273,13 @@ func TestLockingFileWriter_locked(t *testing.T) { t.Parallel() target := filepath.Join(testhelper.TempDir(t), "file") - require.NoError(t, os.WriteFile(target, []byte("base"), perm.SharedFile)) + require.NoError(t, os.WriteFile(target, []byte("base"), perm.PrivateWriteOnceFile)) writer, err := safe.NewLockingFileWriter(target) require.NoError(t, err) // Concurrently lock the file. - require.NoError(t, os.WriteFile(target+".lock", nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(target+".lock", nil, perm.PrivateWriteOnceFile)) require.Equal(t, safe.ErrFileAlreadyLocked, writer.Lock()) @@ -291,7 +292,7 @@ func TestLockingFileWriter_externalProcess(t *testing.T) { cfg := testcfg.Build(t) target := filepath.Join(testhelper.TempDir(t), "file") - require.NoError(t, os.WriteFile(target, []byte("base"), perm.SharedFile)) + require.NoError(t, os.WriteFile(target, []byte("base"), perm.PrivateWriteOnceFile)) writer, err := safe.NewLockingFileWriter(target) require.NoError(t, err) diff --git a/internal/streamcache/cache_test.go b/internal/streamcache/cache_test.go index 421f6cc38f32449a0df9725fabc26c612c1a2e38..6fabb71d4fdf3338bf8c948740eaf000cb6985fd 100644 --- a/internal/streamcache/cache_test.go +++ b/internal/streamcache/cache_test.go @@ -358,7 +358,7 @@ func TestCache_unWriteableFile(t *testing.T) { c := newCache(t, tmp) innerCache(c).createFile = func() (namedWriteCloser, error) { - return os.OpenFile(filepath.Join(tmp, "unwriteable"), os.O_RDONLY|os.O_CREATE|os.O_EXCL, perm.SharedFile) + return os.OpenFile(filepath.Join(tmp, "unwriteable"), os.O_RDONLY|os.O_CREATE|os.O_EXCL, perm.PrivateWriteOnceFile) } _, _, err := c.Fetch(ctx, "key", io.Discard, func(w 
io.Writer) error { @@ -379,7 +379,7 @@ func TestCache_unCloseableFile(t *testing.T) { c := newCache(t, tmp) innerCache(c).createFile = func() (namedWriteCloser, error) { - f, err := os.OpenFile(filepath.Join(tmp, "uncloseable"), os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm.SharedFile) + f, err := os.OpenFile(filepath.Join(tmp, "uncloseable"), os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm.PrivateWriteOnceFile) if err != nil { return nil, err } @@ -401,7 +401,7 @@ func TestCache_cannotOpenFileForReading(t *testing.T) { c := newCache(t, tmp) innerCache(c).createFile = func() (namedWriteCloser, error) { - f, err := os.OpenFile(filepath.Join(tmp, "unopenable"), os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm.SharedFile) + f, err := os.OpenFile(filepath.Join(tmp, "unopenable"), os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm.PrivateWriteOnceFile) if err != nil { return nil, err } diff --git a/internal/streamcache/filestore.go b/internal/streamcache/filestore.go index 4d01e377e6f806ce017a6e6e0a730b9462bc5667..8ce3fb62b861960b045dcd6ff7f85e3cce353fdf 100644 --- a/internal/streamcache/filestore.go +++ b/internal/streamcache/filestore.go @@ -110,7 +110,7 @@ func (fs *filestore) Create() (namedWriteCloser, error) { return nil, fmt.Errorf("Create: mkdir: %w", err) } - f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm.SharedFile) + f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_EXCL, perm.PrivateWriteOnceFile) if err != nil { return nil, fmt.Errorf("Create: %w", err) } diff --git a/internal/streamcache/filestore_test.go b/internal/streamcache/filestore_test.go index f722cc7edb73a36aae6746450e25f439c31a2243..16404d4c7eb51495e15a27b4b2d6d5c3623a5959 100644 --- a/internal/streamcache/filestore_test.go +++ b/internal/streamcache/filestore_test.go @@ -109,7 +109,7 @@ func TestFilestoreCleanwalk(t *testing.T) { file := filepath.Join(dir2, "file") require.NoError(t, os.Mkdir(dir1, perm.PrivateDir)) require.NoError(t, os.Mkdir(dir2, perm.PrivateDir)) - require.NoError(t, 
os.WriteFile(file, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(file, nil, perm.PrivateWriteOnceFile)) require.NoError(t, os.Chmod(dir2, 0), "create dir with pathological permissions") require.NoError(t, fs.cleanWalk(time.Now().Add(time.Hour))) diff --git a/internal/tempdir/clean_test.go b/internal/tempdir/clean_test.go index 75ce5a814530bf8ba9cb87d9254ab18d9e93bb9b..587efc76c32151e6c02ed9a6e8a1c83d34676340 100644 --- a/internal/tempdir/clean_test.go +++ b/internal/tempdir/clean_test.go @@ -159,7 +159,7 @@ func makeFile(t *testing.T, locator storage.Locator, storage config.Storage, fil require.NoError(t, err) fullPath := filepath.Join(root, filePath) - require.NoError(t, os.WriteFile(fullPath, nil, perm.SharedFile)) + require.NoError(t, os.WriteFile(fullPath, nil, perm.PrivateWriteOnceFile)) require.NoError(t, os.Chtimes(fullPath, mtime, mtime)) } diff --git a/internal/tempdir/tempdir_test.go b/internal/tempdir/tempdir_test.go index 391afb112883b819019461863ed0b5caecf579e4..1e09c1dd47782c08a9fb77f5a491fb97f4dda2e9 100644 --- a/internal/tempdir/tempdir_test.go +++ b/internal/tempdir/tempdir_test.go @@ -29,7 +29,7 @@ func TestNewRepositorySuccess(t *testing.T) { require.NoError(t, err) require.Equal(t, tempDir.Path(), calculatedPath) - require.NoError(t, os.WriteFile(filepath.Join(tempDir.Path(), "test"), []byte("hello"), perm.SharedFile)) + require.NoError(t, os.WriteFile(filepath.Join(tempDir.Path(), "test"), []byte("hello"), perm.PrivateWriteOnceFile)) require.DirExists(t, tempDir.Path()) diff --git a/internal/testhelper/testcfg/gitaly.go b/internal/testhelper/testcfg/gitaly.go index 135d9f3256afceafa9ce656a2562a3a5b651d87e..6f4bfc5984637131a9bd5a835c48e08f3f50aa97 100644 --- a/internal/testhelper/testcfg/gitaly.go +++ b/internal/testhelper/testcfg/gitaly.go @@ -171,7 +171,7 @@ func WriteTemporaryGitalyConfigFile(tb testing.TB, cfg config.Cfg) string { contents, err := toml.Marshal(cfg) require.NoError(tb, err) - require.NoError(tb, os.WriteFile(path, 
contents, perm.SharedFile)) + require.NoError(tb, os.WriteFile(path, contents, perm.PrivateWriteOnceFile)) return path } diff --git a/internal/testhelper/testserver/gitaly.go b/internal/testhelper/testserver/gitaly.go index d3dc6aca6671a1e50d42faed8b32937d07c3544c..56a9b1c069c708c019e23af5f188eb7bd845310f 100644 --- a/internal/testhelper/testserver/gitaly.go +++ b/internal/testhelper/testserver/gitaly.go @@ -290,6 +290,8 @@ type gitalyServerDeps struct { signingKey string transactionRegistry *storagemgr.TransactionRegistry procReceiveRegistry *hook.ProcReceiveRegistry + partitionManager *storagemgr.PartitionManager + inProgressTracker *service.InProgressTracker } func (gsd *gitalyServerDeps) createDependencies(tb testing.TB, cfg config.Cfg) *service.Dependencies { @@ -331,29 +333,36 @@ func (gsd *gitalyServerDeps) createDependencies(tb testing.TB, cfg config.Cfg) * gsd.procReceiveRegistry = hook.NewProcReceiveRegistry() } + if gsd.inProgressTracker == nil { + gsd.inProgressTracker = service.NewInProgressTracker() + } + var partitionManager *storagemgr.PartitionManager if testhelper.IsWALEnabled() { - dbMgr, err := keyvalue.NewDBManager( - cfg.Storages, - keyvalue.NewBadgerStore, - helper.NewNullTickerFactory(), - gsd.logger, - ) - require.NoError(tb, err) - tb.Cleanup(dbMgr.Close) - - partitionManager, err = storagemgr.NewPartitionManager( - testhelper.Context(tb), - cfg.Storages, - gsd.gitCmdFactory, - localrepo.NewFactory(gsd.logger, gsd.locator, gsd.gitCmdFactory, gsd.catfileCache), - gsd.logger, - dbMgr, - cfg.Prometheus, - nil, - ) - require.NoError(tb, err) - tb.Cleanup(partitionManager.Close) + if gsd.partitionManager == nil { + dbMgr, err := keyvalue.NewDBManager( + cfg.Storages, + keyvalue.NewBadgerStore, + helper.NewNullTickerFactory(), + gsd.logger, + ) + require.NoError(tb, err) + tb.Cleanup(dbMgr.Close) + + partitionManager, err = storagemgr.NewPartitionManager( + testhelper.Context(tb), + cfg.Storages, + gsd.gitCmdFactory, + 
localrepo.NewFactory(gsd.logger, gsd.locator, gsd.gitCmdFactory, gsd.catfileCache),
+ gsd.logger,
+ dbMgr,
+ cfg.Prometheus,
+ nil,
+ )
+ require.NoError(tb, err)
+ tb.Cleanup(partitionManager.Close)
+ gsd.partitionManager = partitionManager
+ }
 }
 
 if gsd.hookMgr == nil {
@@ -433,11 +442,12 @@ func (gsd *gitalyServerDeps) createDependencies(tb testing.TB, cfg config.Cfg) *
 UpdaterWithHooks: gsd.updaterWithHooks,
 HousekeepingManager: gsd.housekeepingManager,
 TransactionRegistry: gsd.transactionRegistry,
- PartitionManager: partitionManager,
+ PartitionManager: gsd.partitionManager,
 BackupSink: gsd.backupSink,
 BackupLocator: gsd.backupLocator,
 BundleURISink: gsd.bundleURISink,
 ProcReceiveRegistry: gsd.procReceiveRegistry,
+ InProgressTracker: gsd.inProgressTracker,
 }
 }
 
@@ -567,6 +577,14 @@ func WithBundleURISink(sink *bundleuri.Sink) GitalyServerOpt {
 }
 }
 
+// WithInProgressTracker sets the InProgressTracker that will be used for Gitaly services.
+func WithInProgressTracker(tracker *service.InProgressTracker) GitalyServerOpt {
+ return func(deps gitalyServerDeps) gitalyServerDeps {
+ deps.inProgressTracker = tracker
+ return deps
+ }
+}
+
 // WithSigningKey sets the signing key path that will be used for Gitaly
 // services.
 func WithSigningKey(signingKey string) GitalyServerOpt {
@@ -591,3 +609,11 @@ func WithProcReceiveRegistry(registry *hook.ProcReceiveRegistry) GitalyServerOpt
 return deps
 }
 }
+
+// WithPartitionManager sets the partition manager that will be used for Gitaly services.
+func WithPartitionManager(partitionMgr *storagemgr.PartitionManager) GitalyServerOpt {
+ return func(deps gitalyServerDeps) gitalyServerDeps {
+ deps.partitionManager = partitionMgr
+ return deps
+ }
+}
diff --git a/proto/cluster.proto b/proto/cluster.proto
index fa954a1f425074955bd4cda1f31253f505893790..0a820b48177de3e44e7cbf66480c7bb9de9be034 100644
--- a/proto/cluster.proto
+++ b/proto/cluster.proto
@@ -23,8 +23,12 @@ message Storage {
 uint64 storage_id = 1;
 // name is the human-readable name of the storage.
 string name = 2;
+ // replication_factor defines the number of nodes where data of this storage are replicated.
+ uint64 replication_factor = 3;
+ // node_id is the current residential node of the storage.
+ uint64 node_id = 4;
 // replica_groups is a list of identifiers for the replica groups associated with this storage.
- repeated uint64 replica_groups = 3;
+ repeated uint64 replica_groups = 5;
 }
 
 // LeaderState represents the current leader state of a Raft group.
@@ -67,6 +71,10 @@ message GetClusterResponse{
 message RegisterStorageRequest {
 // storage_name is the human-readable name of the new storage.
 string storage_name = 1;
+ // node_id is the initial residential node of the storage.
+ uint64 node_id = 2;
+ // replication_factor contains the replication factor of this storage.
+ uint64 replication_factor = 3;
 }
 
 // RegisterStorageResponse is the response message for registering a new storage in a cluster.
diff --git a/proto/go/gitalypb/cluster.pb.go b/proto/go/gitalypb/cluster.pb.go
index b29434b7be834b32c3118a9e054d5262da6c468d..49352a0e502d559f5a274ce0f2c76904116f967b 100644
--- a/proto/go/gitalypb/cluster.pb.go
+++ b/proto/go/gitalypb/cluster.pb.go
@@ -100,8 +100,12 @@ type Storage struct {
 StorageId uint64 `protobuf:"varint,1,opt,name=storage_id,json=storageId,proto3" json:"storage_id,omitempty"`
 // name is the human-readable name of the storage.
 Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+ // replication_factor defines the number of nodes where data of this storage are replicated.
+ ReplicationFactor uint64 `protobuf:"varint,3,opt,name=replication_factor,json=replicationFactor,proto3" json:"replication_factor,omitempty"`
+ // node_id is the current residential node of the storage.
+ NodeId uint64 `protobuf:"varint,4,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
 // replica_groups is a list of identifiers for the replica groups associated with this storage.
- ReplicaGroups []uint64 `protobuf:"varint,3,rep,packed,name=replica_groups,json=replicaGroups,proto3" json:"replica_groups,omitempty"`
+ ReplicaGroups []uint64 `protobuf:"varint,5,rep,packed,name=replica_groups,json=replicaGroups,proto3" json:"replica_groups,omitempty"`
 }
 
 func (x *Storage) Reset() {
@@ -150,6 +154,20 @@ func (x *Storage) GetName() string {
 return ""
 }
 
+func (x *Storage) GetReplicationFactor() uint64 {
+ if x != nil {
+ return x.ReplicationFactor
+ }
+ return 0
+}
+
+func (x *Storage) GetNodeId() uint64 {
+ if x != nil {
+ return x.NodeId
+ }
+ return 0
+}
+
 func (x *Storage) GetReplicaGroups() []uint64 {
 if x != nil {
 return x.ReplicaGroups
@@ -429,6 +447,10 @@ type RegisterStorageRequest struct {
 // storage_name is the human-readable name of the new storage.
 StorageName string `protobuf:"bytes,1,opt,name=storage_name,json=storageName,proto3" json:"storage_name,omitempty"`
+ // node_id is the initial residential node of the storage.
+ NodeId uint64 `protobuf:"varint,2,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
+ // replication_factor contains the replication factor of this storage.
+ ReplicationFactor uint64 `protobuf:"varint,3,opt,name=replication_factor,json=replicationFactor,proto3" json:"replication_factor,omitempty"` } func (x *RegisterStorageRequest) Reset() { @@ -470,6 +492,20 @@ func (x *RegisterStorageRequest) GetStorageName() string { return "" } +func (x *RegisterStorageRequest) GetNodeId() uint64 { + if x != nil { + return x.NodeId + } + return 0 +} + +func (x *RegisterStorageRequest) GetReplicationFactor() uint64 { + if x != nil { + return x.ReplicationFactor + } + return 0 +} + // RegisterStorageResponse is the response message for registering a new storage in a cluster. type RegisterStorageResponse struct { state protoimpl.MessageState @@ -537,46 +573,55 @@ var file_cluster_proto_rawDesc = []byte{ 0x01, 0x28, 0x04, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x22, 0x63, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, 0x1d, - 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x67, 0x72, 0x6f, - 0x75, 0x70, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x04, 0x52, 0x0d, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x22, 0x6f, 0x0a, 0x0b, 0x4c, 0x65, 0x61, 0x64, - 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, - 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, - 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 
0x69, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, - 0x12, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, - 0x65, 0x72, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x22, 0x38, 0x0a, 0x17, 0x42, 0x6f, 0x6f, - 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, - 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x49, 0x64, 0x22, 0x45, 0x0a, 0x18, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, - 0x29, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x13, 0x0a, 0x11, 0x47, 0x65, - 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x22, 0x3b, 0x0a, 0x16, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, - 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x73, 
0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x44, 0x0a, - 0x17, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x67, 0x69, 0x74, 0x61, - 0x6c, 0x79, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x07, 0x73, 0x74, 0x6f, 0x72, - 0x61, 0x67, 0x65, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x6c, 0x61, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x67, 0x69, 0x74, 0x6c, 0x61, 0x62, 0x2d, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x69, 0x74, - 0x61, 0x6c, 0x79, 0x2f, 0x76, 0x31, 0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, - 0x2f, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x02, 0x38, 0x01, 0x22, 0xab, 0x01, 0x0a, 0x07, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x12, + 0x1d, 0x0a, 0x0a, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x09, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x49, 0x64, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x63, 0x74, 0x6f, + 0x72, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x04, 0x52, 0x0d, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x73, 0x22, 0x6f, 0x0a, 0x0b, 0x4c, 0x65, 0x61, 
0x64, 0x65, 0x72, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x12, 0x19, 0x0a, 0x08, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x07, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6c, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, + 0x6c, 0x65, 0x61, 0x64, 0x65, 0x72, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x65, 0x72, 0x6d, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x04, 0x74, 0x65, 0x72, 0x6d, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x22, 0x38, 0x0a, 0x17, 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, + 0x0a, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x64, 0x22, 0x45, 0x0a, 0x18, + 0x42, 0x6f, 0x6f, 0x74, 0x73, 0x74, 0x72, 0x61, 0x70, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x67, 0x69, 0x74, 0x61, + 0x6c, 0x79, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x22, 0x13, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x3f, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x43, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, + 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x83, 0x01, 0x0a, 0x16, 
0x52, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x74, 0x6f, 0x72, + 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, + 0x12, 0x2d, 0x0a, 0x12, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, + 0x66, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x11, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x22, + 0x44, 0x0a, 0x17, 0x52, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x53, 0x74, 0x6f, 0x72, 0x61, + 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x29, 0x0a, 0x07, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x67, 0x69, + 0x74, 0x61, 0x6c, 0x79, 0x2e, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x07, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x42, 0x34, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x6c, 0x61, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x69, 0x74, 0x6c, 0x61, 0x62, 0x2d, 0x6f, 0x72, 0x67, 0x2f, 0x67, + 0x69, 0x74, 0x61, 0x6c, 0x79, 0x2f, 0x76, 0x31, 0x36, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x67, 0x6f, 0x2f, 0x67, 0x69, 0x74, 0x61, 0x6c, 0x79, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, } var ( diff --git a/tools/dlv/go.mod b/tools/dlv/go.mod index f32d952b82d5f38b23d541d9b7fc5893b176b881..d5f5058259a3e6761b4cdd91b58371d418688a80 100644 --- a/tools/dlv/go.mod +++ b/tools/dlv/go.mod @@ -2,7 +2,7 @@ module gitlab.com/gitlab-org/gitaly/tools/dlv go 1.21 -require github.com/go-delve/delve v1.22.1 +require github.com/go-delve/delve v1.23.0 require ( github.com/cilium/ebpf v0.11.0 // 
indirect @@ -10,7 +10,7 @@ require ( github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/derekparker/trie v0.0.0-20230829180723-39f4de51ef7d // indirect github.com/go-delve/liner v1.2.3-0.20231231155935-4726ab1d7f62 // indirect - github.com/google/go-dap v0.11.0 // indirect + github.com/google/go-dap v0.12.0 // indirect github.com/hashicorp/golang-lru v1.0.2 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -24,7 +24,6 @@ require ( go.starlark.net v0.0.0-20231101134539-556fd59b42f6 // indirect golang.org/x/arch v0.6.0 // indirect golang.org/x/exp v0.0.0-20230224173230-c95f2b4c22f2 // indirect - golang.org/x/sys v0.13.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect + golang.org/x/sys v0.17.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/tools/dlv/go.sum b/tools/dlv/go.sum index faecce1f02057188a8b93e9b02b38c3d1d6720bd..185eac4806729afa663e0afe88f150ae714b6385 100644 --- a/tools/dlv/go.sum +++ b/tools/dlv/go.sum @@ -13,14 +13,14 @@ github.com/derekparker/trie v0.0.0-20230829180723-39f4de51ef7d h1:hUWoLdw5kvo2xC github.com/derekparker/trie v0.0.0-20230829180723-39f4de51ef7d/go.mod h1:C7Es+DLenIpPc9J6IYw4jrK0h7S9bKj4DNl8+KxGEXU= github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA= github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/go-delve/delve v1.22.1 h1:LQSF2sv+lP3mmOzMkadl5HGQGgSS2bFg2tbyALqHu8Y= -github.com/go-delve/delve v1.22.1/go.mod h1:TfOb+G5H6YYKheZYAmA59ojoHbOimGfs5trbghHdLbM= +github.com/go-delve/delve v1.23.0 h1:jYgZISZ14KAO3ys8kD07kjrowrygE9F9SIwnpz9xXys= +github.com/go-delve/delve v1.23.0/go.mod h1:S3SLuEE2mn7wipKilTvk1p9HdTMnXXElcEpiZ+VcuqU= github.com/go-delve/liner v1.2.3-0.20231231155935-4726ab1d7f62 h1:IGtvsNyIuRjl04XAOFGACozgUD7A82UffYxZt4DWbvA= github.com/go-delve/liner v1.2.3-0.20231231155935-4726ab1d7f62/go.mod h1:biJCRbqp51wS+I92HMqn5H8/A0PAhxn2vyOT+JqhiGI= 
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-dap v0.11.0 h1:SpAZJL41rOOvd85PuLCCLE1dteTQOyKNnn0H3DBHywo= -github.com/google/go-dap v0.11.0/go.mod h1:HAeyoSd2WIfTfg+0GRXcFrb+RnojAtGNh+k+XTIxJDE= +github.com/google/go-dap v0.12.0 h1:rVcjv3SyMIrpaOoTAdFDyHs99CwVOItIJGKLQFQhNeM= +github.com/google/go-dap v0.12.0/go.mod h1:tNjCASCm5cqePi/RVXXWEVqtnNLV1KTWtYOqu6rZNzc= github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= @@ -67,16 +67,14 @@ golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=