diff --git a/db/docs/batched_background_migrations/backfill_pool_repositories_organization_id.yml b/db/docs/batched_background_migrations/backfill_pool_repositories_organization_id.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9dabb9330fb4992fdf31f3b663a141494e405a89
--- /dev/null
+++ b/db/docs/batched_background_migrations/backfill_pool_repositories_organization_id.yml
@@ -0,0 +1,8 @@
+---
+migration_job_name: BackfillPoolRepositoriesOrganizationId
+description: Backfills organization_id on pool_repositories
+feature_category: source_code_management
+introduced_by_url: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/208322
+milestone: '18.8'
+queued_migration_version: 20251217130435
+finalized_by: # version of the migration that finalized this BBM
diff --git a/db/migrate/20251217130425_add_sharding_key_trigger_to_pool_repositories.rb b/db/migrate/20251217130425_add_sharding_key_trigger_to_pool_repositories.rb
new file mode 100644
index 0000000000000000000000000000000000000000..acf0dcd4c3dc1145a68ab07704f543866afb7056
--- /dev/null
+++ b/db/migrate/20251217130425_add_sharding_key_trigger_to_pool_repositories.rb
@@ -0,0 +1,44 @@
+# frozen_string_literal: true
+
+class AddShardingKeyTriggerToPoolRepositories < Gitlab::Database::Migration[2.3]
+  include Gitlab::Database::SchemaHelpers
+
+  milestone '18.8'
+
+  TABLE_NAME = 'pool_repositories'
+  TRIGGER_FUNCTION_NAME = 'pool_repositories_sharding_key'
+  TRIGGER_NAME = "trigger_#{TRIGGER_FUNCTION_NAME}"
+
+  def up
+    execute(<<~SQL)
+      CREATE OR REPLACE FUNCTION #{TRIGGER_FUNCTION_NAME}() RETURNS TRIGGER AS $$
+      BEGIN
+        IF NEW.organization_id IS NOT NULL THEN
+          RETURN NEW;
+        END IF;
+
+        IF NEW.source_project_id IS NOT NULL THEN
+          SELECT p.organization_id
+          INTO NEW.organization_id
+          FROM projects p
+          WHERE p.id = NEW.source_project_id;
+        END IF;
+
+        RETURN NEW;
+      END;
+      $$ LANGUAGE plpgsql;
+    SQL
+
+    create_trigger(
+      TABLE_NAME,
+      TRIGGER_NAME,
+      TRIGGER_FUNCTION_NAME,
+      fires: 'BEFORE INSERT OR UPDATE'
+    )
+  end
+
+  def down
+    drop_trigger(TABLE_NAME, TRIGGER_NAME)
+    drop_function(TRIGGER_FUNCTION_NAME)
+  end
+end
diff --git a/db/post_migrate/20251217130430_add_concurrent_index_to_pool_repositories_on_organization_id.rb b/db/post_migrate/20251217130430_add_concurrent_index_to_pool_repositories_on_organization_id.rb
new file mode 100644
index 0000000000000000000000000000000000000000..917be380130f7c895589dfb6b486dfb596cce135
--- /dev/null
+++ b/db/post_migrate/20251217130430_add_concurrent_index_to_pool_repositories_on_organization_id.rb
@@ -0,0 +1,16 @@
+# frozen_string_literal: true
+
+class AddConcurrentIndexToPoolRepositoriesOnOrganizationId < Gitlab::Database::Migration[2.3]
+  disable_ddl_transaction!
+  milestone '18.8'
+
+  INDEX_NAME = 'index_pool_repositories_on_organization_id'
+
+  def up
+    add_concurrent_index :pool_repositories, :organization_id, name: INDEX_NAME
+  end
+
+  def down
+    remove_concurrent_index_by_name :pool_repositories, INDEX_NAME
+  end
+end
diff --git a/db/post_migrate/20251217130432_add_not_valid_foreign_key_constraint_to_pool_repositories_on_organization_id.rb b/db/post_migrate/20251217130432_add_not_valid_foreign_key_constraint_to_pool_repositories_on_organization_id.rb
new file mode 100644
index 0000000000000000000000000000000000000000..aebaf69ad4e42202b6c375bed54553260e701e63
--- /dev/null
+++ b/db/post_migrate/20251217130432_add_not_valid_foreign_key_constraint_to_pool_repositories_on_organization_id.rb
@@ -0,0 +1,18 @@
+# frozen_string_literal: true
+
+class AddNotValidForeignKeyConstraintToPoolRepositoriesOnOrganizationId < Gitlab::Database::Migration[2.3]
+  disable_ddl_transaction!
+  milestone '18.8'
+
+  def up
+    add_concurrent_foreign_key(
+      :pool_repositories, :organizations,
+      column: :organization_id,
+      on_delete: :cascade, validate: false
+    )
+  end
+
+  def down
+    remove_foreign_key_if_exists :pool_repositories, column: :organization_id
+  end
+end
diff --git a/db/post_migrate/20251217130435_queue_backfill_pool_repositories_organization_id.rb b/db/post_migrate/20251217130435_queue_backfill_pool_repositories_organization_id.rb
new file mode 100644
index 0000000000000000000000000000000000000000..0507509657c38c6a2bd7c7a9f08a5ddcc9421e3d
--- /dev/null
+++ b/db/post_migrate/20251217130435_queue_backfill_pool_repositories_organization_id.rb
@@ -0,0 +1,25 @@
+# frozen_string_literal: true
+
+class QueueBackfillPoolRepositoriesOrganizationId < Gitlab::Database::Migration[2.3]
+  milestone '18.8'
+
+  restrict_gitlab_migration gitlab_schema: :gitlab_main_org
+
+  MIGRATION = "BackfillPoolRepositoriesOrganizationId"
+  BATCH_SIZE = 1000
+  SUB_BATCH_SIZE = 100
+
+  def up
+    queue_batched_background_migration(
+      MIGRATION,
+      :pool_repositories,
+      :id,
+      batch_size: BATCH_SIZE,
+      sub_batch_size: SUB_BATCH_SIZE
+    )
+  end
+
+  def down
+    delete_batched_background_migration(MIGRATION, :pool_repositories, :id, [])
+  end
+end
diff --git a/db/schema_migrations/20251217130425 b/db/schema_migrations/20251217130425
new file mode 100644
index 0000000000000000000000000000000000000000..2319a2d797f701378b6f15b6b36cd0b9dcd939bf
--- /dev/null
+++ b/db/schema_migrations/20251217130425
@@ -0,0 +1 @@
+9e6922df75c95742c85248c2ad224950c95594f342ac5f25a0233bf531ff92d5
\ No newline at end of file
diff --git a/db/schema_migrations/20251217130430 b/db/schema_migrations/20251217130430
new file mode 100644
index 0000000000000000000000000000000000000000..c5690919b4b859d6e9805dcd877d4c59d1b481f3
--- /dev/null
+++ b/db/schema_migrations/20251217130430
@@ -0,0 +1 @@
+8ecdda8961617a9b25040679ddc990497b70158cf70c57ff435d58a1f9c1f172
\ No newline at end of file
diff --git a/db/schema_migrations/20251217130432 b/db/schema_migrations/20251217130432
new file mode 100644
index 0000000000000000000000000000000000000000..890244eaa20d7cf21880348af34608eba1bbe2f2
--- /dev/null
+++ b/db/schema_migrations/20251217130432
@@ -0,0 +1 @@
+2400e2526b7069ad5fe248a8a952af35cc33d15b518fbaa2ac7cce9b0c6a17c7
\ No newline at end of file
diff --git a/db/schema_migrations/20251217130435 b/db/schema_migrations/20251217130435
new file mode 100644
index 0000000000000000000000000000000000000000..8e451dcbe53e33b0f6a72f83221b60a11ed9e082
--- /dev/null
+++ b/db/schema_migrations/20251217130435
@@ -0,0 +1 @@
+3f6cc8fa80bfbbdc64a5ba7e7f86a7fda9ca92b9aa2a1628183688754c467d9d
\ No newline at end of file
diff --git a/db/structure.sql b/db/structure.sql
index 2188d6e075cd59ee6dc3c7eb25bedadf5050a377..fb4e2bd7ce11d9abbbf41ec8123d3e97b6078025 100644
--- a/db/structure.sql
+++ b/db/structure.sql
@@ -817,6 +817,25 @@ RETURN NEW;
 END
 $$;
 
+CREATE FUNCTION pool_repositories_sharding_key() RETURNS trigger
+    LANGUAGE plpgsql
+    AS $$
+BEGIN
+  IF NEW.organization_id IS NOT NULL THEN
+    RETURN NEW;
+  END IF;
+
+  IF NEW.source_project_id IS NOT NULL THEN
+    SELECT p.organization_id
+    INTO NEW.organization_id
+    FROM projects p
+    WHERE p.id = NEW.source_project_id;
+  END IF;
+
+  RETURN NEW;
+END;
+$$;
+
 CREATE FUNCTION postgres_pg_stat_activity_autovacuum() RETURNS TABLE(query text, query_start timestamp with time zone)
     LANGUAGE sql SECURITY DEFINER
     SET search_path TO 'pg_catalog', 'pg_temp'
@@ -43709,6 +43728,8 @@ CREATE INDEX index_pm_package_version_licenses_on_pm_package_version_id ON pm_pa
 
 CREATE INDEX index_pm_package_versions_on_pm_package_id ON pm_package_versions USING btree (pm_package_id);
 
+CREATE INDEX index_pool_repositories_on_organization_id ON pool_repositories USING btree (organization_id);
+
 CREATE INDEX index_pool_repositories_on_shard_id ON pool_repositories USING btree (shard_id);
 
 CREATE UNIQUE INDEX index_pool_repositories_on_source_project_id_and_shard_id ON pool_repositories USING btree (source_project_id, shard_id);
@@ -49775,6 +49796,8 @@ CREATE TRIGGER trigger_jira_tracker_data_sharding_key_on_insert BEFORE INSERT ON
 
 CREATE TRIGGER trigger_namespaces_traversal_ids_on_update AFTER UPDATE ON namespaces FOR EACH ROW WHEN ((old.traversal_ids IS DISTINCT FROM new.traversal_ids)) EXECUTE FUNCTION insert_namespaces_sync_event();
 
+CREATE TRIGGER trigger_pool_repositories_sharding_key BEFORE INSERT OR UPDATE ON pool_repositories FOR EACH ROW EXECUTE FUNCTION pool_repositories_sharding_key();
+
 CREATE TRIGGER trigger_projects_parent_id_on_insert AFTER INSERT ON projects FOR EACH ROW EXECUTE FUNCTION insert_projects_sync_event();
 
 CREATE TRIGGER trigger_projects_parent_id_on_update AFTER UPDATE ON projects FOR EACH ROW WHEN ((old.namespace_id IS DISTINCT FROM new.namespace_id)) EXECUTE FUNCTION insert_projects_sync_event();
@@ -51106,6 +51129,9 @@ ALTER TABLE ONLY scan_result_policy_violations
 ALTER TABLE ONLY approval_project_rules
     ADD CONSTRAINT fk_773289d10b FOREIGN KEY (approval_policy_rule_id) REFERENCES approval_policy_rules(id) ON DELETE CASCADE;
 
+ALTER TABLE ONLY pool_repositories
+    ADD CONSTRAINT fk_775c554d89 FOREIGN KEY (organization_id) REFERENCES organizations(id) ON DELETE CASCADE NOT VALID;
+
 ALTER TABLE ONLY agent_user_access_project_authorizations
     ADD CONSTRAINT fk_78034b05d8 FOREIGN KEY (project_id) REFERENCES projects(id) ON DELETE CASCADE;
diff --git a/lib/gitlab/background_migration/backfill_pool_repositories_organization_id.rb b/lib/gitlab/background_migration/backfill_pool_repositories_organization_id.rb
new file mode 100644
index 0000000000000000000000000000000000000000..269bf5c845f2eaaef3f0bc75d4d4d45a9baea61b
--- /dev/null
+++ b/lib/gitlab/background_migration/backfill_pool_repositories_organization_id.rb
@@ -0,0 +1,63 @@
+# frozen_string_literal: true
+
+module Gitlab
+  module BackgroundMigration
+    class BackfillPoolRepositoriesOrganizationId < BatchedMigrationJob
+      operation_name :backfill_pool_repositories_organization_id
+
+      feature_category :source_code_management
+
+      def perform
+        each_sub_batch do |sub_batch|
+          # Case 1: source_project_id is set, get organization from source project (PREFERRED)
+          connection.exec_update(update_from_source_project_sql(sub_batch))
+
+          # Case 2: source_project_id is empty, but we can detect organization
+          # through the member_projects (projects.pool_repository_id relation)
+          connection.exec_update(update_from_member_projects_sql(sub_batch))
+
+          # Case 3: No sharding key can be determined - assign default organization_id = 1
+          connection.exec_update(update_with_default_organization_sql(sub_batch))
+        end
+      end
+
+      private
+
+      def update_from_source_project_sql(sub_batch)
+        <<~SQL
+          UPDATE pool_repositories
+          SET organization_id = projects.organization_id
+          FROM projects
+          WHERE pool_repositories.source_project_id = projects.id
+          AND pool_repositories.organization_id IS NULL
+          AND pool_repositories.id IN (#{sub_batch.select(:id).to_sql})
+        SQL
+      end
+
+      def update_from_member_projects_sql(sub_batch)
+        <<~SQL
+          UPDATE pool_repositories
+          SET organization_id = subquery.organization_id
+          FROM (
+            SELECT DISTINCT p.pool_repository_id, p.organization_id
+            FROM projects p
+            WHERE p.pool_repository_id IS NOT NULL
+          ) AS subquery
+          WHERE pool_repositories.id = subquery.pool_repository_id
+          AND pool_repositories.organization_id IS NULL
+          AND pool_repositories.source_project_id IS NULL
+          AND pool_repositories.id IN (#{sub_batch.select(:id).to_sql})
+        SQL
+      end
+
+      def update_with_default_organization_sql(sub_batch)
+        <<~SQL
+          UPDATE pool_repositories
+          SET organization_id = 1
+          WHERE pool_repositories.organization_id IS NULL
+          AND pool_repositories.id IN (#{sub_batch.select(:id).to_sql})
+        SQL
+      end
+    end
+  end
+end
diff --git a/spec/db/schema_spec.rb b/spec/db/schema_spec.rb
index 6a231a38f810957e6b9368bea38c99fe7b403079..b9059ed2ae7fab1f61a0cf8076ed1e13a3281695 100644
--- a/spec/db/schema_spec.rb
+++ b/spec/db/schema_spec.rb
@@ -159,7 +159,6 @@
     p_ci_pipeline_variables: %w[project_id],
     p_ci_pipelines_config: %w[partition_id project_id],
     p_ci_stages: %w[project_id],
-    pool_repositories: %w[organization_id],
     p_duo_workflows_checkpoints: %w[project_id namespace_id],
     project_build_artifacts_size_refreshes: %w[last_job_artifact_id],
     project_data_transfers: %w[project_id namespace_id],
diff --git a/spec/lib/backup/restore/pool_repositories_spec.rb b/spec/lib/backup/restore/pool_repositories_spec.rb
index 53419c6a49d7b0423e33315feace56ed047903fd..77b5e5628dc5816802574fa739e5de044c39314e 100644
--- a/spec/lib/backup/restore/pool_repositories_spec.rb
+++ b/spec/lib/backup/restore/pool_repositories_spec.rb
@@ -9,7 +9,9 @@ describe '.reinitialize_pools!' do
     context 'with a pool without a source project' do
-      let(:pool_repository) { create(:pool_repository, :without_project) }
+      let_it_be(:default_organization) { create(:organization, id: 1) }
+
+      let(:pool_repository) { create(:pool_repository, :without_project, organization: default_organization) }
 
       it 'yields a skipped result' do
         results = []
diff --git a/spec/lib/gitlab/background_migration/backfill_pool_repositories_organization_id_spec.rb b/spec/lib/gitlab/background_migration/backfill_pool_repositories_organization_id_spec.rb
new file mode 100644
index 0000000000000000000000000000000000000000..0ac789332e6ef003729a099d0ee86ff4d5ca6648
--- /dev/null
+++ b/spec/lib/gitlab/background_migration/backfill_pool_repositories_organization_id_spec.rb
@@ -0,0 +1,246 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+
+RSpec.describe Gitlab::BackgroundMigration::BackfillPoolRepositoriesOrganizationId,
+  feature_category: :source_code_management do
+  let(:connection) { ApplicationRecord.connection }
+
+  let(:namespaces_table) { table(:namespaces) }
+  let(:organizations_table) { table(:organizations) }
+  let(:projects_table) { table(:projects) }
+  let(:pool_repositories_table) { table(:pool_repositories) }
+
+  # Create default organization with ID 1 (this is typically the default
+  # organization in GitLab)
+  #
+  let!(:default_organization) do
+    organizations_table.find_or_create_by!(path: 'default') do |org|
+      org.id = 1
+      org.name = 'default'
+    end
+  end
+
+  let!(:organization) { organizations_table.create!(name: 'organization', path: 'organization') }
+  let!(:organization2) { organizations_table.create!(name: 'organization2', path: 'organization2') }
+
+  # Create a shard for pool repositories
+  #
+  let!(:shard) { table(:shards).create!(name: 'default') }
+
+  let!(:group1) do
+    namespaces_table.create!(name: 'group1', path: 'group1', type: 'Group', organization_id: organization.id)
+  end
+
+  let!(:group2) do
+    namespaces_table.create!(name: 'group2', path: 'group2', type: 'Group', organization_id: organization2.id)
+  end
+
+  # Create project namespaces (these are what project_namespace_id references)
+  #
+  let!(:project_namespace1) do
+    namespaces_table.create!(
+      name: 'project1',
+      path: 'project1',
+      type: 'Project',
+      parent_id: group1.id,
+      organization_id: organization.id
+    )
+  end
+
+  let!(:project_namespace2) do
+    namespaces_table.create!(
+      name: 'project2',
+      path: 'project2',
+      type: 'Project',
+      parent_id: group2.id,
+      organization_id: organization2.id
+    )
+  end
+
+  let!(:project1) do
+    projects_table.create!(
+      name: 'project1',
+      path: 'project1',
+      organization_id: organization.id,
+      project_namespace_id: project_namespace1.id,
+      namespace_id: group1.id
+    )
+  end
+
+  let!(:project2) do
+    projects_table.create!(
+      name: 'project2',
+      path: 'project2',
+      organization_id: organization2.id,
+      project_namespace_id: project_namespace2.id,
+      namespace_id: group2.id
+    )
+  end
+
+  let(:migration_args) do
+    {
+      start_id: pool_repositories_table.minimum(:id),
+      end_id: pool_repositories_table.maximum(:id),
+      batch_table: :pool_repositories,
+      batch_column: :id,
+      sub_batch_size: 1,
+      pause_ms: 0,
+      connection: ApplicationRecord.connection
+    }
+  end
+
+  describe '#perform' do
+    context 'when pool_repository has source_project_id' do
+      it 'backfills organization_id from source project' do
+        pool_repo = pool_repositories_table.create!(
+          source_project_id: project1.id,
+          organization_id: nil,
+          disk_path: 'pool/path1',
+          state: 'ready',
+          shard_id: shard.id
+        )
+
+        described_class.new(**migration_args).perform
+
+        pool_repo.reload
+        expect(pool_repo.organization_id).to eq(organization.id)
+      end
+    end
+
+    context 'when pool_repository has no source_project_id but has member projects' do
+      it 'backfills organization_id from member projects' do
+        # Temporarily disable the trigger for this test
+        connection.execute('DROP TRIGGER IF EXISTS trigger_pool_repositories_sharding_key ON pool_repositories')
+
+        pool_repo = pool_repositories_table.create!(
+          source_project_id: nil,
+          organization_id: nil,
+          disk_path: 'pool/path2',
+          state: 'ready',
+          shard_id: shard.id
+        )
+
+        # Verify it's actually NULL
+        expect(pool_repo.organization_id).to be_nil
+
+        # Set project2 as a member of the pool
+        connection.execute(
+          "UPDATE projects SET pool_repository_id = #{pool_repo.id} WHERE id = #{project2.id}"
+        )
+
+        # Re-enable the trigger
+        connection.execute(<<~SQL)
+          CREATE TRIGGER trigger_pool_repositories_sharding_key
+            BEFORE INSERT OR UPDATE ON pool_repositories
+            FOR EACH ROW
+            EXECUTE FUNCTION pool_repositories_sharding_key()
+        SQL
+
+        described_class.new(**migration_args).perform
+
+        pool_repo.reload
+
+        expect(pool_repo.organization_id).to eq(organization2.id)
+      end
+    end
+
+    context 'when pool_repository has no source_project_id and no member projects' do
+      it 'backfills organization_id with default value 1' do
+        pool_repo = pool_repositories_table.create!(
+          source_project_id: nil,
+          organization_id: nil,
+          disk_path: 'pool/path3',
+          state: 'ready',
+          shard_id: shard.id
+        )
+
+        described_class.new(**migration_args).perform
+
+        pool_repo.reload
+
+        expect(pool_repo.organization_id).to eq(1)
+      end
+    end
+
+    context 'when pool_repository already has organization_id' do
+      it 'does not change existing organization_id' do
+        pool_repo = pool_repositories_table.create!(
+          source_project_id: project1.id,
+          organization_id: organization2.id,
+          disk_path: 'pool/path4',
+          state: 'ready',
+          shard_id: shard.id
+        )
+
+        described_class.new(**migration_args).perform
+
+        pool_repo.reload
+
+        expect(pool_repo.organization_id).to eq(organization2.id)
+      end
+    end
+
+    context 'with mixed scenarios' do
+      it 'handles all cases correctly in priority order' do
+        # Case 1b: Has source_project_id (trigger handles this correctly)
+        pool_repo1 = pool_repositories_table.create!(
+          source_project_id: project1.id,
+          organization_id: nil,
+          disk_path: 'pool/path5',
+          state: 'ready',
+          shard_id: shard.id
+        )
+
+        # Temporarily disable the trigger for this test
+        connection.execute('DROP TRIGGER IF EXISTS trigger_pool_repositories_sharding_key ON pool_repositories')
+
+        pool_repositories_table.create!(
+          source_project_id: nil,
+          organization_id: nil,
+          disk_path: 'pool/path2',
+          state: 'ready',
+          shard_id: shard.id
+        )
+
+        # Case 2: No source_project_id but has member projects (bypass trigger)
+        pool_repo2_id = connection.execute(<<~SQL).first['id']
+          INSERT INTO pool_repositories (source_project_id, organization_id, disk_path, state, shard_id)
+          VALUES (NULL, NULL, 'pool/path6', 'ready', #{shard.id})
+          RETURNING id
+        SQL
+        pool_repo2 = pool_repositories_table.find(pool_repo2_id)
+
+        connection.execute(
+          "UPDATE projects SET pool_repository_id = #{pool_repo2.id} WHERE id = #{project2.id}"
+        )
+
+        # Case 3: No source_project_id and no member projects (bypass trigger)
+        pool_repo3_id = connection.execute(<<~SQL).first['id']
+          INSERT INTO pool_repositories (source_project_id, organization_id, disk_path, state, shard_id)
+          VALUES (NULL, NULL, 'pool/path7', 'ready', #{shard.id})
+          RETURNING id
+        SQL
+        pool_repo3 = pool_repositories_table.find(pool_repo3_id)
+
+        # Re-enable the trigger
+        connection.execute(<<~SQL)
+          CREATE TRIGGER trigger_pool_repositories_sharding_key
+            BEFORE INSERT OR UPDATE ON pool_repositories
+            FOR EACH ROW
+            EXECUTE FUNCTION pool_repositories_sharding_key()
+        SQL
+
+        described_class.new(**migration_args).perform
+
+        pool_repo1.reload
+        pool_repo2.reload
+        pool_repo3.reload
+
+        expect(pool_repo1.organization_id).to eq(organization.id)
+        expect(pool_repo2.organization_id).to eq(organization2.id)
+        expect(pool_repo3.organization_id).to eq(1)
+      end
+    end
+  end
+end
diff --git a/spec/migrations/20251217130435_queue_backfill_pool_repositories_organization_id_spec.rb b/spec/migrations/20251217130435_queue_backfill_pool_repositories_organization_id_spec.rb
new file mode 100644
index 0000000000000000000000000000000000000000..7e922356b1ddc3301eb53daa6cdae288faacc482
--- /dev/null
+++ b/spec/migrations/20251217130435_queue_backfill_pool_repositories_organization_id_spec.rb
@@ -0,0 +1,26 @@
+# frozen_string_literal: true
+
+require 'spec_helper'
+require_migration!
+
+RSpec.describe QueueBackfillPoolRepositoriesOrganizationId, migration: :gitlab_main_org, feature_category: :source_code_management do
+  let!(:batched_migration) { described_class::MIGRATION }
+
+  it 'schedules a new batched migration' do
+    reversible_migration do |migration|
+      migration.before -> {
+        expect(batched_migration).not_to have_scheduled_batched_migration
+      }
+
+      migration.after -> {
+        expect(batched_migration).to have_scheduled_batched_migration(
+          gitlab_schema: :gitlab_main_org,
+          table_name: :pool_repositories,
+          column_name: :id,
+          batch_size: described_class::BATCH_SIZE,
+          sub_batch_size: described_class::SUB_BATCH_SIZE
+        )
+      }
+    end
+  end
+end
diff --git a/spec/models/pool_repository_spec.rb b/spec/models/pool_repository_spec.rb
index cd36b8f9d2b178a0ff7a4a23471c685af154569c..624096d296adbd10a7ea55622c4096af4a1a70d7 100644
--- a/spec/models/pool_repository_spec.rb
+++ b/spec/models/pool_repository_spec.rb
@@ -10,9 +10,11 @@
     it { is_expected.to have_many(:member_projects) }
   end
 
-  describe 'before_validation callbacks' do
+  describe 'setting organization id' do
     let_it_be(:project) { create(:project) }
     let_it_be(:other_organization) { create(:organization) }
+    let_it_be(:default_organization) { create(:organization, id: 1) }
+    let_it_be(:shard) { create(:shard) }
 
     context 'when organization is not set' do
       it 'assigns organization from the source project' do
@@ -45,6 +47,68 @@
       expect(pool_repo.organization).to eq(other_organization)
     end
   end
+
+  context 'when model hooks are bypassed' do
+    context 'when source project is available' do
+      it 'sets organization_id from the source project via database trigger' do
+        # Use insert_all to bypass ActiveRecord callbacks and model hooks
+        result = described_class.insert_all([{
+          source_project_id: project.id,
+          organization_id: nil,
+          disk_path: 'pool/trigger_test',
+          state: 'ready',
+          shard_id: shard.id
+        }], returning: [:id, :organization_id])
+
+        pool_repo = described_class.find(result.rows.first[0])
+        expect(pool_repo.organization_id).to eq(project.organization_id)
+      end
+    end
+
+    context 'when source project is not available' do
+      it 'does not set the default organization_id' do
+        # Use insert_all to bypass ActiveRecord callbacks and model hooks
+        #
+        result = described_class.insert_all([{
+          source_project_id: nil,
+          organization_id: nil,
+          disk_path: 'pool/trigger_default_test',
+          state: 'ready',
+          shard_id: shard.id
+        }], returning: [:id, :organization_id])
+
+        pool_repo = described_class.find(result.rows.first[0])
+        expect(pool_repo.organization_id).to be_nil
+      end
+    end
+
+    context 'when organization_id is already set' do
+      it 'preserves existing organization_id' do
+        # Use insert_all to bypass ActiveRecord callbacks and model hooks
+        result = described_class.insert_all([{
+          source_project_id: project.id,
+          organization_id: other_organization.id,
+          disk_path: 'pool/trigger_preserve_test',
+          state: 'ready',
+          shard_id: shard.id
+        }], returning: [:id, :organization_id])
+
+        pool_repo = described_class.find(result.rows.first[0])
+        expect(pool_repo.organization_id).to eq(other_organization.id)
+      end
+    end
+
+    it 'works during updates when organization_id is cleared' do
+      pool_repo = create(:pool_repository, source_project: project)
+      original_org_id = pool_repo.organization_id
+
+      # Use update_all to bypass ActiveRecord callbacks
+      described_class.where(id: pool_repo.id).update_all(organization_id: nil)
+
+      pool_repo.reload
+      expect(pool_repo.organization_id).to eq(original_org_id)
+    end
+  end
 end
 
 describe 'validations' do