diff --git a/doc/api/graphql/reference/_index.md b/doc/api/graphql/reference/_index.md
index 67f9bb9c57fa85e5dbf888fcc4b712437998f65d..25f211c217e3e5adf11eda1e0245a5014f0612fe 100644
--- a/doc/api/graphql/reference/_index.md
+++ b/doc/api/graphql/reference/_index.md
@@ -2068,6 +2068,7 @@ Input type: `AiActionInput`
| `platformOrigin` | [`String`](#string) | Specifies the origin platform of the request. |
| `projectId` | [`ProjectID`](#projectid) | Global ID of the project the user is acting on. |
| `resolveVulnerability` | [`AiResolveVulnerabilityInput`](#airesolvevulnerabilityinput) | Input for resolve_vulnerability AI action. |
+| `rootNamespaceId` | [`NamespaceID`](#namespaceid) | Global ID of the top-level namespace the user is acting on. |
| `summarizeComments` | [`AiSummarizeCommentsInput`](#aisummarizecommentsinput) | Input for summarize_comments AI action. |
| `summarizeNewMergeRequest` | [`AiSummarizeNewMergeRequestInput`](#aisummarizenewmergerequestinput) | Input for summarize_new_merge_request AI action. |
| `summarizeReview` | [`AiSummarizeReviewInput`](#aisummarizereviewinput) | Input for summarize_review AI action. |
diff --git a/ee/app/assets/javascripts/ai/graphql/chat.mutation.graphql b/ee/app/assets/javascripts/ai/graphql/chat.mutation.graphql
index 43618561a556d9b13aedd432b82e267fbd0baad0..86cd40a13a0a7f22345bae0441c311d960738cda 100644
--- a/ee/app/assets/javascripts/ai/graphql/chat.mutation.graphql
+++ b/ee/app/assets/javascripts/ai/graphql/chat.mutation.graphql
@@ -7,6 +7,7 @@ mutation chat(
$currentFileContext: AiCurrentFileInput
$conversationType: AiConversationsThreadsConversationType
$threadId: AiConversationThreadID
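+ # Top-level namespace used for namespace-scoped model selection (optional).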
+ $rootNamespaceId: NamespaceID
) {
aiAction(
input: {
@@ -20,6 +21,7 @@ mutation chat(
clientSubscriptionId: $clientSubscriptionId
conversationType: $conversationType
threadId: $threadId
+ rootNamespaceId: $rootNamespaceId
}
) {
requestId
diff --git a/ee/app/assets/javascripts/ai/tanuki_bot/components/app.vue b/ee/app/assets/javascripts/ai/tanuki_bot/components/app.vue
index 382c0c846c217401862dc58bb1d51af1bd6f7c79..bc832c8a89b57c3c0e701d95a4ce42b51669819f 100644
--- a/ee/app/assets/javascripts/ai/tanuki_bot/components/app.vue
+++ b/ee/app/assets/javascripts/ai/tanuki_bot/components/app.vue
@@ -75,6 +75,11 @@ export default {
required: false,
default: null,
},
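+ // Global ID of the top-level namespace, used for namespace-scoped model selection.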
+ rootNamespaceId: {
+ type: String,
+ required: false,
+ default: null,
+ },
chatTitle: {
type: String,
required: false,
@@ -354,6 +359,7 @@ export default {
projectId: this.projectId,
threadId: this.activeThread,
conversationType: MULTI_THREADED_CONVERSATION_TYPE,
+ rootNamespaceId: this.rootNamespaceId,
...variables,
};
diff --git a/ee/app/assets/javascripts/ai/tanuki_bot/index.js b/ee/app/assets/javascripts/ai/tanuki_bot/index.js
index 2f84399235ad91359e8286f3a1d71bcade9a3cd1..b45f5e4cb9c8f6d27cd7dd0d93fe2fdb4fed4298 100644
--- a/ee/app/assets/javascripts/ai/tanuki_bot/index.js
+++ b/ee/app/assets/javascripts/ai/tanuki_bot/index.js
@@ -27,7 +27,7 @@ export const initTanukiBotChatDrawer = () => {
});
}
- const { userId, resourceId, projectId, chatTitle } = el.dataset;
+ const { userId, resourceId, projectId, chatTitle, rootNamespaceId } = el.dataset;
return new Vue({
el,
@@ -40,6 +40,7 @@ export const initTanukiBotChatDrawer = () => {
resourceId,
projectId,
chatTitle,
+ rootNamespaceId,
},
});
},
diff --git a/ee/app/graphql/mutations/ai/action.rb b/ee/app/graphql/mutations/ai/action.rb
index 9c9d2797b58d90b4176748b7e575a77a6bd94f30..3570755fe75e49d04cbc59f625546df40b8a608a 100644
--- a/ee/app/graphql/mutations/ai/action.rb
+++ b/ee/app/graphql/mutations/ai/action.rb
@@ -28,6 +28,10 @@ class Action < BaseMutation
required: false,
description: "Global ID of the project the user is acting on."
+ argument :root_namespace_id, ::Types::GlobalIDType[::Namespace],
+ required: false,
+ description: "Global ID of the top-level namespace the user is acting on."
+
argument :conversation_type, Types::Ai::Conversations::Threads::ConversationTypeEnum,
required: false,
description: 'Conversation type of the thread.'
@@ -174,7 +178,7 @@ def authorized_resource?(object)
def extract_method_params!(attributes)
options = attributes.extract!(:client_subscription_id, :platform_origin, :project_id,
- :conversation_type, :thread_id)
+ :conversation_type, :thread_id, :root_namespace_id)
methods = methods(attributes.transform_values(&:to_h))
# At this point, we only have one method since we filtered it in `#ready?`
diff --git a/ee/app/models/ai/model_selection/namespace_feature_setting.rb b/ee/app/models/ai/model_selection/namespace_feature_setting.rb
index 28397999174899993e354bf420b0da69b4e5da5e..ae69932a350b6ae9af5c411f7b11087d65341d5f 100644
--- a/ee/app/models/ai/model_selection/namespace_feature_setting.rb
+++ b/ee/app/models/ai/model_selection/namespace_feature_setting.rb
@@ -17,12 +17,19 @@ class NamespaceFeatureSetting < ApplicationRecord
scope :for_namespace, ->(namespace_id) { where(namespace_id: namespace_id) }
def self.find_or_initialize_by_feature(namespace, feature)
- return unless ::Feature.enabled?(:ai_model_switching, namespace)
+ return unless namespace.present? && ::Feature.enabled?(:ai_model_switching, namespace)
return unless namespace.root?
find_or_initialize_by(namespace_id: namespace.id, feature: feature)
end
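+
+ # Like .find_or_initialize_by_feature, but only returns a persisted setting;
+ # nil when the namespace is missing, is not a root namespace, or the
+ # ai_model_switching flag is disabled.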
+ def self.find_by_feature(namespace, feature)
+ return unless namespace.present? && ::Feature.enabled?(:ai_model_switching, namespace)
+ return unless namespace.root?
+
+ find_by(namespace: namespace, feature: feature)
+ end
+
def model_selection_scope
namespace
end
diff --git a/ee/app/views/layouts/_tanuki_bot_chat.html.haml b/ee/app/views/layouts/_tanuki_bot_chat.html.haml
index db5282683471ef3f419c398d528edd645625bbb2..96a515c682e09897bdd9da8fe5412bff72e5369e 100644
--- a/ee/app/views/layouts/_tanuki_bot_chat.html.haml
+++ b/ee/app/views/layouts/_tanuki_bot_chat.html.haml
@@ -1,6 +1,7 @@
- return unless ::Gitlab::Llm::TanukiBot.enabled_for?(user: current_user, container: nil)
- resource_id = Gitlab::Llm::TanukiBot.resource_id
- project_id = Gitlab::Llm::TanukiBot.project_id
+- root_namespace_id = Gitlab::Llm::TanukiBot.root_namespace_id
- chat_title = ::Ai::AmazonQ.enabled? ? s_('GitLab Duo Chat with Amazon Q') : s_('GitLab Duo Chat')
-#js-tanuki-bot-chat-app{ data: { user_id: current_user.to_global_id, resource_id: resource_id, project_id: project_id, chat_title: chat_title } }
+#js-tanuki-bot-chat-app{ data: { user_id: current_user.to_global_id, resource_id: resource_id, project_id: project_id, root_namespace_id: root_namespace_id, chat_title: chat_title } }
diff --git a/ee/lib/gitlab/duo/chat/react_executor.rb b/ee/lib/gitlab/duo/chat/react_executor.rb
index 1fa7f2db80b93f6e0284bd0286f0f2726d4a5cd5..a1df84bbf3b1dde5c55137d0fdd201839686be79 100644
--- a/ee/lib/gitlab/duo/chat/react_executor.rb
+++ b/ee/lib/gitlab/duo/chat/react_executor.rb
@@ -339,7 +339,13 @@ def current_blob
strong_memoize_attr :current_blob
def chat_feature_setting
- ::Ai::FeatureSetting.find_by_feature(:duo_chat)
+ root_namespace = context.ai_request&.root_namespace
+
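+ # Prefer the namespace-level model selection when the ai_model_switching
+ # flag is enabled for the root namespace; otherwise fall back to the
+ # instance-level (self-hosted) feature setting.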
+ if Feature.enabled?(:ai_model_switching, root_namespace)
+ ::Ai::ModelSelection::NamespaceFeatureSetting.find_by_feature(root_namespace, :duo_chat)
+ else
+ ::Ai::FeatureSetting.find_by_feature(:duo_chat)
+ end
end
def record_first_token_apex
diff --git a/ee/lib/gitlab/llm/ai_gateway/model_metadata.rb b/ee/lib/gitlab/llm/ai_gateway/model_metadata.rb
index 52d78e73d9a4c096283d30fe7b7794e7eb27c0d2..580956e18432273ca2f52dc2ed3e03de70738697 100644
--- a/ee/lib/gitlab/llm/ai_gateway/model_metadata.rb
+++ b/ee/lib/gitlab/llm/ai_gateway/model_metadata.rb
@@ -9,6 +9,8 @@ def initialize(feature_setting: nil)
end
def to_params
+ return namespace_settings_params if feature_setting.is_a?(::Ai::ModelSelection::NamespaceFeatureSetting)
+
return self_hosted_params if feature_setting&.self_hosted?
amazon_q_params if ::Ai::AmazonQ.connected?
@@ -32,6 +34,14 @@ def self_hosted_params
attr_reader :feature_setting
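+
+ # Metadata for a namespace-level model selection: the identifier is the
+ # model reference chosen for the namespace.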
+ def namespace_settings_params
+ {
+ provider: feature_setting.provider,
+ identifier: feature_setting.offered_model_ref,
+ feature_setting: feature_setting.feature
+ }
+ end
+
def amazon_q_params
{
provider: :amazon_q,
diff --git a/ee/lib/gitlab/llm/chain/requests/ai_gateway.rb b/ee/lib/gitlab/llm/chain/requests/ai_gateway.rb
index 1be68ab763dce781de8b7e3008f6aeca761e5b12..49eb4cce76ccde9db2b2cdb97cdecce91e995d8b 100644
--- a/ee/lib/gitlab/llm/chain/requests/ai_gateway.rb
+++ b/ee/lib/gitlab/llm/chain/requests/ai_gateway.rb
@@ -11,7 +11,7 @@ class AiGateway < Base
include ::Gitlab::Llm::Concerns::AllowedParams
include ::Gitlab::Llm::Concerns::EventTracking
- attr_reader :ai_client, :tracking_context
+ attr_reader :ai_client, :tracking_context, :root_namespace
ENDPOINT = '/v1/chat/agent'
BASE_ENDPOINT = '/v1/chat'
@@ -22,9 +22,10 @@ class AiGateway < Base
STOP_WORDS = ["\n\nHuman", "Observation:"].freeze
DEFAULT_MAX_TOKENS = 4096
- def initialize(user, service_name: :duo_chat, tracking_context: {})
+ def initialize(user, service_name: :duo_chat, tracking_context: {}, root_namespace: nil)
@user = user
@tracking_context = tracking_context
+ @root_namespace = root_namespace
@ai_client = ::Gitlab::Llm::AiGateway::Client.new(user, service_name: processed_service_name(service_name),
tracking_context: tracking_context)
end
@@ -103,11 +104,11 @@ def body(prompt, options, unit_primitive: nil)
request_body_agent(inputs: options[:inputs], unit_primitive: unit_primitive,
prompt_version: options[:prompt_version])
else
- request_body(prompt: prompt[:prompt], options: options)
+ request_body(prompt: prompt[:prompt], options: options, unit_primitive: unit_primitive)
end
end
- def request_body(prompt:, options: {})
+ def request_body(prompt:, options: {}, unit_primitive: nil)
{
prompt_components: [{
type: DEFAULT_TYPE,
@@ -117,7 +118,7 @@ def request_body(prompt:, options: {})
},
payload: {
content: prompt
- }.merge(payload_params(options)).merge(model_params(options))
+ }.merge(payload_params(options)).merge(model_params(options, unit_primitive))
}],
stream: true
}
@@ -129,16 +130,17 @@ def request_body_agent(inputs:, unit_primitive: nil, prompt_version: nil)
inputs: inputs
}
- feature_setting = chat_feature_setting(unit_primitive: unit_primitive)
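+ # A namespace-level model selection takes precedence over the
+ # instance-level chat feature setting.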
+ feature_setting = namespace_feature_setting(unit_primitive) ||
+ chat_feature_setting(unit_primitive: unit_primitive)
- model_metadata_params =
- ::Gitlab::Llm::AiGateway::ModelMetadata.new(feature_setting: feature_setting).to_params
- params[:model_metadata] = model_metadata_params if model_metadata_params.present?
+ metadata = model_metadata(feature_setting)
+ params[:model_metadata] = metadata if metadata.present?
- model_family = model_metadata_params && model_metadata_params[:name]
+ model_family = metadata && metadata[:name]
default_version = ::Gitlab::Llm::PromptVersions.version_for_prompt("chat/#{unit_primitive}", model_family)
- params[:prompt_version] = if feature_setting&.self_hosted? || ::Ai::AmazonQ.connected?
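+ # Only instance-level ::Ai::FeatureSetting can be self-hosted, so guard on class.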
+ is_self_hosted = feature_setting.is_a?(::Ai::FeatureSetting) && feature_setting.self_hosted?
+ params[:prompt_version] = if is_self_hosted || ::Ai::AmazonQ.connected?
default_version
else
prompt_version || default_version
@@ -147,23 +149,47 @@ def request_body_agent(inputs:, unit_primitive: nil, prompt_version: nil)
params
end
- def model_params(options)
- if chat_feature_setting&.self_hosted?
- self_hosted_model = chat_feature_setting.self_hosted_model
+ def model_metadata(feature_setting)
+ ::Gitlab::Llm::AiGateway::ModelMetadata.new(feature_setting: feature_setting).to_params
+ end
+
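+ # Namespace-level settings are keyed per unit primitive
+ # (for example "duo_chat_explain_code"), falling back to plain "duo_chat".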
+ def namespace_feature_setting(unit_primitive)
+ feature = unit_primitive ? "duo_chat_#{unit_primitive}" : "duo_chat"
+ ::Ai::ModelSelection::NamespaceFeatureSetting.find_by_feature(root_namespace, feature)
+ end
- {
+ def model_params(options, unit_primitive = nil)
+ unit_primitive ||= options[:unit_primitive]
+ feature_setting = namespace_feature_setting(unit_primitive) ||
+ chat_feature_setting(unit_primitive: unit_primitive)
+
+ # Handle self-hosted model settings
+ if feature_setting.is_a?(::Ai::FeatureSetting) && feature_setting.self_hosted?
+ self_hosted_model = feature_setting.self_hosted_model
+ return {
provider: :litellm,
model: self_hosted_model.model,
model_endpoint: self_hosted_model.endpoint,
model_api_key: self_hosted_model.api_token,
model_identifier: self_hosted_model.identifier
}
- else
- {
- provider: provider(options),
- model: model(options)
+ end
+
+ # Handle namespace feature settings
+ if feature_setting.is_a?(::Ai::ModelSelection::NamespaceFeatureSetting)
+ return {
+ provider: feature_setting.provider,
+ feature_setting: feature_setting.feature,
+ identifier: feature_setting.offered_model_ref
}
end
+
+ # Default model parameters
+ {
+ provider: provider(options),
+ model: model(options)
+ }
end
def payload_params(options)
diff --git a/ee/lib/gitlab/llm/completions/chat.rb b/ee/lib/gitlab/llm/completions/chat.rb
index c449e382c434e3e12ce3c02e7b640f20a39413c2..ff4298737c0cb6fee713d6045907b919c56604b5 100644
--- a/ee/lib/gitlab/llm/completions/chat.rb
+++ b/ee/lib/gitlab/llm/completions/chat.rb
@@ -53,7 +53,11 @@ def initialize(prompt_message, ai_prompt_class, options = {})
end
def ai_request
- ::Gitlab::Llm::Chain::Requests::AiGateway.new(user, tracking_context: tracking_context)
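+ # Resolve the root namespace from the resource when it has one; otherwise
+ # fall back to the root_namespace_id passed with the request.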
+ ::Gitlab::Llm::Chain::Requests::AiGateway.new(
+ user,
+ tracking_context: tracking_context,
+ root_namespace: resource.try(:resource_parent)&.root_ancestor || find_root_namespace
+ )
end
def execute
@@ -99,6 +103,13 @@ def tools
TOOLS
end
+ def find_root_namespace
+ return unless options[:root_namespace_id]
+
+ root_namespace_id = GlobalID.parse(options[:root_namespace_id])
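+ # find_by_gid may return a lazy value; force it to load the namespace record.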
+ ::Gitlab::Graphql::Lazy.force(GitlabSchema.find_by_gid(root_namespace_id))
+ end
+
def response_post_processing
return if Rails.env.development?
return unless Gitlab::Saas.feature_available?(:duo_chat_categorize_question)
diff --git a/ee/lib/gitlab/llm/tanuki_bot.rb b/ee/lib/gitlab/llm/tanuki_bot.rb
index 0770ec25bfcb64e8c8b6a3ad2481bae1e86b94df..9acc8b572896e24f2b1a30bfb8eb7c6860fb3f57 100644
--- a/ee/lib/gitlab/llm/tanuki_bot.rb
+++ b/ee/lib/gitlab/llm/tanuki_bot.rb
@@ -51,6 +51,11 @@ def self.project_id
project_path = Gitlab::ApplicationContext.current_context_attribute(:project).presence
Project.find_by_full_path(project_path).try(:to_global_id) if project_path
end
+
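+ # Mirrors .project_id above: resolves the root namespace recorded in the
+ # application context into a global ID for the frontend.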
+ def self.root_namespace_id
+ namespace_path = Gitlab::ApplicationContext.current_context_attribute(:root_namespace).presence
+ Group.find_by_full_path(namespace_path).try(:to_global_id) if namespace_path
+ end
end
end
end
diff --git a/ee/spec/factories/ai/model_selection/namespace_feature_settings.rb b/ee/spec/factories/ai/model_selection/namespace_feature_settings.rb
index 139eb0e7eb3570f0c9b421473cd69fa4b2ed46ee..1f879db71972aae430c5993fea831af524fea61d 100644
--- a/ee/spec/factories/ai/model_selection/namespace_feature_settings.rb
+++ b/ee/spec/factories/ai/model_selection/namespace_feature_settings.rb
@@ -14,6 +14,8 @@
"models" => [
{ 'name' => 'Claude Sonnet 3.5', 'identifier' => 'claude_sonnet_3_5' },
{ 'name' => 'Claude Sonnet 3.7', 'identifier' => 'claude_sonnet_3_7' },
+ { 'name' => 'Claude Sonnet 3.7', 'identifier' => 'claude-3-7-sonnet-20250219' },
+ { 'name' => 'Claude 3.5 Sonnet', 'identifier' => 'claude-3-5-sonnet-20240620' },
{ 'name' => 'OpenAI Chat GPT 4o', 'identifier' => 'openai_chatgpt_4o' }
],
"unit_primitives" => [
@@ -30,6 +32,20 @@
"selectable_models" => %w[claude_sonnet_3_5 claude_sonnet_3_7 openai_chatgpt_4o],
"beta_models" => [],
"unit_primitives" => ["generate_code"]
+ },
+ {
+ "feature_setting" => "duo_chat_explain_code",
+ "default_model" => "claude-3-7-sonnet-20250219",
+ "selectable_models" => %w[claude-3-7-sonnet-20250219 claude-3-5-sonnet-20240620],
+ "beta_models" => [],
+ "unit_primitives" => ["explain_code"]
+ },
+ {
+ "feature_setting" => "duo_chat",
+ "default_model" => "claude-3-7-sonnet-20250219",
+ "selectable_models" => %w[claude-3-7-sonnet-20250219 claude-3-5-sonnet-20240620],
+ "beta_models" => [],
+ "unit_primitives" => %w[ask_build ask_commit]
}
]
}
diff --git a/ee/spec/frontend/ai/tanuki_bot/components/app_spec.js b/ee/spec/frontend/ai/tanuki_bot/components/app_spec.js
index 80e1ef655db97c1a71d81e14219efd6880a44951..089e659287d7f2fbacafa6b93e67240f0751634e 100644
--- a/ee/spec/frontend/ai/tanuki_bot/components/app_spec.js
+++ b/ee/spec/frontend/ai/tanuki_bot/components/app_spec.js
@@ -328,6 +328,7 @@ describeSkipVue3(skipReason, () => {
resourceId: 'command::1',
projectId: null,
conversationType: 'DUO_CHAT',
+ rootNamespaceId: null,
threadId: undefined,
});
});
@@ -398,6 +399,7 @@ describeSkipVue3(skipReason, () => {
resourceId: MOCK_USER_ID,
projectId: 'project-123',
conversationType: 'DUO_CHAT',
+ rootNamespaceId: null,
threadId: undefined,
});
});
@@ -417,6 +419,7 @@ describeSkipVue3(skipReason, () => {
resourceId: MOCK_RESOURCE_ID,
projectId: null,
conversationType: 'DUO_CHAT',
+ rootNamespaceId: null,
threadId: undefined,
});
});
@@ -464,6 +467,7 @@ describeSkipVue3(skipReason, () => {
clientSubscriptionId: '123',
projectId: null,
conversationType: 'DUO_CHAT',
+ rootNamespaceId: null,
threadId: undefined,
});
});
@@ -850,6 +854,7 @@ describeSkipVue3(skipReason, () => {
resourceId: 'gid://gitlab/Issue/1',
projectId: null,
conversationType: 'DUO_CHAT',
+ rootNamespaceId: null,
});
});
@@ -869,9 +874,34 @@ describeSkipVue3(skipReason, () => {
resourceId: MOCK_RESOURCE_ID,
projectId: null,
conversationType: 'DUO_CHAT',
+ rootNamespaceId: null,
threadId: mockThreadId,
});
});
+ it('passes rootNamespaceId when provided in props', async () => {
+ // Create component with rootNamespaceId in props
+ createComponent({
+ propsData: {
+ userId: MOCK_USER_ID,
+ resourceId: MOCK_RESOURCE_ID,
+ rootNamespaceId: 'namespace-123',
+ },
+ });
+
+ findDuoChat().vm.$emit('send-chat-prompt', MOCK_USER_MESSAGE.content);
+
+ await nextTick();
+
+ expect(chatMutationHandlerMock).toHaveBeenCalledWith({
+ clientSubscriptionId: '123',
+ question: MOCK_USER_MESSAGE.content,
+ resourceId: MOCK_RESOURCE_ID,
+ projectId: null,
+ conversationType: 'DUO_CHAT',
+ rootNamespaceId: 'namespace-123',
+ threadId: undefined,
+ });
+ });
});
describe('thread handling', () => {
diff --git a/ee/spec/graphql/mutations/ai/action_spec.rb b/ee/spec/graphql/mutations/ai/action_spec.rb
index 170f57567095777e846e9027ab72dd078f7b5255..a56f61132806ce88412add8915f957ca204ed9fa 100644
--- a/ee/spec/graphql/mutations/ai/action_spec.rb
+++ b/ee/spec/graphql/mutations/ai/action_spec.rb
@@ -476,5 +476,90 @@
it_behaves_like 'an AI action'
end
+
+ context 'when root_namespace_id is specified' do
+ let_it_be(:group) { create(:group) }
+
+ let(:input) { { chat: { resource_id: resource_id }, root_namespace_id: group.to_global_id } }
+
+ before_all do
+ resource.project.add_developer(user)
+ group.add_developer(user)
+ end
+
+ before do
+ allow(Ability).to receive(:allowed?).and_call_original
+ allow(Ability)
+ .to receive(:allowed?)
+ .with(user, :read_namespace, group)
+ .and_return(true)
+ end
+
+ it 'passes the root_namespace_id to the service' do
+ expect_next_instance_of(
+ Llm::ExecuteMethodService,
+ user,
+ resource,
+ :chat,
+ hash_including(root_namespace_id: kind_of(GlobalID))
+ ) do |svc|
+ expect(svc)
+ .to receive(:execute)
+ .and_return(ServiceResponse.success(
+ payload: {
+ ai_message: build(:ai_message, request_id: request_id)
+ }))
+ end
+
+ result = subject
+ expect(result[:errors]).to be_empty
+ expect(result[:request_id]).to eq(request_id)
+ end
+
+ context 'when resource is null' do
+ let(:resource_id) { nil }
+
+ it 'passes the root_namespace_id to the service' do
+ expect_next_instance_of(
+ Llm::ExecuteMethodService,
+ user,
+ nil,
+ :chat,
+ hash_including(root_namespace_id: kind_of(GlobalID))
+ ) do |svc|
+ expect(svc)
+ .to receive(:execute)
+ .and_return(ServiceResponse.success(
+ payload: {
+ ai_message: build(:ai_message, request_id: request_id)
+ }))
+ end
+
+ result = subject
+ expect(result[:errors]).to be_empty
+ expect(result[:request_id]).to eq(request_id)
+ end
+ end
+
+ context 'when service returns an error' do
+ it 'returns the error message' do
+ expect_next_instance_of(
+ Llm::ExecuteMethodService,
+ user,
+ resource,
+ :chat,
+ hash_including(root_namespace_id: kind_of(GlobalID))
+ ) do |svc|
+ expect(svc)
+ .to receive(:execute)
+ .and_return(ServiceResponse.error(message: 'error'))
+ end
+
+ result = subject
+ expect(result[:errors]).to eq(['error'])
+ expect(result[:request_id]).to be_nil
+ end
+ end
+ end
end
end
diff --git a/ee/spec/lib/gitlab/duo/chat/react_executor_spec.rb b/ee/spec/lib/gitlab/duo/chat/react_executor_spec.rb
index 9a7da107954866b30ed82309168b0278be9f94c5..3c39b5660898348e951634263f165626b095b97c 100644
--- a/ee/spec/lib/gitlab/duo/chat/react_executor_spec.rb
+++ b/ee/spec/lib/gitlab/duo/chat/react_executor_spec.rb
@@ -549,6 +549,10 @@ def expect_sli_error(failed)
let_it_be(:self_hosted_model) { create(:ai_self_hosted_model, api_token: 'test_token') }
let_it_be(:ai_feature) { create(:ai_feature_setting, self_hosted_model: self_hosted_model, feature: :duo_chat) }
+ before do
+ stub_feature_flags(ai_model_switching: false)
+ end
+
it 'sends the self-hosted model metadata' do
params = step_params
params[:model_metadata] = {
@@ -568,6 +572,47 @@ def expect_sli_error(failed)
end
end
+ context 'when Duo chat model is selected at namespace level' do
+ let_it_be(:root_namespace) { create(:group) }
+ let(:ai_request) { instance_double(::Gitlab::Llm::Chain::Requests::AiGateway, root_namespace: root_namespace) }
+ let(:context) do
+ Gitlab::Llm::Chain::GitlabContext.new(
+ current_user: user,
+ container: nil,
+ resource: resource,
+ ai_request: ai_request,
+ extra_resource: extra_resource,
+ started_at: started_at_timestamp,
+ current_file: current_file,
+ agent_version: nil,
+ additional_context: additional_context
+ )
+ end
+
+ let_it_be(:feature_setting) do
+ create(:ai_namespace_feature_setting,
+ namespace: root_namespace,
+ feature: :duo_chat,
+ offered_model_ref: 'claude-3-7-sonnet-20250219')
+ end
+
+ it 'sends the namespace model metadata' do
+ params = step_params
+ params[:model_metadata] = {
+ provider: 'gitlab',
+ identifier: 'claude-3-7-sonnet-20250219',
+ feature_setting: 'duo_chat'
+ }
+
+ expect_next_instance_of(Gitlab::Duo::Chat::StepExecutor) do |react_agent|
+ expect(react_agent).to receive(:step).with(params)
+ .and_yield(action_event).and_return([action_event])
+ end
+
+ agent.execute
+ end
+ end
+
context 'when amazon q is connected' do
let_it_be(:add_on_purchase) { create(:gitlab_subscription_add_on_purchase, :duo_amazon_q) }
diff --git a/ee/spec/lib/gitlab/llm/ai_gateway/model_metadata_spec.rb b/ee/spec/lib/gitlab/llm/ai_gateway/model_metadata_spec.rb
index 19f052c3647ec300a6bc365b8f6ec79d0116aea0..82f5d81cd4fc392dc13bb9a5b4566618adc7ff9e 100644
--- a/ee/spec/lib/gitlab/llm/ai_gateway/model_metadata_spec.rb
+++ b/ee/spec/lib/gitlab/llm/ai_gateway/model_metadata_spec.rb
@@ -50,6 +50,30 @@
it { is_expected.to be_nil }
end
+
+ context 'when feature_setting is a NamespaceFeatureSetting' do
+ let_it_be(:root_namespace) { create(:group) }
+
+ let(:feature_name) { 'duo_chat' }
+ let(:model_ref) { 'claude-3-7-sonnet-20250219' }
+
+ let(:feature_setting) do
+ create(
+ :ai_namespace_feature_setting,
+ namespace: root_namespace,
+ feature: feature_name,
+ offered_model_ref: model_ref
+ )
+ end
+
+ it 'returns the correct namespace_settings_params' do
+ is_expected.to eq({
+ provider: 'gitlab',
+ identifier: model_ref,
+ feature_setting: 'duo_chat'
+ })
+ end
+ end
end
describe '#self_hosted_params' do
diff --git a/ee/spec/lib/gitlab/llm/chain/requests/ai_gateway_spec.rb b/ee/spec/lib/gitlab/llm/chain/requests/ai_gateway_spec.rb
index 752a41481818e56977623cedc564c20900401ae6..93e1bdc289a5257a12914d52f044a81857456dc2 100644
--- a/ee/spec/lib/gitlab/llm/chain/requests/ai_gateway_spec.rb
+++ b/ee/spec/lib/gitlab/llm/chain/requests/ai_gateway_spec.rb
@@ -93,6 +93,7 @@
allow(Gitlab::Llm::Logger).to receive(:build).and_return(logger)
allow(logger).to receive(:conditional_info)
allow(instance).to receive(:ai_client).and_return(ai_client)
+ stub_feature_flags(ai_model_switching: false)
end
shared_examples 'performing request to the AI Gateway' do
@@ -367,6 +368,192 @@
end
end
end
+
+ context 'when root_namespace is passed' do
+ let_it_be(:root_namespace) { create(:group) }
+ let(:tracking_context) { { action: 'chat', request_id: 'uuid' } }
+ let(:user_prompt) { "Some prompt" }
+ let(:response) { 'response from llm' }
+ let(:logger) { instance_double(Gitlab::Llm::Logger) }
+ let(:ai_client) { double }
+
+ before do
+ allow(Gitlab::Llm::Logger).to receive(:build).and_return(logger)
+ allow(logger).to receive(:conditional_info)
+ allow_next_instance_of(described_class) do |instance|
+ allow(instance).to receive(:ai_client).and_return(ai_client)
+ end
+ end
+
+ context 'when model switching is enabled and model is selected' do
+ let(:unit_primitive) { :explain_code }
+ let(:model_ref) { 'claude-3-7-sonnet-20250219' }
+ let(:prompt) { { prompt: user_prompt, options: {} } }
+
+ before do
+ stub_feature_flags(ai_model_switching: true)
+ create(:ai_namespace_feature_setting, namespace: root_namespace,
+ feature: :"duo_chat_#{unit_primitive}", offered_model_ref: model_ref)
+ end
+
+ it 'sends identifier and feature_setting' do
+ url = "#{::Gitlab::AiGateway.url}#{described_class::BASE_ENDPOINT}/#{unit_primitive}"
+
+ expect(ai_client).to receive(:stream).with(
+ hash_including(
+ url: url,
+ body: hash_including(
+ prompt_components: array_including(
+ hash_including(payload: hash_including(
+ provider: "gitlab",
+ feature_setting: 'duo_chat_explain_code',
+ identifier: model_ref
+ ))
+ )
+ )
+ )
+ ).and_return(response)
+
+ gateway = described_class.new(user, root_namespace: root_namespace, tracking_context: tracking_context)
+ expect(gateway.request(prompt, unit_primitive: unit_primitive)).to eq(response)
+ end
+ end
+
+ context 'when model switching is enabled and model is NOT selected' do
+ let(:unit_primitive) { :explain_code }
+ let(:prompt) { { prompt: user_prompt, options: {} } }
+
+ before do
+ stub_feature_flags(ai_model_switching: true)
+ create(:ai_namespace_feature_setting, namespace: root_namespace,
+ feature: :"duo_chat_#{unit_primitive}", offered_model_ref: nil)
+ end
+
+ it 'sends only feature_setting (identifier is nil)' do
+ url = "#{::Gitlab::AiGateway.url}#{described_class::BASE_ENDPOINT}/#{unit_primitive}"
+
+ expect(ai_client).to receive(:stream).with(
+ hash_including(
+ url: url,
+ body: hash_including(
+ prompt_components: array_including(
+ hash_including(payload: hash_including(
+ provider: "gitlab",
+ feature_setting: 'duo_chat_explain_code',
+ identifier: nil
+ ))
+ )
+ )
+ )
+ ).and_return(response)
+
+ gateway = described_class.new(user, root_namespace: root_namespace, tracking_context: tracking_context)
+ expect(gateway.request(prompt, unit_primitive: unit_primitive)).to eq(response)
+ end
+ end
+
+ context 'when using agent prompt with model switching' do
+ let(:unit_primitive) { :explain_code }
+ let(:model_ref) { 'claude-3-7-sonnet-20250219' }
+ let(:prompt) do
+ {
+ options: {
+ use_ai_gateway_agent_prompt: true,
+ inputs: { a: 1 }
+ }
+ }
+ end
+
+ before do
+ stub_feature_flags(ai_model_switching: true)
+ create(:ai_namespace_feature_setting, namespace: root_namespace,
+ feature: :"duo_chat_#{unit_primitive}", offered_model_ref: model_ref)
+ end
+
+ it 'sends model_metadata with identifier and feature_setting' do
+ url = "#{::Gitlab::AiGateway.url}#{described_class::BASE_PROMPTS_CHAT_ENDPOINT}/#{unit_primitive}"
+
+ expect(ai_client).to receive(:stream).with(
+ hash_including(
+ url: url,
+ body: hash_including(
+ inputs: { a: 1 },
+ model_metadata: {
+ provider: 'gitlab',
+ feature_setting: 'duo_chat_explain_code',
+ identifier: model_ref
+ },
+ prompt_version: a_kind_of(String)
+ )
+ )
+ ).and_return(response)
+
+ gateway = described_class.new(user, root_namespace: root_namespace, tracking_context: tracking_context)
+ expect(gateway.request(prompt, unit_primitive: unit_primitive)).to eq(response)
+ end
+ end
+
+ context 'when model switching is disabled' do
+ let(:unit_primitive) { nil }
+ let(:prompt) { { prompt: user_prompt, options: {} } }
+
+ before do
+ stub_feature_flags(ai_model_switching: false)
+ end
+
+ it 'uses default classic model' do
+ url = "#{::Gitlab::AiGateway.url}#{described_class::ENDPOINT}"
+
+ expect(ai_client).to receive(:stream).with(
+ hash_including(
+ url: url,
+ body: hash_including(
+ prompt_components: array_including(
+ hash_including(payload: hash_including(
+ provider: :anthropic,
+ model: described_class::CLAUDE_3_5_SONNET
+ ))
+ )
+ )
+ )
+ ).and_return(response)
+
+ gateway = described_class.new(user, root_namespace: root_namespace, tracking_context: tracking_context)
+ expect(gateway.request(prompt, unit_primitive: unit_primitive)).to eq(response)
+ end
+ end
+
+ context 'when root_namespace is not root' do
+ let(:subgroup) { create(:group, parent: root_namespace) }
+ let(:unit_primitive) { 'write_tests' }
+ let(:prompt) { { prompt: user_prompt, options: {} } }
+
+ before do
+ stub_feature_flags(ai_model_switching: true)
+ end
+
+ it 'ignores model switching' do
+ url = "#{::Gitlab::AiGateway.url}#{described_class::BASE_ENDPOINT}/#{unit_primitive}"
+
+ expect(ai_client).to receive(:stream).with(
+ hash_including(
+ url: url,
+ body: hash_including(
+ prompt_components: array_including(
+ hash_including(payload: hash_including(
+ provider: :anthropic,
+ model: 'claude-3-5-sonnet-20240620'
+ ))
+ )
+ )
+ )
+ ).and_return(response)
+
+ gateway = described_class.new(user, root_namespace: subgroup, tracking_context: tracking_context)
+ expect(gateway.request(prompt, unit_primitive: unit_primitive)).to eq(response)
+ end
+ end
+ end
end
# rubocop:enable RSpec/MultipleMemoizedHelpers
end
diff --git a/ee/spec/lib/gitlab/llm/completions/chat_spec.rb b/ee/spec/lib/gitlab/llm/completions/chat_spec.rb
index 070efbbb95303102b5f6d9833ed01001c62f8212..489ac0bfb13912bf90ed5b01ec57378764e17ef7 100644
--- a/ee/spec/lib/gitlab/llm/completions/chat_spec.rb
+++ b/ee/spec/lib/gitlab/llm/completions/chat_spec.rb
@@ -101,6 +101,14 @@
stream_response_handler: stream_response_handler
]
+ expect(Gitlab::Llm::Chain::Requests::AiGateway).to receive(:new).with(
+ user,
+ hash_including(
+ tracking_context: anything,
+ root_namespace: expected_container&.root_ancestor
+ )
+ ).and_return(ai_request)
+
expect_next_instance_of(::Gitlab::Duo::Chat::ReactExecutor, *expected_params) do |instance|
expect(instance).to receive(:execute).and_return(answer)
end
@@ -210,12 +218,71 @@
end
describe '.initialize' do
- subject { described_class.new(prompt_message, nil, **options) }
+ let_it_be(:root_group) { create(:group) }
+ let_it_be(:subgroup) { create(:group, parent: root_group) }
+ let_it_be(:project) { create(:project, namespace: subgroup) }
+ let_it_be(:issue) { create(:issue, project: project) }
+ let_it_be(:user) { create(:user) }
+
+ let(:resource) { issue }
+ let(:prompt_message) do
+ build(:ai_chat_message, user: user, resource: resource, request_id: 'uuid', content: content, thread: thread)
+ end
+
+ let(:options) do
+ {
+ current_file: nil,
+ additional_context: [],
+ started_at: Time.current,
+ agent_version_id: nil,
+ extra_resource: {},
+ tracking_context: {},
+ resource: resource,
+ user: user
+ }
+ end
+
+ subject(:instance) do
+ described_class.new(prompt_message, nil, **options)
+ end
it 'trims additional context' do
- expect(::CodeSuggestions::Context).to receive(:new).with(additional_context).and_call_original
+ expect(::CodeSuggestions::Context).to receive(:new).with([]).and_call_original
+ instance
+ end
- subject
+ it 'sets root_namespace correctly on ai_request' do
+ expect(instance.send(:context).ai_request.root_namespace).to eq(root_group)
+ end
+
+ context 'when resource has no resource_parent and the root_namespace_id fallback is used' do
+ let_it_be(:fallback_namespace) { create(:group) }
+
+ let(:prompt_message) do
+ build(:ai_chat_message, user: user, resource: user, request_id: 'uuid', content: content, thread: thread)
+ end
+
+ let(:options) do
+ {
+ current_file: nil,
+ additional_context: [],
+ started_at: Time.current,
+ agent_version_id: nil,
+ extra_resource: {},
+ tracking_context: {},
+ resource: user,
+ user: user,
+ root_namespace_id: fallback_namespace.to_global_id.to_s
+ }
+ end
+
+ subject(:instance) do
+ described_class.new(prompt_message, nil, **options)
+ end
+
+ it 'uses the root_namespace_id fallback when resource_parent is nil' do
+ expect(instance.send(:context).ai_request.root_namespace).to eq(fallback_namespace)
+ end
end
end
diff --git a/ee/spec/lib/gitlab/llm/tanuki_bot_spec.rb b/ee/spec/lib/gitlab/llm/tanuki_bot_spec.rb
index 22b8552bb0a55ebb3b91418ddd57baa6d4fcefe4..a782fc6f34b9001124b51415a567084b55377ccd 100644
--- a/ee/spec/lib/gitlab/llm/tanuki_bot_spec.rb
+++ b/ee/spec/lib/gitlab/llm/tanuki_bot_spec.rb
@@ -237,4 +237,38 @@
end
end
end
+
+ describe '.root_namespace_id' do
+ let_it_be(:group) { create(:group) }
+
+ context 'with current context including root_namespace' do
+ let(:result) do
+ ::Gitlab::ApplicationContext.with_raw_context(root_namespace: group.full_path) do
+ described_class.root_namespace_id
+ end
+ end
+
+ it 'returns the global ID of the root namespace when found' do
+ expect(result).to eq(group.to_global_id)
+ end
+ end
+
+ context 'when root_namespace is not found' do
+ let(:result) do
+ ::Gitlab::ApplicationContext.with_raw_context(root_namespace: 'non_existent_namespace') do
+ described_class.root_namespace_id
+ end
+ end
+
+ it 'returns nil' do
+ expect(result).to be_nil
+ end
+ end
+
+ context 'when root_namespace is not present in the context' do
+ it 'returns nil' do
+ expect(described_class.root_namespace_id).to be_nil
+ end
+ end
+ end
end
diff --git a/ee/spec/models/ai/model_selection/namespace_feature_setting_spec.rb b/ee/spec/models/ai/model_selection/namespace_feature_setting_spec.rb
index 21145b387f06bc481c340dff5978e374329a8040..ebdcda05119417aa2871eccafac4621e3361725a 100644
--- a/ee/spec/models/ai/model_selection/namespace_feature_setting_spec.rb
+++ b/ee/spec/models/ai/model_selection/namespace_feature_setting_spec.rb
@@ -28,17 +28,22 @@
let(:existing_feature) { ai_feature_setting.feature.to_sym }
let(:new_feature_enum) { :code_completions }
+ context 'when namespace is nil' do
+ it 'returns nil' do
+ result = described_class.find_or_initialize_by_feature(nil, existing_feature)
+ expect(result).to be_nil
+ end
+ end
+
it 'returns existing setting when one exists for the feature' do
ai_feature_setting.save!
result = described_class.find_or_initialize_by_feature(group, existing_feature)
-
expect(result).to eq(ai_feature_setting)
end
it 'initializes a new setting when none exists for the feature' do
new_feature = :code_completions
result = described_class.find_or_initialize_by_feature(group, new_feature_enum)
-
expect(result).to be_a(described_class)
expect(result).to be_new_record
expect(result.namespace).to eq(group)
@@ -57,6 +62,60 @@
end
end
+ describe '.find_by_feature' do
+ let(:feature_name) { "duo_chat" }
+ let(:offered_model_ref) { "claude-3-7-sonnet-20250219" }
+
+ subject(:ai_feature_setting) do
+ create(:ai_namespace_feature_setting,
+ namespace: group,
+ feature: feature_name,
+ offered_model_ref: offered_model_ref)
+ end
+
+ before do
+ ai_feature_setting
+ end
+
+ context 'when namespace is nil' do
+ it 'returns nil' do
+ result = described_class.find_by_feature(nil, feature_name)
+ expect(result).to be_nil
+ end
+ end
+
+ context 'when namespace is not a root namespace' do
+ let(:subgroup) { create(:group, parent: group) }
+
+ it 'returns nil' do
+ result = described_class.find_by_feature(subgroup, feature_name)
+ expect(result).to be_nil
+ end
+ end
+
+ it 'returns existing setting when one exists for the feature' do
+ result = described_class.find_by_feature(group, feature_name)
+ expect(result).to eq(ai_feature_setting)
+ end
+
+ it 'returns nil when no setting exists for the feature' do
+ feature_without_setting = 'code_generations'
+ result = described_class.find_by_feature(group, feature_without_setting)
+ expect(result).to be_nil
+ end
+
+ context 'when the feature is not enabled' do
+ let(:ff_enabled) { false }
+
+ subject(:ai_feature_setting) { build(:ai_namespace_feature_setting) }
+
+ it 'returns nil' do
+ result = described_class.find_by_feature(group, feature_name)
+ expect(result).to be_nil
+ end
+ end
+ end
+
it_behaves_like 'model selection feature setting', scope_class_name: 'Group'
describe 'validations' do
diff --git a/ee/spec/views/layouts/_tanuki_bot_chat.html.haml_spec.rb b/ee/spec/views/layouts/_tanuki_bot_chat.html.haml_spec.rb
index 5b76bd6e8602dbe3dfae0686c88b3bf575c6213a..c3ec7b334d34fa2f4b2832bb55920380143ffeb7 100644
--- a/ee/spec/views/layouts/_tanuki_bot_chat.html.haml_spec.rb
+++ b/ee/spec/views/layouts/_tanuki_bot_chat.html.haml_spec.rb
@@ -10,10 +10,17 @@
allow(::Gitlab::Llm::TanukiBot).to receive_messages(
enabled_for?: true,
resource_id: 'test_resource_id',
- project_id: 'test_project_id'
+ project_id: 'test_project_id',
+ root_namespace_id: 'test_root_namespace_id'
)
end
+ it 'includes the root_namespace_id in the data attributes' do
+ render
+
+ expect(rendered).to have_css("#js-tanuki-bot-chat-app[data-root-namespace-id='test_root_namespace_id']")
+ end
+
context 'when AmazonQ is enabled' do
before do
allow(::Ai::AmazonQ).to receive(:enabled?).and_return(true)