From 8e075cccfec3cca7a6ac7659d9ffb2abf3ba38f1 Mon Sep 17 00:00:00 2001 From: David O'Regan Date: Thu, 13 Jun 2024 21:47:26 +0200 Subject: [PATCH 1/3] feat: add docs set and docs eval --- datasets/duo_chat_docs_questions_context.csv | 65468 +++++++++++++++++ evaluation_scripts/chat/evaluate_docs.py | 60 + 2 files changed, 65528 insertions(+) create mode 100644 datasets/duo_chat_docs_questions_context.csv create mode 100644 evaluation_scripts/chat/evaluate_docs.py diff --git a/datasets/duo_chat_docs_questions_context.csv b/datasets/duo_chat_docs_questions_context.csv new file mode 100644 index 00000000..53fabee6 --- /dev/null +++ b/datasets/duo_chat_docs_questions_context.csv @@ -0,0 +1,65468 @@ +question,answer,context +what is the best way to host a nextjs application in gcp?,,"1. Installing GitLab on Google Cloud Platform + + + +Installing GitLab on Google Cloud Platform + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed + +You can install GitLab on a Google Cloud Platform (GCP) using the official Linux package. You should customize it to accommodate your needs. + + + note To deploy production-ready GitLab on +Google Kubernetes Engine, +you can follow Google Cloud Platform’s +Click to Deploy steps +It’s an alternative to using a GCP VM, and uses +the Cloud native GitLab Helm chart. + + +Prerequisites + + +There are two prerequisites to install GitLab on GCP: + + + You must have a Google account. + You must sign up for the GCP program. If this is your first time, Google +gives you $300 credit for free to consume over a 60-day period. + + +After you have performed those two steps, you can create a VM. + +Creating the VM + + +To deploy GitLab on GCP you must create a virtual machine: + + + Go to https://console.cloud.google.com/compute/instances and sign in with your Google credentials. + + Select Create + + + + + On the next page, you can select the type of VM as well as the +estimated costs. Provide the name of the instance, desired data center, and machine type. +Note our hardware requirements for different user base sizes. + + + + + To select the size, type, and desired operating system, +select Change under Boot disk. select Select when finished. + + As a last step allow HTTP and HTTPS traffic, then select Create. The process finishes in a few seconds. + + +Installing GitLab + + +After a few seconds, the instance is created and available to sign in. The next step is to install GitLab onto the instance. + + + + + Make a note of the external IP address of the instance, as you will need that in a later step. + + Select SSH under the connect column to connect to the instance. + + A new window appears, with you logged into the instance. + + + + + Next, follow the instructions for installing GitLab for the operating system you choose, at https://about.gitlab.com/install/. You can use the external IP address you noted before as the hostname. + + + Congratulations! GitLab is now installed and you can access it via your browser. To finish installation, open the URL in your browser and provide the initial administrator password. The username for this account is root. + + + + + +Next steps + + +These are the most important next steps to take after you installed GitLab for +the first time. + +Assigning a static IP + + +By default, Google assigns an ephemeral IP to your instance. It is strongly +recommended to assign a static IP if you are using GitLab in production +and use a domain name as shown below. + +Read Google’s documentation on how to promote an ephemeral IP address. 
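If you prefer the command line, the same promotion can be done with the gcloud CLI. This is a minimal sketch, assuming the address name, IP, and region below are placeholders you replace with your instance's actual values:

# Reserve the IP address currently attached to the VM as a static address
gcloud compute addresses create gitlab-static-ip \
  --addresses=203.0.113.10 \
  --region=us-central1

# Verify the address is now reserved
gcloud compute addresses describe gitlab-static-ip --region=us-central1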
+ +Using a domain name + + +Assuming you have a domain name in your possession and you have correctly +set up DNS to point to the static IP you configured in the previous step, +here’s how you configure GitLab to be aware of the change: + + + + SSH into the VM. You can select SSH in the Google console +and a new window pops up. + + + + In the future you might want to set up connecting with an SSH key +instead. + + + Edit the configuration file of the Linux package using your favorite text editor: + + +sudo vim /etc/gitlab/gitlab.rb + + + + Set the external_url value to the domain name you wish GitLab to have +without https: + + +external_url 'http://gitlab.example.com' + + + We will set up HTTPS in the next step, no need to do this now. + + + Reconfigure GitLab for the changes to take effect: + + +sudo gitlab-ctl reconfigure + + + + You can now visit GitLab using the domain name. + + + +Configuring HTTPS with the domain name + + +Although not needed, it’s strongly recommended to secure GitLab with a +TLS certificate. + +Configuring the email SMTP settings + + +You must configure the email SMTP settings correctly otherwise GitLab cannot send notification emails, like comments, and password changes. +Check the Linux package documentation how to do so. + +Further reading + + +GitLab can be configured to authenticate with other OAuth providers, like LDAP, +SAML, and Kerberos. Here are some documents you might be interested in reading: + + + Linux package documentation + Integration documentation + GitLab Pages configuration + GitLab container registry configuration + + + + + +2. Get started with GitLab CI/CD + + + +Get started with GitLab CI/CD + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +CI/CD is a continuous method of software development, where you continuously build, +test, deploy, and monitor iterative code changes. + +This iterative process helps reduce the chance that you develop new code based on +buggy or failed previous versions. GitLab CI/CD can catch bugs early in the development cycle, +and help ensure that all the code deployed to production complies with your established code standards. + +Common terms + + +If you’re new to GitLab CI/CD, start by reviewing some of the commonly used terms. + +The .gitlab-ci.yml file + + +To use GitLab CI/CD, you start with a .gitlab-ci.yml file at the root of your project +which contains the configuration for your CI/CD pipeline. This file follows the YAML format +and has its own syntax. + +You can name this file anything you want, but .gitlab-ci.yml is the most common name. + +Get started: + + + +Create your first .gitlab-ci.yml file. + View all the possible keywords that you can use in the .gitlab-ci.yml file in +the CI/CD YAML syntax reference. + Use the pipeline editor to edit or visualize +your CI/CD configuration. + + +Runners + + +Runners are the agents that run your jobs. These agents can run on physical machines or virtual instances. +In your .gitlab-ci.yml file, you can specify a container image you want to use when running the job. +The runner loads the image, clones your project and runs the job either locally or in the container. + +If you use GitLab.com, runners on Linux, Windows, and macOS are already available for use. And you can register your own +runners on GitLab.com if you’d like. + +If you don’t use GitLab.com, you can: + + + Register runners or use runners already registered for your self-managed instance. + Create a runner on your local machine. 
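For reference, registering a runner from a shell might look like the following sketch. The URL, token, executor, and description are placeholders; use the values shown on your project's or instance's Runners settings page, and note that older GitLab Runner versions use --registration-token instead of --token:

# Install GitLab Runner first, then register it non-interactively
gitlab-runner register \
  --non-interactive \
  --url "https://gitlab.example.com" \
  --token "$RUNNER_AUTHENTICATION_TOKEN" \
  --executor "shell" \
  --description "my-local-runner"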
+ + +Get started: + + + +Create a runner on your local machine. + +Learn more about runners. + + +Pipelines + + +Pipelines are made up of jobs and stages: + + + +Jobs define what you want to do. For example, test code changes, or deploy +to a staging environment. + Jobs are grouped into stages. Each stage contains at least one job. +Typical stages might be build, test, and deploy. + + +Get started: + + + +Learn more about pipelines. + + +CI/CD variables + + +CI/CD variables help you customize jobs by making values defined elsewhere accessible to jobs. +They can be hard-coded in your .gitlab-ci.yml file, project settings, or dynamically generated. + +Get started: + + + +Learn more about CI/CD variables. + +Learn about dynamically generated predefined variables. + + +CI/CD components + + +A CI/CD component is a reusable single pipeline configuration unit. Use them to compose an entire pipeline configuration or a small part of a larger pipeline. + +Get started: + + + +Learn more about CI/CD components. + + +Videos + + + + + GitLab CI/CD demo. + + GitLab CI/CD and the Web IDE. + Webcast: Mastering continuous software development. + + +Related topics + + + + +Five teams that made the switch to GitLab CI/CD. + +Make the case for CI/CD in your organization. + Learn how Verizon reduced rebuilds from 30 days to under 8 hours with GitLab. + Use the GitLab Workflow VS Code extension to +validate your configuration +and view your pipeline status. + + + +" +is it possible to link mrs between two projects?,,"1. Merge requests + + + +Merge requests + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +A merge request (MR) is a proposal to incorporate changes from a source branch to a target branch. + +When you open a merge request, you can visualize and collaborate on the changes before merge. +Merge requests include: + + + A description of the request. + Code changes and inline code reviews. + Information about CI/CD pipelines. + A comment section for discussion threads. + The list of commits. + + +Create a merge request + + +Learn the various ways to create a merge request. + +Use merge request templates + + +When you create a merge request, GitLab checks for the existence of a +description template to add data to your merge request. +GitLab checks these locations in order from 1 to 5, and applies the first template +found to your merge request: + + + + + Name + Project UIsetting + Groupdefault.md + + Instancedefault.md + + Projectdefault.md + + No template + + + + + Standard commit message + 1 + 2 + 3 + 4 + 5 + + + Commit message with an issue closing pattern like Closes #1234 + + 1 + 2 + 3 + 4 + 5 * + + + Branch name prefixed with an issue ID, like 1234-example + + 1 * + 2 * + 3 * + 4 * + 5 * + + + + + + note Items marked with an asterisk (*) also append an issue closing pattern. + + +View merge requests + + +You can view merge requests for your project, group, or yourself. + +For a project + + +To view all merge requests for a project: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests. + + +Or, to use a keyboard shortcut, press g + m. + +For all projects in a group + + +To view merge requests for all projects in a group: + + + On the left sidebar, select Search or go to and find your group. + Select Code > Merge requests. + + +If your group contains subgroups, this view also displays merge requests from the subgroup projects. 
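A group's merge requests can also be listed over the REST API, which is handy for scripting. A hedged example with curl, where the group ID and access token are placeholders:

# List open merge requests for projects in a group
curl --header "PRIVATE-TOKEN: <your_access_token>" \
  "https://gitlab.example.com/api/v4/groups/<group_id>/merge_requests?state=opened"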
+ +Assigned to you + + +To view all merge requests assigned to you: + + + On the left sidebar, select Search or go to. + From the dropdown list, select Merge requests assigned to me. + + +or: + + + To use a keyboard shortcut, press Shift + m. + + +or: + + + On the left sidebar, select Code > Merge requests ( ). + From the dropdown list, select Assigned. + + +Filter the list of merge requests + + + +History + + + + + Filtering by source-branch introduced in GitLab 16.6. + Filtering by merged-by introduced in GitLab 16.9. Available only when the feature flag mr_merge_user_filter is enabled. + + + + + + +To filter the list of merge requests: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests. + Above the list of merge requests, select Search or filter results. + From the dropdown list, select the attribute you wish to filter by. Some examples: + + +By environment or deployment date. + +ID: Enter filter #30 to return only merge request 30. + User filters: Type (or select from the dropdown list) any of these filters to display a list of users: + + +Approved-By, for merge requests already approved by a user. Premium and Ultimate only. + +Approver, for merge requests that this user is eligible to approve. +(For more information, read about Code owners). Premium and Ultimate only. + +Merged-By, for merge requests merged by this user. + +Reviewer, for merge requests reviewed by this user. + + + + + Select or type the operator to use for filtering the attribute. The following operators are +available: + + +=: Is + +!=: Is not + + + Enter the text to filter the attribute by. +You can filter some attributes by None or Any. + Repeat this process to filter by multiple attributes. Multiple attributes are joined by a logical +AND. + Select a Sort direction, either for descending order, +or for ascending order. + + +By environment or deployment date + + +To filter merge requests by deployment data, such as the environment or a date, +you can type (or select from the dropdown list) the following: + + + Environment + Deployed-before + Deployed-after + + + + note Projects using a fast-forward merge method +do not return results, as this method does not create a merge commit. + + +When filtering by an environment, a dropdown list presents all environments that +you can choose from. + +When filtering by Deployed-before or Deployed-after: + + + The date refers to when the deployment to an environment (triggered by the +merge commit) completed successfully. + You must enter the deploy date manually. + Deploy dates use the format YYYY-MM-DD, and must be wrapped in double quotes ("") +if you want to specify both a date and time (""YYYY-MM-DD HH:MM""). + + +Add changes to a merge request + + +If you have permission to add changes to a merge request, you can add your changes +to an existing merge request in several ways, depending on the complexity of your +change and whether you need access to a development environment: + + + +Edit changes in the Web IDE in your browser with the +. keyboard shortcut. Use this +browser-based method to edit multiple files, or if you are not comfortable with Git commands. +You cannot run tests from the Web IDE. + +Edit changes in Gitpod, if you +need a fully-featured environment to both edit files, and run tests afterward. Gitpod +supports running the GitLab Development Kit (GDK). +To use Gitpod, you must enable Gitpod in your user account. + +Push changes from the command line, if you are +familiar with Git and the command line. 
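For the command-line route, the workflow is the usual Git one: check out the merge request's source branch, commit, and push. The branch name below is only an example:

# Fetch and switch to the merge request's source branch
git fetch origin
git checkout my-feature-branch

# Commit your changes and push them back to the same branch;
# the open merge request picks up the new commits automatically
git add .
git commit -m "Address review feedback"
git push origin my-feature-branch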
+ + +Assign a user to a merge request + + +To assign the merge request to a user, use the /assign @user +quick action in a text area in +a merge request, or: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests and find your merge request. + On the right sidebar, expand the right sidebar and locate the Assignees section. + Select Edit. + Search for the user you want to assign, and select the user. + + +The merge request is added to the user’s assigned merge request list. + +Assign multiple users + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + Moved to GitLab Premium in 13.9. + + + + + + +GitLab enables multiple assignees for merge requests, if multiple people are +accountable for it: + + + +To assign multiple assignees to a merge request, use the /assign @user +quick action in a text area, or: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests and find your merge request. + On the right sidebar, expand the right sidebar and locate the Assignees section. + Select Edit and, from the dropdown list, select all users you want +to assign the merge request to. + + +To remove an assignee, clear the user from the same dropdown list. + +Close a merge request + + +If you decide to permanently stop work on a merge request, +GitLab recommends you close the merge request rather than +delete it. The author and assignees of a merge request, and users with +Developer, Maintainer, or Owner roles in a project +can close merge requests in the project: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests and find your merge request. + Scroll to the comment box at the bottom of the page. + Following the comment box, select Close merge request. + + +GitLab closes the merge request, but preserves records of the merge request, +its comments, and any associated pipelines. + +Delete a merge request + + +GitLab recommends you close, rather than delete, merge requests. +You cannot undo the deletion of a merge request. + +Prerequisites: + + + You must have the Owner role for the project. + + +To delete a merge request: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests and find the merge request you want to delete. + Select Edit. + Scroll to the bottom of the page, and select Delete merge request. + + +Delete the source branch on merge + + +You can delete the source branch for a merge request: + + + When you create a merge request, by selecting Delete source branch when merge request accepted. + When you merge a merge request, if you have the Maintainer role, by selecting Delete source branch. + + +An administrator can make this option the default in the project’s settings. + +Update merge requests when target branch merges + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed + + +History + + + + + Chained merge requests changed to automatically rebase on the new target branch in GitLab 16.9. + Chained merge requests no longer automatically rebase on the new target branch in GitLab 16.10 with a flag named :rebase_when_retargetting_mrs. Disabled by default. + + + + + + + + On self-managed GitLab, by default this feature is not available. To make it +available, an administrator can enable the feature flag named :rebase_when_retargetting_mrs. +On GitLab.com and GitLab Dedicated, this feature is not available. 
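On a self-managed Linux package installation, an administrator could toggle that flag from a shell on the GitLab server, roughly as in this sketch:

# Enable the feature flag in the GitLab Rails environment
sudo gitlab-rails runner 'Feature.enable(:rebase_when_retargetting_mrs)'

# Disable it again later if needed
sudo gitlab-rails runner 'Feature.disable(:rebase_when_retargetting_mrs)'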
+ + +Merge requests are often chained together, with one merge request depending on +the code added or changed in another merge request. To support keeping individual +merge requests small, GitLab can update up to four open merge requests when their +target branch merges into main. For example: + + + +Merge request 1: merge feature-alpha into main. + +Merge request 2: merge feature-beta into feature-alpha. + + +If these merge requests are open at the same time, and merge request 1 (feature-alpha) +merges into main, GitLab updates the destination of merge request 2 from feature-alpha +to main. + +Merge requests with interconnected content updates are usually handled in one of these ways: + + + Merge request 1 is merged into main first. Merge request 2 is then +retargeted to main. + Merge request 2 is merged into feature-alpha. The updated merge request 1, which +now contains the contents of feature-alpha and feature-beta, is merged into main. + + +This feature works only when a merge request is merged. Selecting Remove source branch +after merging does not retarget open merge requests. This improvement is +proposed as a follow-up. + +Move sidebar actions + + + + + +History + + + + + +Introduced in GitLab 14.10 with a flag named moved_mr_sidebar. Enabled by default. + +Changed to also move actions on issues, incidents, and epics in GitLab 16.0. + + + + + + +When this feature flag is enabled, in the upper-right corner, +Merge request actions ( ) contains the following actions: + + + The notifications toggle + Mark merge request as ready or draft + + Close merge request + Lock discussion + Copy reference + + +In GitLab 16.0 and later, similar action menus are available on issues, incidents, and epics. + +When this feature flag is disabled, these actions are in the right sidebar. + +Merge request workflows + + +For a software developer working in a team: + + + You check out a new branch, and submit your changes through a merge request. + You gather feedback from your team. + You work on the implementation optimizing code with Code Quality reports. + You verify your changes with Unit test reports in GitLab CI/CD. + You avoid using dependencies whose license is not compatible with your project with License approval policies. + You request the approval from your manager. + Your manager: + + Pushes a commit with their final review. + +Approves the merge request. + Sets it to auto-merge (formerly Merge when pipeline succeeds). + + + Your changes get deployed to production with manual jobs for GitLab CI/CD. + Your implementations were successfully shipped to your customer. + + +For a web developer writing a webpage for your company’s website: + + + You check out a new branch and submit a new page through a merge request. + You gather feedback from your reviewers. + You preview your changes with Review Apps. + You request your web designers for their implementation. + You request the approval from your manager. + Once approved, your merge request is squashed and merged, and deployed to staging with GitLab Pages. + Your production team cherry-picks the merge commit into production. + + +Filter activity in a merge request + + + +History + + + + + +Introduced in GitLab 15.11 with a flag named mr_activity_filters. Disabled by default. + +Enabled on GitLab.com in GitLab 16.0. + +Enabled on self-managed in GitLab 16.3 by default. + +Generally available in GitLab 16.5. Feature flag mr_activity_filters removed. + Filtering bot comments introduced in GitLab 16.9. 
+ + + + + + +To understand the history of a merge request, filter its activity feed to show you +only the items that are relevant to you. + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests. + Select a merge request. + Scroll to Activity. + On the right side of the page, select Activity filter to show the filter options. +If you’ve selected filter options previously, this field shows a summary of your +choices, like Activity + 5 more. + + Select the types of activity you want to see. Options include: + + + Assignees & Reviewers + Approvals + Comments (from bots) + Comments (from users) + Commits & branches + Edits + Labels + Lock status + Mentions + Merge request status + Tracking + + + Optional. Select Sort ( ) to reverse the sort order. + + +Your selection persists across all merge requests. You can also change the +sort order by clicking the sort button on the right. + +Resolve a thread + + +When you want to finish a conversation in a merge request, +resolve a thread. + +The number of unresolved threads is shown in the top right corner of a +merge request, like this: 7 unresolved threads. + +Move all unresolved threads in a merge request to an issue + + +If you have multiple unresolved threads in a merge request, you can +create an issue to resolve them separately: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests and find your merge request. + In the merge request, in the top right, find the Unresolved threads +dropdown list, and select Thread options ( ). + Select Resolve all with new issue. + Fill out the fields in the new issue, and select Create issue. + + +All threads are marked as resolved, and a link is added from the merge request to +the newly created issue. + +Move one unresolved thread in a merge request to an issue + + +If you have one specific unresolved thread in a merge request, you can +create an issue to resolve it separately: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests and find your merge request. + In the merge request, find the thread you want to move. + Below the last reply to the thread, next to Resolve thread, select +Create issue to resolve thread ( ). + Fill out the fields in the new issue, and select Create issue. + + +The thread is marked as resolved, and a link is added from the merge request to +the newly created issue. + +Prevent merge unless all threads are resolved + + +You can prevent merge requests from being merged until all threads are +resolved. When this setting is enabled, the Unresolved threads counter in a merge request +is shown in orange when at least one thread remains unresolved. + + + On the left sidebar, select Search or go to and find your project. + Select Settings > Merge requests. + In the Merge checks section, select the All threads must be resolved checkbox. + Select Save changes. + + +Automatically resolve threads in a merge request when they become outdated + + +You can set merge requests to automatically resolve threads when lines are modified +with a new push. + + + On the left sidebar, select Search or go to and find your project. + Select Settings > Merge requests. + In the Merge options section, select +Automatically resolve merge request diff threads when they become outdated. + Select Save changes. + + +Threads are now resolved if a push makes a diff section outdated. +Threads on lines that don’t change and top-level resolvable threads are not resolved. 
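Both of these settings can also be toggled per project through the Projects API. A hedged curl sketch, assuming the only_allow_merge_if_all_discussions_are_resolved and resolve_outdated_diff_discussions project attributes, with the project ID and token as placeholders:

# Require resolved threads before merge and auto-resolve outdated diff threads
curl --request PUT \
  --header "PRIVATE-TOKEN: <your_access_token>" \
  --data "only_allow_merge_if_all_discussions_are_resolved=true" \
  --data "resolve_outdated_diff_discussions=true" \
  "https://gitlab.example.com/api/v4/projects/<project_id>"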
+ +Move notifications and to-dos + + +DETAILs: +Tier: Free, Premium, Ultimate +Offering: Self-managed + + +History + + + + + +Introduced in GitLab 16.5 with a flag named notifications_todos_buttons. Disabled by default. + +Issues, incidents, and epics also updated. + + + + + + + + On self-managed GitLab, by default this feature is not available. To make it available, an administrator can enable the feature flag named notifications_todos_buttons. +On GitLab.com and GitLab Dedicated, this feature is not available. + + +When this feature flag is enabled, the notifications and to-do item buttons are moved to the upper right corner of the page. + + + On merge requests, these buttons are located to the far right of the tabs. + On issues, incidents, and epics, these buttons are located at the top of the right sidebar. + + +Related topics + + + + Create a merge request + Review a merge request + Authorization for merge requests + Testing and reports + GitLab keyboard shortcuts + Comments and threads + Suggest code changes + CI/CD pipelines + +Push options for merge requests + + + +2. Crosslinking issues + + + +Crosslinking issues + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +There are several ways to mention an issue or make issues appear in each other’s +Linked issues section. + +For more information on GitLab Issues, read the issues documentation. + +From commit messages + + +Every time you mention an issue in your commit message, you’re creating +a relationship between the two stages of the development workflow: the +issue itself and the first commit related to that issue. + +If the issue and the code you’re committing are both in the same project, +add #xxx to the commit message, where xxx is the issue number. + +git commit -m ""this is my commit message. Ref #xxx"" + + +Since commit messages cannot usually begin with a # character, you may use +the alternative GL-xxx notation as well: + +git commit -m ""GL-xxx: this is my commit message"" + + +If they are in different projects, but in the same group, +add projectname#xxx to the commit message. + +git commit -m ""this is my commit message. Ref projectname#xxx"" + + +If they are not in the same group, you can add the full URL to the issue +(https://gitlab.com///-/issues/). + +git commit -m ""this is my commit message. Related to https://gitlab.com///-/issues/"" + + +Of course, you can replace gitlab.com with the URL of your own GitLab instance. + +Linking your first commit to your issue is relevant +for tracking your process with GitLab Value Stream Analytics. +It measures the time taken for planning the implementation of that issue, +which is the time between creating an issue and making the first commit. + +From linked issues + + +Mentioning linked issues in merge requests and other issues helps your team members and +collaborators know that there are opened issues regarding the same topic. + +You do that as explained above, when mentioning an issue from a commit message. + +When mentioning issue #111 in issue #222, issue #111 also displays a notification +in its tracker. That is, you only need to mention the relationship once for it to +display in both issues. The same is valid when mentioning issues in merge requests. + + + +From merge requests + + +Mentioning issues in merge request comments works exactly the same way as +they do for linked issues. + +When you mention an issue in a merge request description, it +links the issue and merge request together. 
Additionally, +you can also set an issue to close automatically +as soon as the merge request is merged. + + + +From branch names + + +When you create a branch in the same project as an issue and start the branch name with the issue +number, followed by a hyphen, the issue and MR you create are linked. +For more information, see +Prefix branch names with issue numbers. + + +" +how to standardize cicd using gitlab?,,"1. Get started with GitLab CI/CD + + + +Get started with GitLab CI/CD + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +CI/CD is a continuous method of software development, where you continuously build, +test, deploy, and monitor iterative code changes. + +This iterative process helps reduce the chance that you develop new code based on +buggy or failed previous versions. GitLab CI/CD can catch bugs early in the development cycle, +and help ensure that all the code deployed to production complies with your established code standards. + +Common terms + + +If you’re new to GitLab CI/CD, start by reviewing some of the commonly used terms. + +The .gitlab-ci.yml file + + +To use GitLab CI/CD, you start with a .gitlab-ci.yml file at the root of your project +which contains the configuration for your CI/CD pipeline. This file follows the YAML format +and has its own syntax. + +You can name this file anything you want, but .gitlab-ci.yml is the most common name. + +Get started: + + + +Create your first .gitlab-ci.yml file. + View all the possible keywords that you can use in the .gitlab-ci.yml file in +the CI/CD YAML syntax reference. + Use the pipeline editor to edit or visualize +your CI/CD configuration. + + +Runners + + +Runners are the agents that run your jobs. These agents can run on physical machines or virtual instances. +In your .gitlab-ci.yml file, you can specify a container image you want to use when running the job. +The runner loads the image, clones your project and runs the job either locally or in the container. + +If you use GitLab.com, runners on Linux, Windows, and macOS are already available for use. And you can register your own +runners on GitLab.com if you’d like. + +If you don’t use GitLab.com, you can: + + + Register runners or use runners already registered for your self-managed instance. + Create a runner on your local machine. + + +Get started: + + + +Create a runner on your local machine. + +Learn more about runners. + + +Pipelines + + +Pipelines are made up of jobs and stages: + + + +Jobs define what you want to do. For example, test code changes, or deploy +to a staging environment. + Jobs are grouped into stages. Each stage contains at least one job. +Typical stages might be build, test, and deploy. + + +Get started: + + + +Learn more about pipelines. + + +CI/CD variables + + +CI/CD variables help you customize jobs by making values defined elsewhere accessible to jobs. +They can be hard-coded in your .gitlab-ci.yml file, project settings, or dynamically generated. + +Get started: + + + +Learn more about CI/CD variables. + +Learn about dynamically generated predefined variables. + + +CI/CD components + + +A CI/CD component is a reusable single pipeline configuration unit. Use them to compose an entire pipeline configuration or a small part of a larger pipeline. + +Get started: + + + +Learn more about CI/CD components. + + +Videos + + + + + GitLab CI/CD demo. + + GitLab CI/CD and the Web IDE. + Webcast: Mastering continuous software development. 
+ + +Related topics + + + + +Five teams that made the switch to GitLab CI/CD. + +Make the case for CI/CD in your organization. + Learn how Verizon reduced rebuilds from 30 days to under 8 hours with GitLab. + Use the GitLab Workflow VS Code extension to +validate your configuration +and view your pipeline status. + + + +2. Tutorial: Create and run your first GitLab CI/CD pipeline + + + +Tutorial: Create and run your first GitLab CI/CD pipeline + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +This tutorial shows you how to configure and run your first CI/CD pipeline in GitLab. + +If you are already familiar with basic CI/CD concepts, you can learn about +common keywords in Tutorial: Create a complex pipeline. + +Prerequisites + + +Before you start, make sure you have: + + + A project in GitLab that you would like to use CI/CD for. + The Maintainer or Owner role for the project. + + +If you don’t have a project, you can create a public project for free on https://gitlab.com. + +Steps + + +To create and run your first pipeline: + + + + Ensure you have runners available to run your jobs. + + If you’re using GitLab.com, you can skip this step. GitLab.com provides instance runners for you. + + + Create a .gitlab-ci.yml file +at the root of your repository. This file is where you define the CI/CD jobs. + + + +When you commit the file to your repository, the runner runs your jobs. +The job results are displayed in a pipeline. + +Ensure you have runners available + + +In GitLab, runners are agents that run your CI/CD jobs. + +To view available runners: + + + Go to Settings > CI/CD and expand Runners. + + +As long as you have at least one runner that’s active, with a green circle next to it, +you have a runner available to process your jobs. + +If you don’t have a runner + + +If you don’t have a runner: + + + +Install GitLab Runner on your local machine. + +Register the runner for your project. +Choose the shell executor. + + +When your CI/CD jobs run, in a later step, they will run on your local machine. + +Create a .gitlab-ci.yml file + + +Now create a .gitlab-ci.yml file. It is a YAML file where +you specify instructions for GitLab CI/CD. + +In this file, you define: + + + The structure and order of jobs that the runner should execute. + The decisions the runner should make when specific conditions are encountered. + + +To create a .gitlab-ci.yml file: + + + On the left sidebar, select Code > Repository. + + Above the file list, select the branch you want to commit to. +If you’re not sure, leave master or main. +Then select the plus icon ( ) and New file: + + + + + For the Filename, type .gitlab-ci.yml and in the larger window, +paste this sample code: + + +build-job: + stage: build + script: + - echo ""Hello, $GITLAB_USER_LOGIN!"" + +test-job1: + stage: test + script: + - echo ""This job tests something"" + +test-job2: + stage: test + script: + - echo ""This job tests something, but takes more time than test-job1."" + - echo ""After the echo commands complete, it runs the sleep command for 20 seconds"" + - echo ""which simulates a test that runs 20 seconds longer than test-job1"" + - sleep 20 + +deploy-prod: + stage: deploy + script: + - echo ""This job deploys something from the $CI_COMMIT_BRANCH branch."" + environment: production + + + This example shows four jobs: build-job, test-job1, test-job2, and deploy-prod. +The comments listed in the echo commands are displayed in the UI when you view the jobs. 
+The values for the predefined variables +$GITLAB_USER_LOGIN and $CI_COMMIT_BRANCH are populated when the jobs run. + + Select Commit changes. + + +The pipeline starts and runs the jobs you defined in the .gitlab-ci.yml file. + +View the status of your pipeline and jobs + + +Now take a look at your pipeline and the jobs within. + + + + Go to Build > Pipelines. A pipeline with three stages should be displayed: + + + + + View a visual representation of your pipeline by selecting the pipeline ID: + + + + + View details of a job by selecting the job name. For example, deploy-prod: + + + + + +You have successfully created your first CI/CD pipeline in GitLab. Congratulations! + +Now you can get started customizing your .gitlab-ci.yml and defining more advanced jobs. + + +.gitlab-ci.yml tips + + +Here are some tips to get started working with the .gitlab-ci.yml file. + +For the complete .gitlab-ci.yml syntax, see the full CI/CD YAML syntax reference. + + + Use the pipeline editor to edit your .gitlab-ci.yml file. + Each job contains a script section and belongs to a stage: + + +stage describes the sequential execution of jobs. +If there are runners available, jobs in a single stage run in parallel. + Use the needs keyword to run jobs out of stage order. +This creates a Directed Acyclic Graph (DAG). + + + You can set additional configuration to customize how your jobs and stages perform: + + Use the rules keyword to specify when to run or skip jobs. +The only and except legacy keywords are still supported, but can’t be used +with rules in the same job. + Keep information across jobs and stages persistent in a pipeline with cache +and artifacts. These keywords are ways to store +dependencies and job output, even when using ephemeral runners for each job. + Use the default keyword to specify additional +configurations that are applied to all jobs. This keyword is often used to define +before_script and after_script +sections that should run on every job. + + + + +Related topics + + + + Migrate from CircleCI + Migrate from Jenkins + + Watch First time GitLab & CI/CD. This includes a quick introduction to GitLab, the first steps with CI/CD, building a Go project, running tests, using the CI/CD pipeline editor, detecting secrets and security vulnerabilities and offers more exercises for asynchronous practice. + + Watch Intro to GitLab CI. This workshop uses the Web IDE to quickly get going with building source code using CI/CD, and run unit tests. + + + +" +i want to clone a github project to gitlab and keep both repositories in sync. how can i do that?,,"1. Import your project from GitHub to GitLab + + + +Import your project from GitHub to GitLab + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + +Introduced in GitLab 15.8, GitLab no longer automatically creates namespaces or groups that don’t exist. GitLab also no longer falls back to using the user’s personal namespace if the namespace or group name is taken. + +Introduced in GitLab 15.10, you no longer need to add any users to the parent group in GitLab to successfully import the Require a pull request before merging - Allow specified actors to bypass required pull requests branch protection rule. + + + + + + +You can import your GitHub projects from either GitHub.com or GitHub Enterprise. Importing projects does not +migrate or import any types of groups or organizations from GitHub to GitLab. 
+ +The namespace is a user or group in GitLab, such as gitlab.com/sidney-jones or +gitlab.com/customer-success. + +Using the GitLab UI, the GitHub importer always imports from the +github.com domain. If you are importing from a self-hosted GitHub Enterprise Server domain, use the +GitLab Import API GitHub endpoint. + +You can change the target namespace and target repository name before you import. + + +For an overview of the import process, see How to migrate from GitHub to GitLab including Actions. + +Prerequisites + + +To import projects from GitHub, you must enable the +GitHub import source. +If that import source is not enabled, ask your GitLab administrator to enable it. The GitHub import source is enabled +by default on GitLab.com. + +Permissions and roles + + + +History + + + + + Requirement for Maintainer role instead of Developer role introduced in GitLab 16.0 and backported to GitLab 15.11.1 and GitLab 15.10.5. + + + + + + +To use the GitHub importer, you must have: + + + Access to the GitHub project to import. + At least the Maintainer role on the destination GitLab group to import to. + + +Also, the organization the GitHub repository belongs to must not impose restrictions of a +third-party application access policy +on the GitLab instance you import to. + +Accounts for user contribution mapping + + +For user contribution mapping between GitHub and GitLab to work: + + + Each GitHub author and assignee in the repository must have a +public-facing email address. + The GitHub user’s email address must match their GitLab email address. + If a user’s email address in GitHub is set as their secondary email address in GitLab, they must confirm it. + + +GitHub Enterprise does not require a public email address, so you might have to add it to existing accounts. + +If the above requirements are not met, the importer can’t map the particular user’s contributions. In that case: + + + The project creator is set as the author and assignee of issues and merge requests. The project creator is usually the +user that initiated the import process. For some contributions that have a description or note such as pull requests, +issue, notes, the importer amends the text with details of who originally created the contribution. + Reviewers and approvals added on pull requests in GitHub cannot be imported. In this case, the importer creates comments +describing that non-existent users were added as reviewers and approvers. However, the actual reviewer status and +approval are not applied to the merge request in GitLab. + + +Known issues + + + + GitHub pull request comments (known as diff notes in GitLab) created before 2017 are imported in separate threads. +This occurs because of a limitation of the GitHub API that doesn’t include in_reply_to_id for comments before 2017. + Because of a known issue, Markdown attachments from +repositories on GitHub Enterprise Server instances aren’t imported. + Because of a known issue, when importing projects that used +GitHub auto-merge, the imported project in GitLab can have merge commits labeled “unverified” if the commit was signed with GitHub’s internal GPG key. + + +Import your GitHub repository into GitLab + + +Before you begin, ensure that any GitHub user you want to map to a GitLab user +has a GitLab email address that matches their +publicly visible email address +on GitHub. + +If a GitHub user’s public email address doesn’t match any GitLab user email +address, the user’s activity is associated with the user account that is +performing the import. 
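To check which email address a GitHub account exposes publicly, and therefore whether it can be matched to a GitLab user, you can query the GitHub REST API. A small sketch; the username is a placeholder:

# The "email" field is the account's public email, or null if none is set
curl --silent "https://api.github.com/users/<github_username>" | grep '"email"'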
+ +You can import your GitHub repository by either: + + + Using GitHub OAuth + Using a GitHub Personal Access Token + Using the API + + +If importing from github.com you can use any method to import. Self-hosted GitHub Enterprise Server customers must use the API. + +Use GitHub OAuth + + +If you are importing to GitLab.com or to a self-managed GitLab that has GitHub OAuth configured, you can use GitHub OAuth to import your repository. + +This method has an advantage over using a Personal Access Token (PAT) +because the backend exchanges the access token with the appropriate permissions. + + + On the left sidebar, at the top, select Create new ( ) and New project/repository. + Select Import project and then GitHub. + Select Authorize with GitHub. + Proceed to selecting which repositories to import. + + +To use a different method to perform an import after previously performing +these steps, sign out of your GitLab account and sign in again. + +Use a GitHub Personal Access Token + + +To import your GitHub repository using a GitHub Personal Access Token: + + + Generate a GitHub Personal Access Token: + + Go to https://github.com/settings/tokens/new. + In the Note field, enter a token description. + Select the repo scope. + Optional. To import collaborators, select the read:org scope. + Select Generate token. + + + On the GitLab left sidebar, at the top, select Create new ( ) and New project/repository. + Select Import project and then GitHub. + Select Authorize with GitHub. + In the Personal Access Token field, paste the GitHub Personal Access Token. + Select Authenticate. + Proceed to selecting which repositories to import. + + +To use a different token to perform an import after previously performing +these steps, sign out of your GitLab account and sign in again, or revoke the +older token in GitHub. + +Use the API + + +The GitLab REST API can be used to import a GitHub repository. It has some advantages over using the GitLab UI: + + + Can be used to import GitHub repositories that you do not own if they are public. + It can be used to import from a GitHub Enterprise Server that is self-hosted. + Can be used to set the timeout_strategy option that is not available to the UI. + + +The REST API is limited to authenticating with GitLab Personal Access Tokens. + +To import your GitHub repository using the GitLab REST API: + + + Generate a GitHub Personal Access Token: + + Go to https://github.com/settings/tokens/new. + In the Note field, enter a token description. + Select the repo scope. + Optional. To import collaborators, select the read:org scope. + Select Generate token. + + + Use the GitLab REST API to import your GitHub repository. + + +Filter repositories list + + + +History + + + + + +Introduced in GitLab 16.0. + + + + + + +After you authorize access to your GitHub repositories, GitLab redirects you to the importer page and +your GitHub repositories are listed. + +Use one of the following tabs to filter the list of repositories: + + + +Owner (default): Filter the list to the repositories that you are the owner of. + +Collaborated: Filter the list to the repositories that you have contributed to. + +Organization: Filter the list to the repositories that belong to an organization you are a member of. + + +When the Organization tab is selected, you can further narrow down your search by selecting an available GitHub organization from a dropdown list. + +Select additional items to import + + + +History + + + + + +Introduced in GitLab 15.5. 
+ Importing collaborators as an additional item was introduced in GitLab 16.0. + Feature flag github_import_extended_events was introduced in GitLab 16.8. Disabled by default. This flag improves the performance of imports but removes the Import issue and pull request events option. + Feature flag github_import_extended_events was enabled on GitLab.com and self-managed in GitLab 16.9. + + + + + + +To make imports as fast as possible, the following items aren’t imported from GitHub by default: + + + Issue and pull request events. For example, opened or closed, renamed, and labeled or unlabeled. + More than approximately 30,000 comments because of a limitation of the GitHub API. + Markdown attachments from repository comments, release posts, issue descriptions, and pull request descriptions. These can include +images, text, or binary attachments. If not imported, links in Markdown to attachments break after you remove the attachments from GitHub. + + +You can choose to import these items, but this could significantly increase import time. To import these items, select the appropriate fields in the UI: + + + +Import issue and pull request events. If the github_import_extended_events feature flag is enabled, this option is unavailable. + +Use alternative comments import method. If importing GitHub projects with more than approximately 30,000 comments across all issues and pull requests, you should enable this method because of a +limitation of the GitHub API. + +Import Markdown attachments. + +Import collaborators (selected by default). Leaving it selected might result in new users using a seat in the group or namespace, +and being granted permissions as high as project owner. Only direct collaborators are imported. +Outside collaborators are never imported. + + +Select which repositories to import + + + +History + + + + + Ability to cancel pending or active imports introduced in GitLab 15.7. + Ability to re-import projects introduced in GitLab 15.9. + + + + + + +By default, the proposed repository namespaces match the names as they exist in GitHub, but based +on your permissions, you can choose to edit these names before you proceed to import any of them. + +To select which repositories to import, next to any number of repositories select Import or +select Import all repositories. + +Additionally, you can filter projects by name. If a filter is applied, Import all repositories +only imports matched repositories. + +The Status column shows the import status of each repository. You can choose to keep the page +open and watch updates in real time or you can return to it later. + +To cancel imports that are pending or in progress, next to the imported project, select Cancel. +If the import has already started, the imported files are kept. + +To open an repository in GitLab URL after it has been imported, select its GitLab path. + +Completed imports can be re-imported by selecting Re-import and specifying new name. This creates a new copy of the source project. + + + +Check status of imports + + + +History + + + + + Details of partially completed imports with a list of entities that failed to import introduced in GitLab 16.1. + + + + + + +After imports are completed, they can be in one of three states: + + + +Complete: GitLab imported all repository entities. + +Partially completed: GitLab failed to import some repository entities. + +Failed: GitLab aborted the import after a critical error occurred. + + +Expand Details to see a list of repository entities that failed to import. 
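If you started the import through the REST API, its state can also be polled instead of watched in the UI. A hedged sketch that assumes the project import status endpoint and uses placeholder values:

# Returns "import_status" (for example scheduled, started, finished, or failed)
# and "import_error" when something went wrong
curl --header "PRIVATE-TOKEN: <your_access_token>" \
  "https://gitlab.example.com/api/v4/projects/<project_id>/import"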
+ +Mirror a repository and share pipeline status + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +Depending on your GitLab tier, repository mirroring can be set up to keep +your imported repository in sync with its GitHub copy. + +Additionally, you can configure GitLab to send pipeline status updates back to GitHub with the +GitHub Project Integration. + +If you import your project using CI/CD for external repository, then both +of the above are automatically configured. + + + note Mirroring does not sync any new or updated pull requests from your GitHub project. + + +Improve the speed of imports on self-managed instances + + +Administrator access on the GitLab server is required for these steps. + +Increase the number of Sidekiq workers + + +For large projects it may take a while to import all data. To reduce the time necessary, you can increase the number of +Sidekiq workers that process the following queues: + + + github_importer + github_importer_advance_stage + + +For an optimal experience, it’s recommended having at least 4 Sidekiq processes (each running a number of threads equal +to the number of CPU cores) that only process these queues. It’s also recommended that these processes run on separate +servers. For 4 servers with 8 cores this means you can import up to 32 objects (for example, issues) in parallel. + +Reducing the time spent in cloning a repository can be done by increasing network throughput, CPU capacity, and disk +performance (by using high performance SSDs, for example) of the disks that store the Git repositories (for your GitLab instance). +Increasing the number of Sidekiq workers does not reduce the time spent cloning repositories. + +Enable GitHub OAuth using a GitHub Enterprise Cloud OAuth App + + +If you belong to a GitHub Enterprise Cloud organization you can configure your self-managed GitLab instance to obtain a higher GitHub API rate limit. + +GitHub API requests are usually subject to a rate limit of 5,000 requests per hour. Using the steps below, you obtain a higher 15,000 requests per hour rate limit, resulting in a faster overall import time. + +Prerequisites: + + + You have access to a +GitHub Enterprise Cloud organization. + GitLab is configured to enable GitHub OAuth. + + +To enable a higher rate limit: + + + +Create an OAuth app in GitHub. Ensure that the OAuth app is owned by the Enterprise Cloud Organization, not your personal GitHub account. + Perform the project import using GitHub OAuth. + Optional. By default, sign-in is enabled for all configured OAuth providers. +If you want to enable GitHub OAuth for imports but you want to +prevent the ability for users to sign in to your GitLab instance with GitHub, +you can +disable sign-in with GitHub. + + +Imported data + + +The following items of a project are imported: + + + Repository description. + Git repository data. + All project branches. + All branches of forks of the project related to open pull requests, but not closed pull requests. Branches from forks are imported with a naming scheme similar to GH-SHA-username/pull-request-number/fork-name/branch. + Branch protection rules. Introduced in GitLab 15.4. + Collaborators (members). Introduced in GitLab 15.10. From GitLab 16.0, can +be imported as an additional item. + Issues. + Pull requests. + Wiki pages. + Milestones. + Labels. + Release notes content. + Attachments for: + + Release notes. Introduced in GitLab 15.4. + Comments. Introduced in GitLab 15.5. + Issue description. Introduced in GitLab 15.5. 
+ Pull Request description. Introduced in GitLab 15.5. + + + All attachment imports are disabled by default behind +github_importer_attachments_import feature flag. From GitLab 15.5, can +be imported as an additional item. The feature flag was removed. + + Pull request review comments. + Regular issue and pull request comments. + +Git Large File Storage (LFS) Objects. + Pull request reviews. + Pull request assigned reviewers. Introduced in GitLab 15.6. + Pull request “merged by” information. + Pull request comments replies in discussions. Introduced in +GitLab 14.5. + Pull request review comments suggestions. Introduced in GitLab 14.7. + Issue events and pull requests events. Introduced in GitLab 15.4 +with github_importer_issue_events_import feature flag disabled by default. +From GitLab 15.5, can be imported as an additional item. The feature flag was +removed. + + +References to pull requests and issues are preserved. Each imported repository maintains visibility level unless that +visibility level is restricted, in which case it +defaults to the default project visibility. + +Branch protection rules and project settings + + +When they are imported, supported GitHub branch protection rules are mapped to either: + + + GitLab branch protection rules. + Project-wide GitLab settings. + + + + + + GitHub rule + GitLab rule + Introduced in + + + + + +Require conversation resolution before merging for the project’s default branch + +All threads must be resolved project setting + + GitLab 15.5 + + + Require a pull request before merging + +No one option in the Allowed to push and merge list of branch protection settings + + GitLab 15.5 + + + +Require signed commits for the project’s default branch + +Reject unsigned commits GitLab push rule + + GitLab 15.5 + + + Allow force pushes - Everyone + +Allowed to force push branch protection setting + + GitLab 15.6 + + + Require a pull request before merging - Require review from Code Owners + +Require approval from code owners branch protection setting + + GitLab 15.6 + + + Require a pull request before merging - Allow specified actors to bypass required pull requests + List of users in the Allowed to push and merge list of branch protection settings. Without a Premium subscription, the list of users that are allowed to push and merge is limited to roles. + GitLab 15.8 + + + + +Mapping GitHub rule Require status checks to pass before merging to +external status checks was considered in issue +370948. However, this rule is not imported during project import +into GitLab due to technical difficulties. You can still create external status checks +manually. + +Collaborators (members) + + + +History + + + + + +Introduced in GitLab 15.10. + + + + + + +These GitHub collaborator roles are mapped to these GitLab member roles: + + + + + GitHub role + Mapped GitLab role + + + + + Read + Guest + + + Triage + Reporter + + + Write + Developer + + + Maintain + Maintainer + + + Admin + Owner + + + + +GitHub Enterprise Cloud has +custom repository roles. +These roles aren’t supported and cause partially completed imports. + +To import GitHub collaborators, you must have at least the Write role on the GitHub project. Otherwise collaborators import is skipped. + +Import from GitHub Enterprise on an internal network + + +If your GitHub Enterprise instance is on a internal network that is inaccessible to the internet, you can use a reverse proxy +to allow GitLab.com to access the instance. + +The proxy needs to: + + + Forward requests to the GitHub Enterprise instance. 
+ Convert to the public proxy hostname all occurrences of the internal hostname in: + + The API response body. + The API response Link header. + + + + +GitHub API uses the Link header for pagination. + +After configuring the proxy, test it by making API requests. Below there are some examples of commands to test the API: + +curl --header ""Authorization: Bearer "" ""https://{PROXY_HOSTNAME}/user"" + +### URLs in the response body should use the proxy hostname + +{ + ""login"": ""example_username"", + ""id"": 1, + ""url"": ""https://{PROXY_HOSTNAME}/users/example_username"", + ""html_url"": ""https://{PROXY_HOSTNAME}/example_username"", + ""followers_url"": ""https://{PROXY_HOSTNAME}/api/v3/users/example_username/followers"", + ... + ""created_at"": ""2014-02-11T17:03:25Z"", + ""updated_at"": ""2022-10-18T14:36:27Z"" +} + + +curl --head --header ""Authorization: Bearer "" ""https://{PROXY_DOMAIN}/api/v3/repos/{repository_path}/pulls?states=all&sort=created&direction=asc"" + +### Link header should use the proxy hostname + +HTTP/1.1 200 OK +Date: Tue, 18 Oct 2022 21:42:55 GMT +Server: GitHub.com +Content-Type: application/json; charset=utf-8 +Cache-Control: private, max-age=60, s-maxage=60 +... +X-OAuth-Scopes: repo +X-Accepted-OAuth-Scopes: +github-authentication-token-expiration: 2022-11-22 18:13:46 UTC +X-GitHub-Media-Type: github.v3; format=json +X-RateLimit-Limit: 5000 +X-RateLimit-Remaining: 4997 +X-RateLimit-Reset: 1666132381 +X-RateLimit-Used: 3 +X-RateLimit-Resource: core +Link: ; rel=""next"", ; rel=""last"" + + +Also test that cloning the repository using the proxy does not fail: + +git clone -c http.extraHeader=""Authorization: basic "" --mirror https://{PROXY_DOMAIN}/{REPOSITORY_PATH}.git + + +Sample reverse proxy configuration + + +The following configuration is an example on how to configure Apache HTTP Server as a reverse proxy + + + caution For simplicity, the snippet does not have configuration to encrypt the connection between the client and the proxy. However, for security reasons you should include that +configuration. See sample Apache TLS/SSL configuration. 
# Required modules
LoadModule filter_module lib/httpd/modules/mod_filter.so
LoadModule reflector_module lib/httpd/modules/mod_reflector.so
LoadModule substitute_module lib/httpd/modules/mod_substitute.so
LoadModule deflate_module lib/httpd/modules/mod_deflate.so
LoadModule headers_module lib/httpd/modules/mod_headers.so
LoadModule proxy_module lib/httpd/modules/mod_proxy.so
LoadModule proxy_connect_module lib/httpd/modules/mod_proxy_connect.so
LoadModule proxy_http_module lib/httpd/modules/mod_proxy_http.so
LoadModule ssl_module lib/httpd/modules/mod_ssl.so

# Reverse proxy virtual host that serves the proxied GitHub Enterprise API over HTTPS
<VirtualHost *:443>

  ServerName GITHUB_ENTERPRISE_HOSTNAME

  # Enables reverse-proxy configuration with SSL support
  SSLProxyEngine On
  ProxyPass ""/"" ""https://GITHUB_ENTERPRISE_HOSTNAME/""
  ProxyPassReverse ""/"" ""https://GITHUB_ENTERPRISE_HOSTNAME/""

  # Replaces occurrences of the local GitHub Enterprise URL with the Proxy URL.
  # GitHub Enterprise compresses the responses, so the INFLATE and DEFLATE filters are needed to
  # decompress and then recompress the response.
  AddOutputFilterByType INFLATE;SUBSTITUTE;DEFLATE application/json
  Substitute ""s|https://GITHUB_ENTERPRISE_HOSTNAME|https://PROXY_HOSTNAME|ni""
  SubstituteMaxLineLength 50M

  # GitHub API uses the response header ""Link"" for the API pagination
  # For example:
  # ; rel=""next"", ; rel=""last""
  # The directive below replaces all occurrences of the GitHub Enterprise URL with the Proxy URL if the
  # response header Link is present
  Header edit* Link ""https://GITHUB_ENTERPRISE_HOSTNAME"" ""https://PROXY_HOSTNAME""

</VirtualHost>


Troubleshooting


Manually continue a previously failed import process


In some cases, the GitHub import process can fail to import the repository. This causes GitLab to abort the project import process and requires the
repository to be imported manually. Administrators can manually import the repository for a failed import process:


 Open a Rails console.

 Run the following series of commands in the console:


project_id = 
github_access_token = 
github_repository_path = '/'

github_repository_url = ""https://#{github_access_token}@github.com/#{github_repository_path}.git""

# Find project by ID
project = Project.find(project_id)
# Set import URL and credentials
project.import_url = github_repository_url
project.import_type = 'github'
project.import_source = github_repository_path
project.save!
# Create an import state if the project was created manually and not from a failed import
project.create_import_state if project.import_state.blank?
# Set state to start
project.import_state.force_start

# Optional: If your import had certain optional stages selected or a timeout strategy
# set, you can reset them here. Below is an example.
# The params follow the format documented in the API:
# https://docs.gitlab.com/ee/api/import.html#import-repository-from-github
Gitlab::GithubImport::Settings
.new(project)
.write(
  timeout_strategy: ""optimistic"",
  optional_stages: {
    single_endpoint_issue_events_import: true,
    single_endpoint_notes_import: true,
    attachments_import: true,
    collaborators_import: true
  }
)

# Trigger import from second step
Gitlab::GithubImport::Stage::ImportRepositoryWorker.perform_async(project.id)




Errors when importing large projects


The GitHub importer might encounter some errors when importing large projects.

Missing comments


The GitHub API has a limit that prevents more than approximately 30,000 notes or diff notes from being imported.
+When this limit is reached, the GitHub API instead returns the following error: + +In order to keep the API fast for everyone, pagination is limited for this resource. Check the rel=last link relation in the Link response header to see how far back you can traverse. + + +If you are importing GitHub projects with a large number of comments, you should select the Use alternative comments import method +additional item to import checkbox. This setting makes the import process take longer because it increases the number of network requests +required to perform the import. + +Reduce GitHub API request objects per page + + +Some GitHub API endpoints might return a 500 or 502 error for project imports from large repositories. +To reduce the chance of these errors, in the group project importing the data, enable the +github_importer_lower_per_page_limit feature flag. When enabled, the flag reduces the +page size from 100 to 50. + +To enable this feature flag: + + + Start a Rails console. + + Run the following enable command: + + +group = Group.find_by_full_path('my/group/fullpath') + +# Enable +Feature.enable(:github_importer_lower_per_page_limit, group) + + + + +To disable the feature flag, run this command: + +# Disable +Feature.disable(:github_importer_lower_per_page_limit, group) + + +GitLab instance cannot connect to GitHub + + +Self-managed instances that run GitLab 15.10 or earlier, and are behind proxies, cannot resolve DNS for github.com or api.github.com. +In this situation, the GitLab instance fails to connect to GitHub during the import and you must add github.com and api.github.com +entries in the allowlist for local requests. + + +2. Repository mirroring + + + +Repository mirroring + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +You can mirror a repository to and from external sources. You can select which repository +serves as the source. Branches, tags, and commits are synced automatically. + + + note SCP-style URLs are not supported. However, the work for implementing SCP-style URLs is tracked +in this issue. +Subscribe to the issue to follow its progress. + + +Several mirroring methods exist: + + + +Push: Mirror a repository from GitLab to another location. + +Pull: Mirror a repository from another location to a GitLab Premium instance. + +Bidirectional mirroring is also available, but can cause conflicts. + + +Mirror a repository when: + + + The canonical version of your project has migrated to GitLab. To keep providing a +copy of your project at its previous home, configure your GitLab repository as a +push mirror. Changes you make to your GitLab repository are copied to +the old location. + Your GitLab instance is private, but you want to open-source some projects. + You migrated to GitLab, but the canonical version of your project is somewhere else. +Configure your GitLab repository as a pull mirror of the other project. +Your GitLab repository pulls copies of the commits, tags, and branches of project. +They become available to use on GitLab. + + +Create a repository mirror + + +Prerequisites: + + + You must have at least the Maintainer role for the project. + If your mirror connects with ssh://, the host key must be detectable on the server, +or you must have a local copy of the key. + + + + On the left sidebar, select Search or go to and find your project. + Select Settings > Repository. + Expand Mirroring repositories. + Select Add new. + Enter a Git repository URL. 
For security reasons, the URL to the original +repository is only displayed to users with the Maintainer role +or the Owner role for the mirrored project. + Select a Mirror direction. + If you entered a ssh:// URL, select either: + + +Detect host keys: GitLab fetches the host keys from the server and displays the fingerprints. + +Input host keys manually, and enter the host key into SSH host key. + + + When mirroring the repository, GitLab confirms at least one of the stored host keys +matches before connecting. This check can protect your mirror from malicious code injections, +or your password from being stolen. + + Select an Authentication method. For more information, see +Authentication methods for mirrors. + If you authenticate with SSH host keys, verify the host key +to ensure it is correct. + To prevent force-pushing over diverged refs, select Keep divergent refs. + Optional. To limit the number of branches mirrored, select +Mirror only protected branches or enter a regex in Mirror specific branches. + Select Mirror repository. + + +If you select SSH public key as your authentication method, GitLab generates a +public key for your GitLab repository. You must provide this key to the non-GitLab server. +For more information, see Get your SSH public key. + +Mirror only protected branches + + +You can choose to mirror only the +protected branches in the mirroring project, +either from or to your remote repository. For pull mirroring, +non-protected branches in the mirroring project are not mirrored and can diverge. + +To use this option, select Only mirror protected branches when you create a repository mirror. + +Mirror specific branches + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + Mirroring branches matching a regex introduced in GitLab 15.8 with a flag named mirror_only_branches_match_regex. Disabled by default. + +Enabled by default in GitLab 16.0. + +Generally available in GitLab 16.2. Feature flag mirror_only_branches_match_regex removed. + + + + + + +To mirror only branches with names matching an re2 regular expression, +enter a regular expression into the Mirror specific branches field. Branches with names that +do not match the regular expression are not mirrored. + +Update a mirror + + +When the mirror repository is updated, all new branches, tags, and commits are visible in the +project’s activity feed. A repository mirror at GitLab updates automatically. +You can also manually trigger an update: + + + At most once every five minutes on GitLab.com. + According to the pull mirroring interval limit +set by the administrator on self-managed instances. + + + + note +GitLab Silent Mode disables both push and pull updates. + + +Force an update + + +While mirrors are scheduled to update automatically, you can force an immediate update unless: + + + The mirror is already being updated. + The interval, in seconds +for pull mirroring limits has not elapsed after its last update. + + +Prerequisites: + + + You must have at least the Maintainer role for the project. + + + + On the left sidebar, select Search or go to and find your project. + Select Settings > Repository. + Expand Mirroring repositories. + Scroll to Mirrored repositories and identify the mirror to update. + Select Update now ( ): + + + + +Authentication methods for mirrors + + +When you create a mirror, you must configure the authentication method for it. +GitLab supports these authentication methods: + + + +SSH authentication. + Username and password. 
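Mirrors that use username and password authentication can also be created through the REST API, with the credentials embedded in the mirror URL. The curl sketch below is illustrative only: the project ID 42, the gitlab.example.com and example.com hosts, the mirror_user name, and the GITLAB_TOKEN and MIRROR_PASSWORD variables are assumed values, and credentials that contain special characters must be URL-encoded.

# Create a push mirror for project 42 (hypothetical ID); GitLab stores the URL together with the embedded credentials
curl --request POST --header ""PRIVATE-TOKEN: $GITLAB_TOKEN"" \
  --data ""url=https://mirror_user:$MIRROR_PASSWORD@example.com/group/project.git"" \
  --data ""enabled=true"" \
  ""https://gitlab.example.com/api/v4/projects/42/remote_mirrors""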
+ + +For a project access token or +group access token, +use the username (not token name) and the token as the password. + +SSH authentication + + +SSH authentication is mutual: + + + You must prove to the server that you’re allowed to access the repository. + The server must also prove to you that it’s who it claims to be. + + +For SSH authentication, you provide your credentials as a password or public key. +The server that the other repository resides on provides its credentials as a host key. +You must verify the fingerprint of this host key manually. + +If you’re mirroring over SSH (using an ssh:// URL), you can authenticate using: + + + Password-based authentication, just as over HTTPS. + Public key authentication. This method is often more secure than password authentication, +especially when the other repository supports deploy keys. + + +Get your SSH public key + + +When you mirror a repository and select the SSH public key as your +authentication method, GitLab generates a public key for you. The non-GitLab server +needs this key to establish trust with your GitLab repository. To copy your SSH public key: + + + On the left sidebar, select Search or go to and find your project. + Select Settings > Repository. + Expand Mirroring repositories. + Scroll to Mirrored repositories. + Identify the correct repository, and select Copy SSH public key ( ). + Add the public SSH key to the other repository’s configuration: + + If the other repository is hosted on GitLab, add the public SSH key +as a deploy key. + If the other repository is hosted elsewhere, add the key to +your user’s authorized_keys file. Paste the entire public SSH key into the +file on its own line and save it. + + + + +If you must change the key at any time, you can remove and re-add the mirror +to generate a new key. Update the other repository with the new +key to keep the mirror running. + + + note The generated keys are stored in the GitLab database, not in the file system. Therefore, +SSH public key authentication for mirrors cannot be used in a pre-receive hook. + + +Verify a host key + + +When using a host key, always verify the fingerprints match what you expect. +GitLab.com and other code hosting sites publish their fingerprints +for you to check: + + + AWS CodeCommit + Bitbucket + Codeberg + GitHub + GitLab.com + Launchpad + Savannah + SourceForge + + +Other providers vary. You can securely gather key fingerprints with the following +command if you: + + + Run self-managed GitLab. + Have access to the server for the other repository. + + +$ cat /etc/ssh/ssh_host*pub | ssh-keygen -E md5 -l -f - +256 MD5:f4:28:9f:23:99:15:21:1b:bf:ed:1f:8e:a0:76:b2:9d root@example.com (ECDSA) +256 MD5:e6:eb:45:8a:3c:59:35:5f:e9:5b:80:12:be:7e:22:73 root@example.com (ED25519) +2048 MD5:3f:72:be:3d:62:03:5c:62:83:e8:6e:14:34:3a:85:1d root@example.com (RSA) + + +Older versions of SSH may require you to remove -E md5 from the command. + +Related topics + + + + +Troubleshooting for repository mirroring. + Configure a Pull Mirroring Interval + + Disable mirrors for a project + Secrets file and mirroring + + + +" +explain the gitlab flow branching strategy,,"1. Branches + + + +Branches + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +Branches are versions of a project’s working tree. When you create a new +project, GitLab creates a default branch (which +cannot be deleted) for your repository. Default branch settings can be configured +at the project, subgroup, group, or instance level. 
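From a local clone, you can check which branch the GitLab remote treats as the default and branch off it. This is a minimal command-line sketch; the remote name origin, the default branch name main, and the branch name my-feature are placeholder assumptions.

# Show the branch the remote reports as its default (HEAD) branch
git remote show origin | grep 'HEAD branch'

# Create a branch from the default branch and switch to it
git fetch origin
git switch --create my-feature origin/main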
+ +As your project grows, your team creates more +branches, preferably by following branch naming patterns. +Each branch represents a set of changes, which allows development work to be done +in parallel. Development work in one branch does not affect another branch. + +Branches are the foundation of development in a project: + + + To get started, create a branch and add commits to it. + When the work is ready for review, create a merge request to propose +merging the changes in your branch. To streamline this process, you should follow +branch naming patterns. + Preview changes in a branch with a review app. + After the contents of your branch are merged, delete the merged branch. + + +Create branch + + +Prerequisites: + + + You must have at least the Developer role for the project. + + +To create a new branch from the GitLab UI: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Branches. + In the upper-right corner, select New branch. + Enter a Branch name. + In Create from, select the base of your branch: an existing branch, an existing +tag, or a commit SHA. + Select Create branch. + + +In a blank project + + +A blank project does not contain a branch, but +you can add one. + +Prerequisites: + + + You must have at least the Developer role for the project. + If you don’t have the Maintainer or Owner role, the +default branch protection +must be set to Partially protected or Not protected for you to push a commit +to the default branch. + + +To add a default branch to an empty project: + + + On the left sidebar, select Search or go to and find your project. + Scroll to The repository for this project is empty and select the type of +file you want to add. + In the Web IDE, make any desired changes to this file, then select Create commit. + Enter a commit message, and select Commit. + + +GitLab creates a default branch and adds your file to it. + +From an issue + + +Prerequisites: + + + You must have at least the Developer role for the project. + + +When viewing an issue, you can create an associated branch directly from that page. +Branches created this way use the +default pattern for branch names from issues, +including variables. + +Prerequisites: + + + You must have at least the Developer role for the project. + + +To create a branch from an issue: + + + On the left sidebar, select Search or go to and find your project. + Select Plan > Issues and find your issue. + Below the issue description, find the Create merge request dropdown list, and select + to display the dropdown list. + Select Create branch. A default Branch name is provided, based on the +default pattern for +this project. If desired, enter a different Branch name. + Select Create branch to create the branch based on your project’s +default branch. + + +Manage and protect branches + + +GitLab provides multiple methods to protect individual branches. These methods +ensure your branches receive oversight and quality checks from their creation to their deletion: + + + The default branch in your project receives extra protection. + Configure protected branches +to restrict who can commit to a branch, merge other branches into it, or merge +the branch itself into another branch. + Configure approval rules to set review +requirements, including security-related approvals, before a branch can merge. + Integrate with third-party status checks +to ensure your branch contents meet your standards of quality. + + +You can manage your branches: + + + With the GitLab user interface. 
+ With Git on the command line. + With the Branches API. + + +View all branches + + +To view and manage your branches in the GitLab user interface: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Branches. + + +On this page, you can: + + + + See all branches, or filter to see only active or stale branches. + + A branch is considered active if a commit has been made to it in the last three months. +Otherwise it is considered stale. + + Create new branches. + +Compare branches. + Delete merged branches. + + +View branches with configured protections + + + +History + + + + + +Introduced in GitLab 15.1 with a flag named branch_rules. Disabled by default. + +Enabled on GitLab.com in GitLab 15.10. + +Enabled on self-managed in GitLab 15.11. + + + + + + + + On self-managed GitLab, by default this feature is available. To hide the feature, an administrator can disable the feature flag named branch_rules. +On GitLab.com and GitLab Dedicated, this feature is available. + + +Branches in your repository can be protected in multiple ways. You can: + + + Limit who can push to the branch. + Limit who can merge the branch. + Require approval of all changes. + Require external tests to pass. + + +The Branch rules overview page shows all branches with any configured protections, +and their protection methods: + + + +Prerequisites: + + + You must have at least the Maintainer role for the project. + + +To view the Branch rules overview list: + + + On the left sidebar, select Search or go to and find your project. + Select Settings > Repository. + Expand Branch rules to view all branches with protections. + + To add protections to a new branch: + + Select Add branch rule. + Select Create protected branch. + + + To view more information about protections on an existing branch: + + Identify the branch you want more information about. + Select View details to see information about its: + + +Branch protections. + +Approval rules. + +Status checks. + + + + + + + + +Name your branch + + +Git enforces branch name rules +to help ensure branch names remain compatible with other tools. GitLab +adds extra requirements for branch names, and provides benefits for well-structured branch names. + +GitLab enforces these additional rules on all branches: + + + No spaces are allowed in branch names. + Branch names with 40 hexadecimal characters are prohibited, because they are similar to Git commit hashes. + Branch names are case-sensitive. + + +Common software packages, like Docker, can enforce +additional branch naming restrictions. + +For the best compatibility with other software packages, use only: + + + Numbers + Hyphens (-) + Underscores (_) + Lowercase letters from the ASCII standard table + + +You can use forward slashes (/) and emoji in branch names, but compatibility with other +software packages cannot be guaranteed. + +Branch names with specific formatting offer extra benefits: + + + Streamline your merge request workflow by +prefixing branch names with issue numbers. + Automate branch protections based on branch name. + Test branch names with push rules before branches are pushed up to GitLab. + Define which CI/CD jobs to run on merge requests. + + +Configure default pattern for branch names from issues + + +By default, GitLab uses the pattern %{id}-%{title} when creating a branch from +an issue, but you can change this pattern. + +Prerequisites: + + + You must have at least the Maintainer role for the project. 
+ + +To change the default pattern for branches created from issues: + + + On the left sidebar, select Search or go to and find your project. + Select Settings > Repository. + Expand Branch defaults. + Scroll to Branch name template and enter a value. The field supports these variables: + + +%{id}: The numeric ID of the issue. + +%{title}: The title of the issue, modified to use only characters acceptable in Git branch names. + + + Select Save changes. + + +Prefix branch names with issue numbers + + +To streamline the creation of merge requests, start your Git branch name with the +issue number, followed by a hyphen. +For example, to link a branch to issue #123, start the branch name with 123-. + +The issue and the branch must be in the same project. + +GitLab uses the issue number to import data into the merge request: + + + The issue is marked as related to the merge request. The issue and merge request +display links to each other. + The branch is connected to the issue. + If your project is configured with a +default closing pattern, +merging the merge request also closes +the related issue. + Issue milestone and labels are copied to the merge request. + + +Compare branches + + +To compare branches in a repository: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Compare revisions. + Select the Source branch to search for your desired branch. Exact matches are +shown first. You can refine your search with operators: + + +^ matches the beginning of the branch name: ^feat matches feat/user-authentication. + +$ matches the end of the branch name: widget$ matches feat/search-box-widget. + +* matches using a wildcard: branch*cache* matches fix/branch-search-cache-expiration. + You can combine operators: ^chore/*migration$ matches chore/user-data-migration. + + + Select the Target repository and branch. Exact matches are shown first. + Below Show changes, select the method to compare branches: + + + + +Only incoming changes from source (default) shows differences from the source branch since +the latest common commit on both branches. +It doesn’t include unrelated changes made to the target branch after the source branch was created. +This method uses the git diff ... +Git command. +To compare branches, this method uses the merge base instead of the actual commit, so +changes from cherry-picked commits are shown as new changes. + +Include changes to target since source was created shows all the differences between the two +branches. +This method uses the git diff +Git command. + + + + + Select Compare to show the list of commits, and changed files. + Optional. To reverse the Source and Target, select Swap revisions ( ). + + +Delete merged branches + + +Merged branches can be deleted in bulk if they meet all of these criteria: + + + They are not protected branches. + They have been merged into the project’s default branch. + + +Prerequisites: + + + You must have at least the Developer role for the project. + + +To do this: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Branches. + In the upper right corner of the page, select More . + Select Delete merged branches. + In the dialog, enter the word delete to confirm, then select Delete merged branches. + + +Configure workflows for target branches + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + +Introduced in GitLab 16.4 with a flag named target_branch_rules_flag. Enabled by default. 
+ +Feature flag removed in GitLab 16.7. + + + + + + +Some projects use multiple long-term branches for development, like develop and qa. +In these projects, you might want to keep main as the default branch, but expect +merge requests to target develop or qa instead. Target branch workflows help ensure +merge requests target the appropriate development branch for your project. + +When you create a merge request, the workflow checks the name of the branch. If the +branch name matches the workflow, the merge request targets the branch you specify. If the branch name does not match, the merge request targets the +default branch of the project. + +Rules are processed on a “first-match” basis - if two rules match the same branch name, the top-most rule is applied. + +Prerequisites: + + + You must have at least the Maintainer role. + + +To create a target branch workflow: + + + On the left sidebar, select Search or go to and find your project. + Select Settings > Merge requests. + Scroll down to Merge request branch workflow + + Select Add branch target. + For Branch name pattern, provide a string or wild card to compare against branch names. + Select the Target branch to use when the branch name matches the Branch name pattern. + Select Save. + + +Example + + +You could configure your project to have the following target branch workflows: + + + + + Branch name pattern + Target branch + + + + + feature/* + develop + + + bug/* + develop + + + release/* + main + + + + +These target branches simplify the process of creating merge requests for a project that: + + + Uses main to represent the deployed state of your application. + Tracks current, unreleased development work in another long-running branch, like develop. + + +If your workflow initially places new features in develop instead of main, these target branches +ensure all branches matching either feature/* or bug/* do not target main by mistake. + +When you’re ready to release to main, create a branch named release/*, and +ensure this branch targets main. + +Delete a target branch workflow + + +When you remove a target branch workflow, existing merge requests remain unchanged. + +Prerequisites: + + + You must have at least the Maintainer role. + + +To do this: + + + On the left sidebar, select Search or go to and find your project. + Select Settings > Merge requests. + Select Delete on the branch target you want to delete. + + +Related topics + + + + Protected branches + Branches API + Protected Branches API + Getting started with Git + + +Troubleshooting + + +Multiple branches containing the same commit + + +At a deeper technical level, Git branches aren’t separate entities, but labels +attached to a set of commit SHAs. When GitLab determines whether or not a branch has been +merged, it checks the target branch for the existence of those commit SHAs. +This behavior can cause unexpected results when two merge requests contain the same +commits. 
In this example, branches B and C both start from the same commit (3)
on branch A:

[Diagram: a Git graph with branches main, A, B, and C and commits a through e, where branch B merges commits b, c, and d.]

If you merge branch B, branch A also appears as merged (without any action from you)
because all commits from branch A now appear in the target branch main. Branch C
remains unmerged, because commit 5 wasn’t part of branch A or B.

Merge request A remains merged, even if you attempt to push new commits
to its branch. If any changes in merge request A remain unmerged (because they
weren’t part of merge request A), open a new merge request for them.

Error: ambiguous HEAD branch exists


In versions of Git earlier than 2.16.0, you could create a branch named HEAD.
This branch named HEAD collides with the internal reference (also named HEAD)
Git uses to describe the active (checked out) branch. This naming collision can
prevent you from updating the default branch of your repository:

Error: Could not set the default branch. Do you have a branch named 'HEAD' in your repository?


To fix this problem:

 On the left sidebar, select Search or go to and find your project.
 Select Code > Branches.
 Search for a branch named HEAD.
 Make sure the branch has no uncommitted changes.
 Select Delete branch, then Yes, delete branch.


Git versions 2.16.0 and later
prevent you from creating a branch with this name.

Find all branches you’ve authored


To find all branches you’ve authored in a project, run this command in a Git repository:

git for-each-ref --format='%(authoremail) %(refname:short)' | grep $(git config --get user.email)


To get a total of all branches in a project, sorted by author, run this command
in a Git repository:

git for-each-ref --format='%(authoremail)' | sort | uniq -c | sort -g



2. Forks



Forks



Tier: Free, Premium, Ultimate
Offering: GitLab.com, Self-managed, GitLab Dedicated

Whenever possible, it’s recommended to work in a common Git repository and use
branching strategies to manage your work. However,
if you do not have write access for the repository you want to contribute to, you
can create a fork.

A fork is a personal copy of the repository and all its branches, which you create
in a namespace of your choice. Make changes in your own fork and
submit them through a merge request to the repository you don’t have access to.

The forked project uses a
deduplication strategy
to have a potentially smaller storage space than the source project. Forked projects
can access the object pool connected to the source project.

Create a fork



History




Introduced a new form in GitLab 13.11 with a flag named fork_project_form. Disabled by default.

Enabled on GitLab.com and self-managed in GitLab 14.8. Feature flag fork_project_form removed.

Introduced in GitLab 16.6.
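A fork can also be created through the REST API instead of the UI. The curl sketch below uses assumed values only: a source project ID of 42, a target namespace path of my-group, a gitlab.example.com host, and a token stored in GITLAB_TOKEN. The UI steps follow below.

# Fork project 42 (hypothetical ID) into the my-group namespace
curl --request POST --header ""PRIVATE-TOKEN: $GITLAB_TOKEN"" \
  --data ""namespace_path=my-group"" \
  ""https://gitlab.example.com/api/v4/projects/42/fork""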
+ + + + + + +To fork an existing project in GitLab: + + + + On the project’s homepage, in the upper-right corner, select Fork ( ): + + + + Optional. Edit the Project name. + For Project URL, select the namespace +your fork should belong to. + Add a Project slug. This value becomes part of the URL to your fork. +It must be unique in the namespace. + Optional. Add a Project description. + Select one of the Branches to include options: + + +All branches (default). + +Only the default branch. Uses the --single-branch and --no-tags +Git options. + + + Select the Visibility level for your fork. For more information about +visibility levels, read Project and group visibility. + Select Fork project. + + +GitLab creates your fork, and redirects you to the new fork’s page. + +Update your fork + + +A fork can fall out of sync with its upstream repository, and require an update: + + + +Ahead: Your fork contains new commits not present in the upstream repository. +To sync your fork, create a merge request to push your changes to the upstream repository. + +Behind: The upstream repository contains new commits not present in your fork. +To sync your fork, pull the new commits into your fork. + +Ahead and behind: Both the upstream repository and your fork contain new commits +not present in the other. To fully sync your fork, create a merge request to push +your changes up, and pull the upstream repository’s new changes into your fork. + + +To sync your fork with its upstream repository, update it from the GitLab UI +or the command line. GitLab Premium and Ultimate tiers can also automate updates by +configuring forks as pull mirrors of the upstream repository. + +From the UI + + + +History + + + + + +Introduced in GitLab 15.11 with a flag named synchronize_fork. Disabled by default, but enabled for projects in the gitlab-org/gitlab and gitlab-com/www-gitlab-com namespaces only. + +Generally available in GitLab 16.0. Feature flag synchronize_fork removed. + + + + + + +Prerequisite: + + + The fork must be created from an unprotected branch in upstream repository. + + +To update your fork from the GitLab UI: + + + On the left sidebar, select Search or go to. + Select View all my projects. + Select the fork you want to update. + + Below the dropdown list for branch name, find the Forked from ( ) +information box to determine if your fork is ahead, behind, or both. In this example, +the fork is behind the upstream repository: + + + + If your fork is ahead of the upstream repository, select +Create merge request to propose adding your fork’s changes to the upstream repository. + If your fork is behind the upstream repository, select Update fork +to pull changes from the upstream repository. + If your fork is ahead and behind the upstream repository, you can update from the UI +available only if no merge conflicts are detected: + + If your fork contains no merge conflicts, you can select Create merge request +to propose pushing your changes to the upstream repository, Update fork +to pull changes down to your fork, or both. The type of changes in your fork +determine which actions are appropriate. + If your fork contains merge conflicts, update your fork from the command line. + + + + +From the command line + + +To update your fork from the command line, first ensure that you have configured +an upstream remote repository for your fork: + + + Clone your fork locally, if you have not already done so. For more information, see +Clone a repository. 
+ + View the remotes configured for your fork: + + +git remote -v + + + + If your fork does not have a remote pointing to the original repository, +use one of these examples to configure a remote called upstream: + + +# Use this line to set any repository as your upstream after editing +git remote add upstream + +# Use this line to set the main GitLab repository as your upstream +git remote add upstream https://gitlab.com/gitlab-org/gitlab.git + + + After ensuring your local copy has the extra remote configured, you are ready to update your fork. + + + In your local copy, ensure you have checked out the default branch, +replacing main with the name of your default branch: + + +git checkout main + + + If Git identifies unstaged changes, commit or stash them before continuing. + + + Fetch the changes to the upstream repository: + + +git fetch upstream + + + + Pull the changes into your fork, replacing main with the name of the branch +you are updating: + + +git pull upstream main + + + + Push the changes to your fork repository on the server (GitLab.com or self-managed): + + +git push origin main + + + + +With repository mirroring + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +A fork can be configured as a mirror of the upstream if all these conditions are met: + + + Your subscription is Premium or Ultimate. + You create all changes in branches (not main). + You do not work on merge requests for confidential issues, +which requires changes to main. + + +Repository mirroring keeps your fork synced with the original repository. +This method updates your fork once per hour, with no manual git pull required. +For instructions, read Configure pull mirroring. + + + caution With mirroring, before approving a merge request, you are asked to sync. You should automate it. + + +Merge changes back upstream + + +When you are ready to send your code back to the upstream project, +create a merge request. For Source branch, +choose your forked project’s branch. For Target branch, choose the original project’s branch. + + + note When creating a merge request, if the forked project’s visibility is more restrictive than the parent project (for example the fork is private, the parent is public), the target branch defaults to the forked project’s default branch. This prevents potentially exposing the private code of the forked project. + + + + +Then you can add labels, a milestone, and assign the merge request to someone who can review +your changes. Then select Submit merge request to conclude the process. When successfully merged, your +changes are added to the repository and branch you’re merging into. + +Unlink a fork + + +Removing a fork relationship unlinks your fork from its upstream project. +Your fork then becomes an independent project. + +Prerequisites: + + + You must be a project owner to unlink a fork. + + + + caution If you remove a fork relationship, you can’t send merge requests to the source. +If anyone has forked your project, their fork also loses the relationship. +To restore the fork relationship, use the API. + + +To remove a fork relationship: + + + On the left sidebar, select Search or go to and find your project. + Select Settings > General. + Expand Advanced. + In the Remove fork relationship section, select Remove fork relationship. + To confirm, enter the project path and select Confirm. 
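The fork relationship can also be removed, and later restored, through the REST API. The curl sketch below uses assumed values: a fork with project ID 42, an upstream project with ID 7, a gitlab.example.com host, and a token stored in GITLAB_TOKEN. Restoring the relationship may require administrator access.

# Remove the fork relationship for project 42 (hypothetical ID)
curl --request DELETE --header ""PRIVATE-TOKEN: $GITLAB_TOKEN"" \
  ""https://gitlab.example.com/api/v4/projects/42/fork""

# Re-link project 42 to its upstream project 7 to restore the relationship
curl --request POST --header ""PRIVATE-TOKEN: $GITLAB_TOKEN"" \
  ""https://gitlab.example.com/api/v4/projects/42/fork/7""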
+ + +When you unlink a fork that uses a hashed storage pool +to share objects with another repository: + + + All objects are copied from the pool into your fork. + After the copy process completes, no further updates from the storage pool are propagated to your fork. + + +Related topics + + + + GitLab community forum: Refreshing a fork + + + +Troubleshooting + + +Error: An error occurred while forking the project. Please try again + + +This error can be due to a mismatch in instance runner settings between the forked project +and the new namespace. See Forks +in the Runner documentation for more information. + +Removing fork relationship fails + + +If removing the fork through the UI or API is not working, you can attempt the +fork relationship removal in a +Rails console session: + +p = Project.find_by_full_path('') +u = User.find_by_username('') +Projects::UnlinkForkService.new(p, u).execute + + + +" +how does routing in next.js work? how do i create a new route and page?,,"1. Review apps + + + +Review apps + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +Review apps are a collaboration tool that provide an environment to showcase product changes. + + + note If you have a Kubernetes cluster, you can automate this feature in your applications +by using Auto DevOps. + + +Review apps: + + + Provide an automatic live preview of changes made in a feature branch by spinning up a dynamic environment for your merge requests. + Allow designers and product managers to see your changes without needing to check out your branch and run your changes in a sandbox environment. + Are fully integrated with the GitLab DevOps LifeCycle. + Allow you to deploy your changes wherever you want. + + + + +In the previous example: + + + A review app is built every time a commit is pushed to topic branch. + The reviewer fails two reviews before passing the third review. + After the review passes, topic branch is merged into the default branch, where it’s deployed to staging. + After its approval in staging, the changes that were merged into the default branch are deployed to production. + + +How review apps work + + +A review app is a mapping of a branch with an environment. +Access to the review app is made available as a link on the merge request relevant to the branch. + +The following is an example of a merge request with an environment set dynamically. + + + +In this example, a branch was: + + + Successfully built. + Deployed under a dynamic environment that can be reached by selecting View app. + + +After adding review apps to your workflow, you follow the branched Git flow. That is: + + + Push a branch and let the runner deploy the review app based on the script definition of the dynamic environment job. + Wait for the runner to build and deploy your web application. + To view the changes live, select the link in the merge request related to the branch. + + +Configuring review apps + + +Review apps are built on dynamic environments, which allow you to dynamically create a new environment for each branch. + +The process of configuring review apps is as follows: + + + Set up the infrastructure to host and deploy the review apps (check the examples below). + +Install and configure a runner to do deployment. + Set up a job in .gitlab-ci.yml that uses the predefined CI/CD variable ${CI_COMMIT_REF_SLUG} +to create dynamic environments and restrict it to run only on branches. 
+Alternatively, you can get a YAML template for this job by enabling review apps for your project. + Optionally, set a job that manually stops the review apps. + + +Enable review apps button + + + +History + + + + + +Introduced in GitLab 12.8. + + + + + + +When configuring review apps for a project, you add a new job to the .gitlab-ci.yml file, +as mentioned above. To facilitate this, and if you are using Kubernetes, you can select +Enable review apps and GitLab prompts you with a template code block that +you can copy and paste into .gitlab-ci.yml as a starting point. + +Prerequisites: + + + You need at least the Developer role for the project. + + +To use the review apps template: + + + On the left sidebar, select Search or go to and +find the project you want to create a review app job for. + Select Operate > Environments. + Select Enable review apps. + + Copy the provided code snippet and paste it into your +.gitlab-ci.yml file: + + + + + +You can edit this template as needed. + +Review apps auto-stop + + +See how to configure review apps environments to expire and auto-stop +after a given period of time. + +Review apps examples + + +The following are example projects that demonstrate review app configuration: + + + + + Project + Configuration file + + + + + NGINX + .gitlab-ci.yml + + + OpenShift + .gitlab-ci.yml + + + HashiCorp Nomad + .gitlab-ci.yml + + + GitLab Documentation + build-and-deploy.gitlab-ci.yml + + + https://about.gitlab.com/ + .gitlab-ci.yml + + + GitLab Insights + .gitlab-ci.yml + + + + +Other examples of review apps: + + + + +Cloud Native Development with GitLab. + +Review apps for Android. + + +Route Maps + + +Route Maps allows you to go directly from source files +to public pages on the environment defined for +review apps. + +Once set up, the review app link in the merge request +widget can take you directly to the pages changed, making it easier +and faster to preview proposed modifications. + +Configuring Route Maps involves telling GitLab how the paths of files +in your repository map to paths of pages on your website using a Route Map. +When you configure Route Maps, View on buttons are displayed. +Select these buttons to go to the pages changed directly from merge requests. + +To set up a route map, add a file inside the repository at .gitlab/route-map.yml, +which contains a YAML array that maps source paths (in the repository) to public +paths (on the website). + +Route Maps example + + +The following is an example of a route map for Middleman, +a static site generator (SSG) used to build the GitLab website, +deployed from its project on GitLab.com: + +# Team data +- source: 'data/team.yml' # data/team.yml + public: 'team/' # team/ + +# Blogposts +- source: /source\/posts\/([0-9]{4})-([0-9]{2})-([0-9]{2})-(.+?)\..*/ # source/posts/2017-01-30-around-the-world-in-6-releases.html.md.erb + public: '\1/\2/\3/\4/' # 2017/01/30/around-the-world-in-6-releases/ + +# HTML files +- source: /source\/(.+?\.html).*/ # source/index.html.haml + public: '\1' # index.html + +# Other files +- source: /source\/(.*)/ # source/images/blogimages/around-the-world-in-6-releases-cover.png + public: '\1' # images/blogimages/around-the-world-in-6-releases-cover.png + + +Mappings are defined as entries in the root YAML array, and are identified by a - prefix. Within an entry, there is a hash map with two keys: + + + +source + + A string, starting and ending with ', for an exact match. 
+ A regular expression, starting and ending with /, for a pattern match: + + The regular expression needs to match the entire source path - ^ and $ anchors are implied. + Can include capture groups denoted by () that can be referred to in the public path. + Slashes (/) can, but don’t have to, be escaped as \/. + Literal periods (.) should be escaped as \.. + + + + + +public, a string starting and ending with '. + + Can include \N expressions to refer to capture groups in the source regular expression in order of their occurrence, starting with \1. + + + + +The public path for a source path is determined by finding the first +source expression that matches it, and returning the corresponding +public path, replacing the \N expressions with the values of the +() capture groups if appropriate. + +In the example above, the fact that mappings are evaluated in order +of their definition is used to ensure that source/index.html.haml +matches /source\/(.+?\.html).*/ instead of /source\/(.*)/, +and results in a public path of index.html, instead of +index.html.haml. + +After you have the route mapping set up, it takes effect in the following locations: + + + In the merge request widget: + + The View app button takes you to the environment URL set in the .gitlab-ci.yml file. + + The list shows the first 5 matched items from the route map, but you can filter them if more +than 5 are available. + + + + + + + In the diff for a comparison or commit, by selecting View ( ) next to the file. + + In the blob file view, by selecting View ( ) next to the file. + + + +Visual Reviews (deprecated) + + + +Tier: Premium, Ultimate +Offering: Self-managed + + +History + + + + + +Introduced in GitLab 12.0. + +Moved to GitLab Premium in 13.9. + It’s deployed behind a feature flag, anonymous_visual_review_feedback, disabled by default. + It’s disabled on GitLab.com. + + + + + + + + caution This feature was deprecated in GitLab 15.8 +and is planned for removal in 17.0. This change is a breaking change. + + + + On self-managed GitLab, by default this feature is not available. To make it available, +an administrator can enable the feature flag named anonymous_visual_review_feedback. + + +With Visual Reviews, members of any team (Product, Design, Quality, and so on) can provide feedback comments through a form in your review apps. The comments are added to the merge request that triggered the review app. + +Using Visual Reviews + + +After Visual Reviews has been configured for the +review app, the Visual Reviews feedback form is overlaid on the right side of every page. + + + +To use the feedback form to make a comment in the merge request: + + + On the right side of a page, select the Review tab. + Make a comment on the visual review. You can make use of all the +Markdown annotations that are also available in +merge request comments. + Enter your personal information: + + If data-require-auth is true, you must enter your personal access token. + Otherwise, enter your name, and optionally your email. + + + Select Send feedback. + + + +To see Visual reviews in action, see the Visual Reviews Walk through. + +Configure review apps for Visual Reviews + + +The feedback form is served through a script you add to pages in your review app. +It should be added to the of your application and +consists of some project and merge request specific values. 
Here’s how it +looks for a project with code hosted in a project on GitLab.com: + + + + +Ideally, you should use CI/CD variables +to replace those values at runtime when each review app is created: + + + +data-project-id is the project ID, which can be found by the CI_PROJECT_ID +variable or on the project overview page. + +data-merge-request-id is the merge request ID, which can be found by the +CI_MERGE_REQUEST_IID variable. CI_MERGE_REQUEST_IID is available only if +rules:if: $CI_PIPELINE_SOURCE == ""merge_request_event +is used and the merge request is created. + +data-mr-url is the URL of the GitLab instance and is the same for all +review apps. + +data-project-path is the project’s path, which can be found by CI_PROJECT_PATH. + +data-require-auth is optional for public projects but required for private and internal ones. If this is set to true, the user is required to enter their personal access token instead of their name and email. + +id is always review-app-toolbar-script, you don’t need to change that. + +src is the source of the review toolbar script, which resides in the +respective GitLab instance and is the same for all review apps. + + +For example, in a Ruby application with code hosted on in a project GitLab.com, you would need to have this script: + + + + +Then, when your app is deployed via GitLab CI/CD, those variables should get +replaced with their real values. + +Determining merge request ID + + +The visual review tools retrieve the merge request ID from the data-merge-request-id +data attribute included in the script HTML tag used to add the visual review tools +to your review app. + +After determining the ID for the merge request to link to a visual review app, you +can supply the ID by either: + + + Hard-coding it in the script tag via the data attribute data-merge-request-id of the app. + Dynamically adding the data-merge-request-id value during the build of the app. + Supplying it manually through the visual review form in the app. + + +If the ID is missing from the script, the visual review tool prompts you to enter the +merge request ID before you can provide feedback. + +Authentication for Visual Reviews + + + +History + + + + + +Introduced in GitLab 12.10. + + + + + + +To enable visual reviews for private and internal projects, set the +data-require-auth variable to true. When enabled, +the user must enter a personal access token +with api scope before submitting feedback. + +This same method can be used to require authentication for any public projects. + + + + +2. Installing a GitLab POC on Amazon Web Services (AWS) + + + +Installing a GitLab POC on Amazon Web Services (AWS) + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed + +This page offers a walkthrough of a common configuration for GitLab on AWS using the official Linux package. You should customize it to accommodate your needs. + + + note For organizations with 1,000 users or less, the recommended AWS installation method is to launch an EC2 single box Linux package installation and implement a snapshot strategy for backing up the data. See the 1,000 user reference architecture for more information. + + +Getting started for production-grade GitLab + + + + note This document is an installation guide for a proof of concept instance. It is not a reference architecture and it does not result in a highly available configuration. 
+ + +Following this guide exactly results in a proof of concept instance that roughly equates to a scaled down version of a two availability zone implementation of the Non-HA 2000 User Reference Architecture. The 2K reference architecture is not HA because it is primarily intended to provide some scaling while keeping costs and complexity low. The 3000 User Reference Architecture is the smallest size that is GitLab HA. It has additional service roles to achieve HA, most notably it uses Gitaly Cluster to achieve HA for Git repository storage and specifies triple redundancy. + +GitLab maintains and tests two main types of Reference Architectures. The Linux package architectures are implemented on instance compute while Cloud Native Hybrid architectures maximize the use of a Kubernetes cluster. Cloud Native Hybrid reference architecture specifications are addendum sections to the Reference Architecture size pages that start by describing the Linux package architecture. For example, the 3000 User Cloud Native Reference Architecture is in the subsection titled Cloud Native Hybrid reference architecture with Helm Charts (alternative) in the 3000 User Reference Architecture page. + +Getting started for production-grade Linux package installations + + +The Infrastructure as Code tooling GitLab Environment Tool (GET) is the best place to start for building using the Linux package on AWS and most especially if you are targeting an HA setup. While it does not automate everything, it does complete complex setups like Gitaly Cluster for you. GET is open source so anyone can build on top of it and contribute improvements to it. + +Getting started for production-grade Cloud Native Hybrid GitLab + + +The GitLab Environment Toolkit (GET) is a set of opinionated Terraform and Ansible scripts. These scripts help with the deployment of Linux package or Cloud Native Hybrid environments on selected cloud providers and are used by GitLab developers for GitLab Dedicated (for example). + +You can use the GitLab Environment Toolkit to deploy a Cloud Native Hybrid environment on AWS. However, it’s not required and may not support every valid permutation. That said, the scripts are presented as-is and you can adapt them accordingly. + +Introduction + + +For the most part, we make use of the Linux package in our setup, but we also leverage native AWS services. Instead of using the Linux package-bundled PostgreSQL and Redis, we use Amazon RDS and ElastiCache. + +In this guide, we go through a multi-node setup where we start by +configuring our Virtual Private Cloud and subnets to later integrate +services such as RDS for our database server and ElastiCache as a Redis +cluster to finally manage them in an auto scaling group with custom +scaling policies. + +Requirements + + +In addition to having a basic familiarity with AWS and Amazon EC2, you need: + + + An AWS account + +To create or upload an SSH key +to connect to the instance via SSH + A domain name for the GitLab instance + An SSL/TLS certificate to secure your domain. If you do not already own one, you can provision a free public SSL/TLS certificate through AWS Certificate Manager(ACM) for use with the Elastic Load Balancer we create. + + + + note It can take a few hours to validate a certificate provisioned through ACM. To avoid delays later, request your certificate as soon as possible. + + +Architecture + + +Below is a diagram of the recommended architecture. 
+ + + +AWS costs + + +GitLab uses the following AWS services, with links to pricing information: + + + +EC2: GitLab is deployed on shared hardware, for which +on-demand pricing applies. +If you want to run GitLab on a dedicated or reserved instance, see the +EC2 pricing page for information about +its cost. + +S3: GitLab uses S3 (pricing page) to +store backups, artifacts, and LFS objects. + +ELB: A Classic Load Balancer (pricing page), +used to route requests to the GitLab instances. + +RDS: An Amazon Relational Database Service using PostgreSQL +(pricing page). + +ElastiCache: An in-memory cache environment (pricing page), +used to provide a Redis configuration. + + +Create an IAM EC2 instance role and profile + + +As we are using Amazon S3 object storage, our EC2 instances must have read, write, and list permissions for our S3 buckets. To avoid embedding AWS keys in our GitLab configuration, we make use of an IAM Role to allow our GitLab instance with this access. We must create an IAM policy to attach to our IAM role: + +Create an IAM Policy + + + + Go to the IAM dashboard and select Policies in the left menu. + Select Create policy, select the JSON tab, and add a policy. We want to follow security best practices and grant least privilege, giving our role only the permissions needed to perform the required actions. + + Assuming you prefix the S3 bucket names with gl- as shown in the diagram, add the following policy: + + + +{ ""Version"": ""2012-10-17"", + ""Statement"": [ + { + ""Effect"": ""Allow"", + ""Action"": [ + ""s3:PutObject"", + ""s3:GetObject"", + ""s3:DeleteObject"", + ""s3:PutObjectAcl"" + ], + ""Resource"": ""arn:aws:s3:::gl-*/*"" + }, + { + ""Effect"": ""Allow"", + ""Action"": [ + ""s3:ListBucket"", + ""s3:AbortMultipartUpload"", + ""s3:ListMultipartUploadParts"", + ""s3:ListBucketMultipartUploads"" + ], + ""Resource"": ""arn:aws:s3:::gl-*"" + } + ] +} + + + Select Review policy, give your policy a name (we use gl-s3-policy), and select Create policy. + + +Create an IAM Role + + + + Still on the IAM dashboard, select Roles in the left menu, and +select Create role. + Create a new role by selecting AWS service > EC2, then select +Next: Permissions. + In the policy filter, search for the gl-s3-policy we created above, select it, and select Tags. + Add tags if needed and select Review. + Give the role a name (we use GitLabS3Access) and select Create Role. + + +We use this role when we create a launch configuration later on. + +Configuring the network + + +We start by creating a VPC for our GitLab cloud infrastructure, then +we can create subnets to have public and private instances in at least +two Availability Zones (AZs). Public subnets require a Route Table keep and an associated +Internet Gateway. + +Creating the Virtual Private Cloud (VPC) + + +We now create a VPC, a virtual networking environment that you control: + + + Sign in to Amazon Web Services. + + Select Your VPCs from the left menu and then select Create VPC. +At the “Name tag” enter gitlab-vpc and at the “IPv4 CIDR block” enter +10.0.0.0/16. If you don’t require dedicated hardware, you can leave +“Tenancy” as default. Select Yes, Create when ready. + + + + Select the VPC, select Actions, select Edit DNS resolution, and enable DNS resolution. Select Save when done. + + +Subnets + + +Now, let’s create some subnets in different Availability Zones. Make sure +that each subnet is associated to the VPC we just created and +that CIDR blocks don’t overlap. This also +allows us to enable multi AZ for redundancy. 
+ +We create private and public subnets to match load balancers and +RDS instances as well: + + + Select Subnets from the left menu. + + Select Create subnet. Give it a descriptive name tag based on the IP, +for example gitlab-public-10.0.0.0, select the VPC we created previously, select an availability zone (we use us-west-2a), +and at the IPv4 CIDR block let’s give it a 24 subnet 10.0.0.0/24: + + + + + Follow the same steps to create all subnets: + + + + + Name tag + Type + Availability Zone + CIDR block + + + + + gitlab-public-10.0.0.0 + public + us-west-2a + 10.0.0.0/24 + + + gitlab-private-10.0.1.0 + private + us-west-2a + 10.0.1.0/24 + + + gitlab-public-10.0.2.0 + public + us-west-2b + 10.0.2.0/24 + + + gitlab-private-10.0.3.0 + private + us-west-2b + 10.0.3.0/24 + + + + + Once all the subnets are created, enable Auto-assign IPv4 for the two public subnets: + + Select each public subnet in turn, select Actions, and select Modify auto-assign IP settings. Enable the option and save. + + + + +Internet Gateway + + +Now, still on the same dashboard, go to Internet Gateways and +create a new one: + + + Select Internet Gateways from the left menu. + Select Create internet gateway, give it the name gitlab-gateway and +select Create. + + Select it from the table, and then under the Actions dropdown list choose +“Attach to VPC”. + + + + Choose gitlab-vpc from the list and hit Attach. + + +Create NAT Gateways + + +Instances deployed in our private subnets must connect to the internet for updates, but should not be reachable from the public internet. To achieve this, we make use of NAT Gateways deployed in each of our public subnets: + + + Go to the VPC dashboard and select NAT Gateways in the left menu bar. + Select Create NAT Gateway and complete the following: + + +Subnet: Select gitlab-public-10.0.0.0 from the dropdown list. + +Elastic IP Allocation ID: Enter an existing Elastic IP or select Allocate Elastic IP address to allocate a new IP to your NAT gateway. + Add tags if needed. + Select Create NAT Gateway. + + + + +Create a second NAT gateway but this time place it in the second public subnet, gitlab-public-10.0.2.0. + +Route Tables + + +Public Route Table + + +We must create a route table for our public subnets to reach the internet via the internet gateway we created in the previous step. + +On the VPC dashboard: + + + Select Route Tables from the left menu. + Select Create Route Table. + At the “Name tag” enter gitlab-public and choose gitlab-vpc under “VPC”. + Select Create. + + +We now must add our internet gateway as a new target and have +it receive traffic from any destination. + + + Select Route Tables from the left menu and select the gitlab-public +route to show the options at the bottom. + Select the Routes tab, select Edit routes > Add route and set 0.0.0.0/0 +as the destination. In the target column, select the gitlab-gateway we created previously. +Select Save routes when done. + + +Next, we must associate the public subnets to the route table: + + + Select the Subnet Associations tab and select Edit subnet associations. + Check only the public subnets and select Save. + + +Private Route Tables + + +We also must create two private route tables so that instances in each private subnet can reach the internet via the NAT gateway in the corresponding public subnet in the same availability zone. + + + Follow the same steps as above to create two private route tables. Name them gitlab-private-a and gitlab-private-b. 
+ Next, add a new route to each of the private route tables where the destination is 0.0.0.0/0 and the target is one of the NAT gateways we created earlier. + + Add the NAT gateway we created in gitlab-public-10.0.0.0 as the target for the new route in the gitlab-private-a route table. + Similarly, add the NAT gateway in gitlab-public-10.0.2.0 as the target for the new route in the gitlab-private-b. + + + Lastly, associate each private subnet with a private route table. + + Associate gitlab-private-10.0.1.0 with gitlab-private-a. + Associate gitlab-private-10.0.3.0 with gitlab-private-b. + + + + +Load Balancer + + +We create a load balancer to evenly distribute inbound traffic on ports 80 and 443 across our GitLab application servers. Based on the scaling policies we create later, instances are added to or removed from our load balancer as needed. Additionally, the load balancer performs health checks on our instances. + +On the EC2 dashboard, look for Load Balancer in the left navigation bar: + + + Select Create Load Balancer. + + Choose the Classic Load Balancer. + Give it a name (we use gitlab-loadbalancer) and for the Create LB Inside option, select gitlab-vpc from the dropdown list. + In the Listeners section, set the following listeners: + + HTTP port 80 for both load balancer and instance protocol and ports + TCP port 22 for both load balancer and instance protocols and ports + HTTPS port 443 for load balancer protocol and ports, forwarding to HTTP port 80 on the instance (we configure GitLab to listen on port 80 later in the guide) + + + In the Select Subnets section, select both public subnets from the list so that the load balancer can route traffic to both availability zones. + + + We add a security group for our load balancer to act as a firewall to control what traffic is allowed through. Select Assign Security Groups and select Create a new security group, give it a name +(we use gitlab-loadbalancer-sec-group) and description, and allow both HTTP and HTTPS traffic +from anywhere (0.0.0.0/0, ::/0). Also allow SSH traffic, select a custom source, and add a single trusted IP address or an IP address range in CIDR notation. This allows users to perform Git actions over SSH. + Select Configure Security Settings and set the following: + + Select an SSL/TLS certificate from ACM or upload a certificate to IAM. + Under Select a Cipher, pick a predefined security policy from the dropdown list. You can see a breakdown of Predefined SSL Security Policies for Classic Load Balancers in the AWS documentation. Check the GitLab codebase for a list of supported SSL ciphers and protocols. + + + Select Configure Health Check and set up a health check for your EC2 instances. + + For Ping Protocol, select HTTP. + For Ping Port, enter 80. + For Ping Path - we recommend that you use the Readiness check endpoint. You must add the VPC IP Address Range (CIDR) to the IP allowlist for the Health Check endpoints + + Keep the default Advanced Details or adjust them according to your needs. + + + Select Add EC2 Instances - don’t add anything as we create an Auto Scaling Group later to manage instances for us. + Select Add Tags and add any tags you need. + Select Review and Create, review all your settings, and select Create if you’re happy. + + +After the Load Balancer is up and running, you can revisit your Security +Groups to refine the access only through the ELB and any other requirements +you might have. 
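If you prefer to script this part of the setup rather than click through the console, the same networking pieces can also be created with the AWS CLI. The following is only a minimal sketch using the names and CIDR ranges from the walkthrough above; the NAT gateways, the remaining subnets, the route table associations, and the load balancer itself are omitted, and all IDs are captured at runtime rather than real values:

# Create the VPC used throughout this guide and capture its ID.
VPC_ID=$(aws ec2 create-vpc --cidr-block 10.0.0.0/16 \
  --query 'Vpc.VpcId' --output text)

# One public and one private subnet in the first availability zone.
# Repeat for us-west-2b with the 10.0.2.0/24 and 10.0.3.0/24 blocks.
aws ec2 create-subnet --vpc-id $VPC_ID --cidr-block 10.0.0.0/24 \
  --availability-zone us-west-2a
aws ec2 create-subnet --vpc-id $VPC_ID --cidr-block 10.0.1.0/24 \
  --availability-zone us-west-2a

# Internet gateway for the public subnets.
IGW_ID=$(aws ec2 create-internet-gateway \
  --query 'InternetGateway.InternetGatewayId' --output text)
aws ec2 attach-internet-gateway --internet-gateway-id $IGW_ID --vpc-id $VPC_ID

# Public route table with a default route through the internet gateway.
RTB_ID=$(aws ec2 create-route-table --vpc-id $VPC_ID \
  --query 'RouteTable.RouteTableId' --output text)
aws ec2 create-route --route-table-id $RTB_ID \
  --destination-cidr-block 0.0.0.0/0 --gateway-id $IGW_ID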
+ +Configure DNS for Load Balancer + + +On the Route 53 dashboard, select Hosted zones in the left navigation bar: + + + Select an existing hosted zone or, if you do not already have one for your domain, select Create Hosted Zone, enter your domain name, and select Create. + Select Create Record Set and provide the following values: + + +Name: Use the domain name (the default value) or enter a subdomain. + +Type: Select A - IPv4 address. + +Alias: Defaults to No. Select Yes. + +Alias Target: Find the ELB Classic Load Balancers section and select the classic load balancer we created earlier. + +Routing Policy: We use Simple but you can choose a different policy based on your use case. + +Evaluate Target Health: We set this to No but you can choose to have the load balancer route traffic based on target health. + Select Create. + + + If you registered your domain through Route 53, you’re done. If you used a different domain registrar, you must update your DNS records with your domain registrar. You must: + + Select Hosted zones and select the domain you added above. + You see a list of NS records. From your domain registrar’s administrator panel, add each of these as NS records to your domain’s DNS records. These steps may vary between domain registrars. If you’re stuck, Google “name of your registrar” add DNS records and you should find a help article specific to your domain registrar. + + + + +The steps for doing this vary depending on which registrar you use and is beyond the scope of this guide. + +PostgreSQL with RDS + + +For our database server we use Amazon RDS for PostgreSQL which offers Multi AZ +for redundancy (Aurora is not supported). First we create a security group and subnet group, then we +create the actual RDS instance. + +RDS Security Group + + +We need a security group for our database that allows inbound traffic from the instances we deploy in our gitlab-loadbalancer-sec-group later on: + + + From the EC2 dashboard, select Security Groups from the left menu bar. + Select Create security group. + Give it a name (we use gitlab-rds-sec-group), a description, and select the gitlab-vpc from the VPC dropdown list. + In the Inbound rules section, select Add rule and set the following: + + +Type: search for and select the PostgreSQL rule. + +Source type: set as “Custom”. + +Source: select the gitlab-loadbalancer-sec-group we created earlier. + + + When done, select Create security group. + + +RDS Subnet Group + + + + Go to the RDS dashboard and select Subnet Groups from the left menu. + Select Create DB Subnet Group. + Under Subnet group details, enter a name (we use gitlab-rds-group), a description, and choose the gitlab-vpc from the VPC dropdown list. + From the Availability Zones dropdown list, select the Availability Zones that include the subnets you’ve configured. In our case, we add eu-west-2a and eu-west-2b. + From the Subnets dropdown list, select the two private subnets (10.0.1.0/24 and 10.0.3.0/24) as we defined them in the subnets section. + Select Create when ready. + + +Create the database + + + + caution Avoid using burstable instances (t class instances) for the database as this could lead to performance issues due to CPU credits running out during sustained periods of high load. + + +Now, it’s time to create the database: + + + Go to the RDS dashboard, select Databases from the left menu, and select Create database. + Select Standard Create for the database creation method. 
+ Select PostgreSQL as the database engine and select the minimum PostgreSQL version as defined for your GitLab version in our database requirements. + Because this is a production server, let’s choose Production from the Templates section. + Under Settings, use: + + +gitlab-db-ha for the DB instance identifier. + +gitlab for a master username. + A very secure password for the master password. + + + Make a note of these as we need them later. + + For the DB instance size, select Standard classes and select an instance size that meets your requirements from the dropdown list. We use a db.m4.large instance. + Under Storage, configure the following: + + Select Provisioned IOPS (SSD) from the storage type dropdown list. Provisioned IOPS (SSD) storage is best suited for this use (though you can choose General Purpose (SSD) to reduce the costs). Read more about it at Storage for Amazon RDS. + Allocate storage and set provisioned IOPS. We use the minimum values, 100 and 1000, respectively. + Enable storage autoscaling (optional) and set a maximum storage threshold. + + + Under Availability & durability, select Create a standby instance to have a standby RDS instance provisioned in a different Availability Zone. + Under Connectivity, configure the following: + + Select the VPC we created earlier (gitlab-vpc) from the Virtual Private Cloud (VPC) dropdown list. + Expand the Additional connectivity configuration section and select the subnet group (gitlab-rds-group) we created earlier. + Set public accessibility to No. + Under VPC security group, select Choose existing and select the gitlab-rds-sec-group we create above from the dropdown list. + Leave the database port as the default 5432. + + + For Database authentication, select Password authentication. + Expand the Additional configuration section and complete the following: + + The initial database name. We use gitlabhq_production. + Configure your preferred backup settings. + The only other change we make here is to disable auto minor version updates under Maintenance. + Leave all the other settings as is or tweak according to your needs. + If you’re happy, select Create database. + + + + +Now that the database is created, let’s move on to setting up Redis with ElastiCache. + +Redis with ElastiCache + + +ElastiCache is an in-memory hosted caching solution. Redis maintains its own +persistence and is used to store session data, temporary cache information, and background job queues for the GitLab application. + +Create a Redis Security Group + + + + Go to the EC2 dashboard. + Select Security Groups from the left menu. + Select Create security group and fill in the details. Give it a name (we use gitlab-redis-sec-group), +add a description, and choose the VPC we created previously + In the Inbound rules section, select Add rule and add a Custom TCP rule, set port 6379, and set the “Custom” source as the gitlab-loadbalancer-sec-group we created earlier. + When done, select Create security group. + + +Redis Subnet Group + + + + Go to the ElastiCache dashboard from your AWS console. + Go to Subnet Groups in the left menu, and create a new subnet group (we name ours gitlab-redis-group). +Make sure to select our VPC and its private subnets. + + Select Create when ready. + + + + + +Create the Redis Cluster + + + + Go back to the ElastiCache dashboard. + Select Redis on the left menu and select Create to create a new +Redis cluster. Do not enable Cluster Mode as it is not supported. 
Even without cluster mode on, you still get the +chance to deploy Redis in multiple availability zones. + In the settings section: + + Give the cluster a name (gitlab-redis) and a description. + For the version, select the latest. + Leave the port as 6379 because this is what we used in our Redis security group above. + Select the node type (at least cache.t3.medium, but adjust to your needs) and the number of replicas. + + + In the advanced settings section: + + Select the multi-AZ auto-failover option. + Select the subnet group we created previously. + + Manually select the preferred availability zones, and under “Replica 2” +choose a different zone than the other two. + + + + + + In the security settings, edit the security groups and choose the +gitlab-redis-sec-group we had previously created. + Leave the rest of the settings to their default values or edit to your liking. + When done, select Create. + + +Setting up Bastion Hosts + + +Because our GitLab instances are in private subnets, we need a way to connect +to these instances with SSH for actions that include making configuration changes +and performing upgrades. One way of doing this is by using a bastion host, +sometimes also referred to as a jump box. + + + note If you do not want to maintain bastion hosts, you can set up AWS Systems Manager Session Manager for access to instances. This is beyond the scope of this document. + + +Create Bastion Host A + + + + Go to the EC2 Dashboard and select Launch instance. + Select the Ubuntu Server 18.04 LTS (HVM) AMI. + Choose an instance type. We use a t2.micro as we only use the bastion host to SSH into our other instances. + Select Configure Instance Details. + + Under Network, select the gitlab-vpc from the dropdown list. + Under Subnet, select the public subnet we created earlier (gitlab-public-10.0.0.0). + Double check that under Auto-assign Public IP you have Use subnet setting (Enable) selected. + Leave everything else as default and select Add Storage. + + + For storage, we leave everything as default and only add an 8GB root volume. We do not store anything on this instance. + Select Add Tags and on the next screen select Add Tag. + + We only set Key: Name and Value: Bastion Host A. + + + Select Configure Security Group. + + Select Create a new security group, enter a Security group name (we use bastion-sec-group), and add a description. + We enable SSH access from anywhere (0.0.0.0/0). If you want stricter security, specify a single IP address or an IP address range in CIDR notation. + Select Review and Launch + + + + Review all your settings and, if you’re happy, select Launch. + Acknowledge that you have access to an existing key pair or create a new one. Select Launch Instance. + + +Confirm that you can SSH into the instance: + + + On the EC2 Dashboard, select Instances in the left menu. + Select Bastion Host A from your list of instances. + Select Connect and follow the connection instructions. + If you are able to connect successfully, let’s move on to setting up our second bastion host for redundancy. + + +Create Bastion Host B + + + + Create an EC2 instance following the same steps as above with the following changes: + + For the Subnet, select the second public subnet we created earlier (gitlab-public-10.0.2.0). + Under the Add Tags section, we set Key: Name and Value: Bastion Host B so that we can easily identify our two instances. + For the security group, select the existing bastion-sec-group we created above. 
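With both bastion hosts running, you can already try the pattern we rely on for the rest of this guide: connecting from your workstation through a bastion to instances in the private subnets, without ever copying a private key onto the bastion. A minimal sketch, where the key file name and the IP addresses are placeholders:

# Load your EC2 key into the local SSH agent.
ssh-add ~/.ssh/your-aws-key.pem

# Connect to Bastion Host A with agent forwarding enabled (-A).
ssh -A ubuntu@<bastion-host-a-public-ip>

# From the bastion, hop on to any instance in the private subnets once
# such instances exist; the forwarded agent supplies the key.
ssh ubuntu@<private-instance-ip>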
+ + + + +Use SSH Agent Forwarding + + +EC2 instances running Linux use private key files for SSH authentication. You connect to your bastion host using an SSH client and the private key file stored on your client. Because the private key file is not present on the bastion host, you are not able to connect to your instances in private subnets. + +Storing private key files on your bastion host is a bad idea. To get around this, use SSH agent forwarding on your client. See Securely Connect to Linux Instances Running in a Private Amazon VPC for a step-by-step guide on how to use SSH agent forwarding. + +Install GitLab and create custom AMI + + +We need a preconfigured, custom GitLab AMI to use in our launch configuration later. As a starting point, we use the official GitLab AMI to create a GitLab instance. Then, we add our custom configuration for PostgreSQL, Redis, and Gitaly. If you prefer, instead of using the official GitLab AMI, you can also spin up an EC2 instance of your choosing and manually install GitLab. + +Install GitLab + + +From the EC2 dashboard: + + + Use the section below titled “Find official GitLab-created AMI IDs on AWS” to find the correct AMI to launch. + After selecting Launch on the desired AMI, select an instance type based on your workload. Consult the hardware requirements to choose one that fits your needs (at least c5.xlarge, which is sufficient to accommodate 100 users). + Select Configure Instance Details: + + In the Network dropdown list, select gitlab-vpc, the VPC we created earlier. + In the Subnet dropdown list, select gitlab-private-10.0.1.0 from the list of subnets we created earlier. + Double check that Auto-assign Public IP is set to Use subnet setting (Disable). + Select Add Storage. + The root volume is 8GiB by default and should be enough given that we do not store any data there. + + + Select Add Tags and add any tags you may need. In our case, we only set Key: Name and Value: GitLab. + Select Configure Security Group. Check Select an existing security group and select the gitlab-loadbalancer-sec-group we created earlier. + Select Review and launch followed by Launch if you’re happy with your settings. + Finally, acknowledge that you have access to the selected private key file or create a new one. Select Launch Instances. + + +Add custom configuration + + +Connect to your GitLab instance via Bastion Host A using SSH Agent Forwarding. Once connected, add the following custom configuration: + +Disable Let’s Encrypt + + +Because we’re adding our SSL certificate at the load balancer, we do not need the GitLab built-in support for Let’s Encrypt. Let’s Encrypt is enabled by default when using an https domain in GitLab 10.7 and later, so we must explicitly disable it: + + + + Open /etc/gitlab/gitlab.rb and disable it: + + +letsencrypt['enable'] = false + + + + Save the file and reconfigure for the changes to take effect: + + +sudo gitlab-ctl reconfigure + + + + +Install the required extensions for PostgreSQL + + +From your GitLab instance, connect to the RDS instance to verify access and to install the required pg_trgm and btree_gist extensions. + +To find the host or endpoint, go to Amazon RDS > Databases and select the database you created earlier. Look for the endpoint under the Connectivity & security tab. + +Do not to include the colon and port number: + +sudo /opt/gitlab/embedded/bin/psql -U gitlab -h -d gitlabhq_production + + +At the psql prompt create the extension and then quit the session: + +psql (10.9) +Type ""help"" for help. 
+ +gitlab=# CREATE EXTENSION pg_trgm; +gitlab=# CREATE EXTENSION btree_gist; +gitlab=# \q + + +Configure GitLab to connect to PostgreSQL and Redis + + + + + Edit /etc/gitlab/gitlab.rb, find the external_url 'http://' option +and change it to the https domain you are using. + + + Look for the GitLab database settings and uncomment as necessary. In +our current case we specify the database adapter, encoding, host, name, +username, and password: + + +# Disable the built-in Postgres + postgresql['enable'] = false + +# Fill in the connection details +gitlab_rails['db_adapter'] = ""postgresql"" +gitlab_rails['db_encoding'] = ""unicode"" +gitlab_rails['db_database'] = ""gitlabhq_production"" +gitlab_rails['db_username'] = ""gitlab"" +gitlab_rails['db_password'] = ""mypassword"" +gitlab_rails['db_host'] = """" + + + + Next, we must configure the Redis section by adding the host and +uncommenting the port: + + +# Disable the built-in Redis +redis['enable'] = false + +# Fill in the connection details +gitlab_rails['redis_host'] = """" +gitlab_rails['redis_port'] = 6379 + + + + Finally, reconfigure GitLab for the changes to take effect: + + +sudo gitlab-ctl reconfigure + + + + You can also run a check and a service status to make sure +everything has been setup correctly: + + +sudo gitlab-rake gitlab:check +sudo gitlab-ctl status + + + + +Set up Gitaly + + + + caution In this architecture, having a single Gitaly server creates a single point of failure. Use +Gitaly Cluster to remove this limitation. + + +Gitaly is a service that provides high-level RPC access to Git repositories. +It should be enabled and configured on a separate EC2 instance in one of the +private subnets we configured previously. + +Let’s create an EC2 instance where we install Gitaly: + + + From the EC2 dashboard, select Launch instance. + Choose an AMI. In this example, we select the Ubuntu Server 18.04 LTS (HVM), SSD Volume Type. + Choose an instance type. We pick a c5.xlarge. + Select Configure Instance Details. + + In the Network dropdown list, select gitlab-vpc, the VPC we created earlier. + In the Subnet dropdown list, select gitlab-private-10.0.1.0 from the list of subnets we created earlier. + Double check that Auto-assign Public IP is set to Use subnet setting (Disable). + Select Add Storage. + + + Increase the Root volume size to 20 GiB and change the Volume Type to Provisioned IOPS SSD (io1). (This is an arbitrary size. Create a volume big enough for your repository storage requirements.) + + For IOPS set 1000 (20 GiB x 50 IOPS). You can provision up to 50 IOPS per GiB. If you select a larger volume, increase the IOPS accordingly. Workloads where many small files are written in a serialized manner, like git, requires performant storage, hence the choice of Provisioned IOPS SSD (io1). + + + Select Add Tags and add your tags. In our case, we only set Key: Name and Value: Gitaly. + Select Configure Security Group and let’s Create a new security group. + + Give your security group a name and description. We use gitlab-gitaly-sec-group for both. + Create a Custom TCP rule and add port 8075 to the Port Range. For the Source, select the gitlab-loadbalancer-sec-group. + Also add an inbound rule for SSH from the bastion-sec-group so that we can connect using SSH Agent Forwarding from the Bastion hosts. + + + Select Review and launch followed by Launch if you’re happy with your settings. + Finally, acknowledge that you have access to the selected private key file or create a new one. Select Launch Instances. 
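Once Gitaly is installed on that instance, the GitLab application node must be told where to find it. The snippet below is only a rough sketch of the client-side settings in /etc/gitlab/gitlab.rb; the address and token are placeholders, and the exact keys differ between GitLab versions, so follow the Gitaly documentation referenced below for your release:

# Appends example client-side Gitaly settings to gitlab.rb (illustrative only).
sudo tee -a /etc/gitlab/gitlab.rb > /dev/null <<'EOF'
# Point the default repository storage at the Gitaly instance's private address.
git_data_dirs({
  'default' => { 'gitaly_address' => 'tcp://10.0.1.50:8075' }
})
# Shared secret that must match the token configured on the Gitaly server.
gitlab_rails['gitaly_token'] = 'gitaly-shared-secret'
EOF

sudo gitlab-ctl reconfigure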
 note Instead of storing configuration and repository data on the root volume, you can also choose to add an additional EBS volume for repository storage. Follow the same guidance as above. See the Amazon EBS pricing. We do not recommend using EFS as it may negatively impact the performance of GitLab. You can review the relevant documentation for more details.

Now that we have our EC2 instance ready, follow the documentation to install GitLab and set up Gitaly on its own server. Perform the client setup steps from that document on the GitLab instance we created above.

Add Support for Proxied SSL

As we are terminating SSL at our load balancer, follow the steps at Supporting proxied SSL to configure this in /etc/gitlab/gitlab.rb.

Remember to run sudo gitlab-ctl reconfigure after saving the changes to the gitlab.rb file.

Fast lookup of authorized SSH keys

The public SSH keys for users allowed to access GitLab are stored in /var/opt/gitlab/.ssh/authorized_keys. Typically we’d use shared storage so that all the instances are able to access this file when a user performs a Git action over SSH. Because we do not have shared storage in our setup, we update our configuration to authorize SSH users via indexed lookup in the GitLab database.

Follow the instructions at Set up fast SSH key lookup to switch from using the authorized_keys file to the database.

If you do not configure fast lookup, Git actions over SSH result in the following error:

Permission denied (publickey).
fatal: Could not read from remote repository.

Please make sure you have the correct access rights
and the repository exists.

Configure host keys

Ordinarily we would manually copy the contents (private and public keys) of /etc/ssh/ on the primary application server to /etc/ssh on all secondary servers. This prevents false man-in-the-middle-attack alerts when accessing servers in your cluster behind a load balancer.

We automate this by creating static host keys as part of our custom AMI. As these host keys are also rotated every time an EC2 instance boots up, “hard coding” them into our custom AMI serves as a workaround.

On your GitLab instance run the following:

sudo mkdir /etc/ssh_static
sudo cp -R /etc/ssh/* /etc/ssh_static

In /etc/ssh/sshd_config update the following:

# HostKeys for protocol version 2
HostKey /etc/ssh_static/ssh_host_rsa_key
HostKey /etc/ssh_static/ssh_host_dsa_key
HostKey /etc/ssh_static/ssh_host_ecdsa_key
HostKey /etc/ssh_static/ssh_host_ed25519_key

Amazon S3 object storage

Because we’re not using NFS for shared storage, we use Amazon S3 buckets to store backups, artifacts, LFS objects, uploads, merge request diffs, container registry images, and more. Our documentation includes instructions on how to configure object storage for each of these data types, and other information about using object storage with GitLab.

 note Because we are using the AWS IAM profile we created earlier, be sure to omit the AWS access key and secret access key/value pairs when configuring object storage. Instead, use 'use_iam_profile' => true in your configuration as shown in the object storage documentation linked above.

Remember to run sudo gitlab-ctl reconfigure after saving the changes to the gitlab.rb file.

That concludes the configuration changes for our GitLab instance. Next, we create a custom AMI based on this instance to use for our launch configuration and auto scaling group.
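For reference before baking the custom AMI, here is a rough sketch of what the consolidated object storage settings described above can look like in /etc/gitlab/gitlab.rb. The region and bucket names are illustrative (they assume the gl- prefix used in the IAM policy earlier), backups are configured through their own settings, and the authoritative key names are in the object storage documentation:

# Appends an example consolidated object storage block (adjust before use).
sudo tee -a /etc/gitlab/gitlab.rb > /dev/null <<'EOF'
gitlab_rails['object_store']['enabled'] = true
gitlab_rails['object_store']['connection'] = {
  'provider' => 'AWS',
  'region' => 'us-west-2',
  # No access keys: the instance profile created earlier supplies credentials.
  'use_iam_profile' => true
}
gitlab_rails['object_store']['objects']['artifacts']['bucket'] = 'gl-artifacts'
gitlab_rails['object_store']['objects']['lfs']['bucket'] = 'gl-lfs'
gitlab_rails['object_store']['objects']['uploads']['bucket'] = 'gl-uploads'
EOF

sudo gitlab-ctl reconfigure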
Log in for the first time

Using the domain name you used when setting up DNS for the load balancer, you should now be able to visit GitLab in your browser.

Depending on how you installed GitLab and if you did not change the password by any other means, the default password is either:

 Your instance ID if you used the official GitLab AMI.
 A randomly generated password stored for 24 hours in /etc/gitlab/initial_root_password.

To change the default password, log in as the root user with the default password and change it in the user profile.

When our auto scaling group spins up new instances, we are able to sign in with username root and the newly created password.

Create custom AMI

On the EC2 dashboard:

 Select the GitLab instance we created earlier.
 Select Actions, scroll down to Image and select Create Image.
 Give your image a name and description (we use GitLab-Source for both).
 Leave everything else as default and select Create Image.

Now we have a custom AMI that we use to create our launch configuration in the next step.

Deploy GitLab inside an auto scaling group

Create a launch configuration

From the EC2 dashboard:

 Select Launch Configurations from the left menu and select Create launch configuration.
 Select My AMIs from the left menu and select the GitLab custom AMI we created above.
 Select an instance type best suited for your needs (at least a c5.xlarge) and select Configure details.
 Enter a name for your launch configuration (we use gitlab-ha-launch-config).
 Do not check Request Spot Instance.
 From the IAM Role dropdown list, pick the GitLabS3Access instance role we created earlier.
 Leave the rest as defaults and select Add Storage.
 The root volume is 8GiB by default and should be enough given that we do not store any data there. Select Configure Security Group.
 Check Select an existing security group and select the gitlab-loadbalancer-sec-group we created earlier.
 Select Review, review your changes, and select Create launch configuration.
 Acknowledge that you have access to the private key or create a new one. Select Create launch configuration.

Create an auto scaling group

 After the launch configuration is created, select Create an Auto Scaling group using this launch configuration to start creating the auto scaling group.
 Enter a Group name (we use gitlab-auto-scaling-group).
 For Group size, enter the number of instances you want to start with (we enter 2).
 Select the gitlab-vpc from the Network dropdown list.
 Add both the private subnets we created earlier.
 Expand the Advanced Details section and check the Receive traffic from one or more load balancers option.
 From the Classic Load Balancers dropdown list, select the load balancer we created earlier.
 For Health Check Type, select ELB.
 We leave our Health Check Grace Period as the default 300 seconds. Select Configure scaling policies.
 Check Use scaling policies to adjust the capacity of this group.
 For this group we scale between 2 and 4 instances where one instance is added if CPU utilization is greater than 60% and one instance is removed if it falls to less than 45%.
 Finally, configure notifications and tags as you see fit, review your changes, and create the auto scaling group.

As the auto scaling group is created, you see your new instances spinning up in your EC2 dashboard. You also see the new instances added to your load balancer.
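If you want this step to be repeatable, the launch configuration and auto scaling group can also be created with the AWS CLI. This is only a sketch with placeholder AMI, security group, and subnet IDs; scaling policies can then be attached separately with aws autoscaling put-scaling-policy:

# Launch configuration based on the custom AMI created above.
aws autoscaling create-launch-configuration \
  --launch-configuration-name gitlab-ha-launch-config \
  --image-id ami-0123456789abcdef0 \
  --instance-type c5.xlarge \
  --iam-instance-profile GitLabS3Access \
  --security-groups sg-0123456789abcdef0

# Auto scaling group spanning both private subnets, attached to the
# classic load balancer and using its health checks.
aws autoscaling create-auto-scaling-group \
  --auto-scaling-group-name gitlab-auto-scaling-group \
  --launch-configuration-name gitlab-ha-launch-config \
  --min-size 2 --max-size 4 --desired-capacity 2 \
  --vpc-zone-identifier 'subnet-aaaa1111,subnet-bbbb2222' \
  --load-balancer-names gitlab-loadbalancer \
  --health-check-type ELB --health-check-grace-period 300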
After the instances pass the health check, they are ready to start receiving traffic from the load balancer.

Because our instances are created by the auto scaling group, go back to your instances and terminate the instance we created manually above. We only needed this instance to create our custom AMI.

Health check and monitoring with Prometheus

Apart from Amazon’s Cloudwatch which you can enable on various services, GitLab provides its own integrated monitoring solution based on Prometheus. For more information about how to set it up, see GitLab Prometheus.

GitLab also has various health check endpoints that you can ping and get reports.

GitLab Runner

If you want to take advantage of GitLab CI/CD, you have to set up at least one runner.

Read more on configuring an autoscaling GitLab Runner on AWS.

Backup and restore

GitLab provides a tool to back up and restore its Git data, database, attachments, LFS objects, and so on.

Some important things to know:

 The backup/restore tool does not store some configuration files, like secrets; you must configure this yourself.
 By default, the backup files are stored locally, but you can backup GitLab using S3.
 You can exclude specific directories from the backup.

Backing up GitLab

To back up GitLab:

 SSH into your instance.

 Take a backup:

sudo gitlab-backup create

 note For GitLab 12.1 and earlier, use gitlab-rake gitlab:backup:create.

Restoring GitLab from a backup

To restore GitLab, first review the restore documentation, and primarily the restore prerequisites. Then, follow the steps under the Linux package installations section.

Updating GitLab

GitLab releases a new version every month on the release date. Whenever a new version is released, you can update your GitLab instance:

 SSH into your instance.

 Take a backup:

sudo gitlab-backup create

 note For GitLab 12.1 and earlier, use gitlab-rake gitlab:backup:create.

 Update the repositories and install GitLab:

sudo apt update
sudo apt install gitlab-ee

After a few minutes, the new version should be up and running.

Find official GitLab-created AMI IDs on AWS

Read more on how to use GitLab releases as AMIs.

Conclusion

In this guide, we went mostly through scaling and some redundancy options; your mileage may vary.

Keep in mind that all solutions come with a trade-off between cost/complexity and uptime. The more uptime you want, the more complex the solution. And the more complex the solution, the more work is involved in setting up and maintaining it.

Have a read through these other resources and feel free to open an issue to request additional material:

 Scaling GitLab: GitLab supports several different types of clustering.
 Geo replication: Geo is the solution for widely distributed development teams.
 Linux package - Everything you must know about administering your GitLab instance.
 Add a license: Activate all GitLab Enterprise Edition functionality with a license.
 Pricing: Pricing for the different tiers.

Troubleshooting

Instances are failing health checks

If your instances are failing the load balancer’s health checks, verify that they are returning a status 200 from the health check endpoint we configured earlier. Any other status, including redirects like status 302, causes the health check to fail.
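To see what the load balancer sees, you can query the readiness endpoint yourself from a host inside the VPC (for example, one of the bastion hosts), because the endpoint only answers to addresses on its IP allowlist. The instance IP below is a placeholder:

# Prints only the HTTP status code; anything other than 200 fails the check.
curl --silent --output /dev/null --write-out '%{http_code}\n' \
  http://<instance-private-ip>/-/readiness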
+ +You may have to set a password on the root user to prevent automatic redirects on the sign-in endpoint before health checks pass. + +“The change you requested was rejected (422)” + + +If you see this page when trying to set a password via the web interface, make sure external_url in gitlab.rb matches the domain you are making a request from, and run sudo gitlab-ctl reconfigure after making any changes to it. + +Some job logs are not uploaded to object storage + + +When the GitLab deployment is scaled up to more than one node, some job logs may not be uploaded to object storage properly. Incremental logging is required for CI to use object storage. + +Enable incremental logging if it has not already been enabled. + + +" +where are the settings for merge request approvals?,,"1. Merge request approval settings + + + +Merge request approval settings + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +You can configure the settings for merge request approvals to +ensure the approval rules meet your use case. You can also configure +approval rules, which define the number and type of users who must +approve work before it’s merged. Merge request approval settings define how +those rules are applied as a merge request moves toward completion. + +Edit merge request approval settings + + +To view or edit merge request approval settings: + + + On the left sidebar, select Search or go to and find your project. + Select Settings > Merge requests. + Expand Approvals. + + +Approval settings + + +These settings limit who can approve merge requests: + + + +Prevent approval by author: +Prevents the author of a merge request from approving it. + +Prevent approvals by users who add commits: +Prevents users who add commits to a merge request from also approving it. + +Prevent editing approval rules in merge requests: +Prevents users from overriding project level approval rules on merge requests. + +Require user re-authentication (password or SAML) to approve: +Force potential approvers to first authenticate with either a password or with SAML. + Code Owner approval removals: Define what happens to existing approvals when +commits are added to the merge request. + + +Keep approvals: Do not remove any approvals. + +Remove all approvals: +Remove all existing approvals. + +Remove approvals by Code Owners if their files changed: +If a Code Owner approves a merge request, and a later commit changes files +they are a Code Owner for, their approval is removed. + + + + +Prevent approval by author + + +By default, the author of a merge request cannot approve it. To change this setting: + + + On the left sidebar, select Search or go to and find your project. + Select Settings > Merge requests. + In the Merge request approvals section, scroll to Approval settings and +clear the Prevent approval by author checkbox. + Select Save changes. + + +Authors can edit the approval rule in an individual merge request and override +this setting, unless you configure one of these options: + + + +Prevent overrides of default approvals at +the project level. + +(Self-managed instances only) Prevent overrides of default approvals +at the instance level. When configured +at the instance level, you can’t edit this setting at the project or individual +merge request levels. + + +Prevent approvals by users who add commits + + + +History + + + + + +Feature flag keep_merge_commits_for_approvals added in GitLab 16.3 to also include merge commits in this check. 
+ +Feature flag keep_merge_commits_for_approvals removed in GitLab 16.5. This check now includes merge commits. + + + + + + +By default, users who commit to a merge request can still approve it. At both +the project level or instance level, +you can prevent committers from approving merge requests that are partially +their own. To do this: + + + On the left sidebar, select Settings > Merge requests. + In the Merge request approvals section, scroll to Approval settings and +select Prevent approvals by users who add commits. +If this checkbox is cleared, an administrator has disabled it +at the instance level, and +it can’t be changed at the project level. + Select Save changes. + + +Depending on your version of GitLab, code owners who commit +to a merge request may or may not be able to approve the work: + + + In GitLab 13.10 and earlier, code owners who commit +to a merge request can approve it, even if the merge request affects files they own. + In GitLab 13.11 and later, +code owners who commit +to a merge request cannot approve it, when the merge request affects files they own. + + +For more information, see the official Git documentation. + +Prevent editing approval rules in merge requests + + +By default, users can override the approval rules you create for a project +on a per-merge-request basis. If you don’t want users to change approval rules +on merge requests, you can disable this setting: + + + On the left sidebar, select Settings > Merge requests. + In the Merge request approvals section, scroll to Approval settings and +select Prevent editing approval rules in merge requests. + Select Save changes. + + +This change affects all open merge requests. + +When this field is changed, it can affect all open merge requests depending on the setting: + + + If users could edit approval rules previously, and you disable this behavior, +all open merge requests are updated to enforce the approval rules. + If users could not edit approval rules previously, and you enable approval rule +editing, open merge requests remain unchanged. This preserves any changes already +made to approval rules in those merge requests. + + +Require user re-authentication to approve + + + +History + + + + + Requiring re-authentication by using SAML authentication for GitLab.com groups introduced in GitLab 16.6 with a flag named ff_require_saml_auth_to_approve. Disabled by default. + Requiring re-authentication by using SAML authentication for self-managed instances introduced in GitLab 16.7 with a flag named ff_require_saml_auth_to_approve. Disabled by default. + +Enabled ff_require_saml_auth_to_approve by default in GitLab 16.8 for GitLab.com and self-managed instances. + + + + + + + + On self-managed GitLab, by default requiring re-authentication by using SAML authentication is available. To hide the feature, an administrator can +disable the feature flag named ff_require_saml_auth_to_approve. On GitLab.com and GitLab Dedicated, this feature is available. + + +You can force potential approvers to first authenticate with either: + + + A password. + SAML. + + +This permission enables an electronic signature for approvals, such as the one defined by +Code of Federal Regulations (CFR) Part 11. This +setting is only available on top-level groups. For more information, see Settings cascading. + + + On the left sidebar, select Search or go to and find your project. + Enable password authentication and SAML authentication. For more information on: + + Password authentication, see +sign-in restrictions documentation. 
+ SAML authentication for GitLab.com groups, see +SAML SSO for GitLab.com groups documentation. + SAML authentication for self-managed instances, see SAML SSO for self-managed GitLab instances. + + + On the left sidebar, select Settings > Merge requests. + In the Merge request approvals section, scroll to Approval settings and +select Require user re-authentication (password or SAML) to approve. + Select Save changes. + + +Remove all approvals when commits are added to the source branch + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +By default, an approval on a merge request is removed when you add more changes +after the approval. In GitLab Premium and Ultimate tiers, to keep existing approvals +after more changes are added to the merge request: + + + On the left sidebar, select Search or go to and find your project. + Select Settings > Merge requests. + In the Merge request approvals section, scroll to Approval settings and +clear the Remove all approvals checkbox. + Select Save changes. + + +Approvals aren’t removed when a merge request is rebased from the UI +However, approvals are reset if the target branch is changed. + +Remove approvals by Code Owners if their files changed + + + +History + + + + + +Introduced in GitLab 15.3. + + + + + + +If you only want to remove approvals by Code Owners whose files have been changed when a commit is added: + +Prerequisites: + + + You must have at least the Maintainer role for a project. + + +To do this: + + + On the left sidebar, select Search or go to and find your project. + Select Settings > Merge requests. + In the Merge request approvals section, scroll to Approval settings and +select Remove approvals by Code Owners if their files changed. + Select Save changes. + + +Settings cascading + + + +History + + + + + +Introduced in GitLab 14.4. Deployed behind the group_merge_request_approval_settings_feature_flag flag, disabled by default. + +Enabled by default in GitLab 14.5. + +Feature flag group_merge_request_approval_settings_feature_flag removed in GitLab 14.9. + + + + + + +You can also enforce merge request approval settings: + + + At the instance level, which apply to all groups +on an instance and, therefore, all projects. + On a top-level group, which apply to all subgroups +and projects. + + +If the settings are inherited by a group or project, they cannot be changed in the group or project +that inherited them. + +Related topics + + + + Instance-level merge request approval settings + Compliance center + Merge request approvals API + + + +2. Merge request approvals + + + +Merge request approvals + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +You can configure your merge requests so that they must be approved before +they can be merged. While GitLab Free allows +all users with Developer or greater permissions to +approve merge requests, these approvals are optional. +GitLab Premium and +GitLab Ultimate provide additional +flexibility: + + + Create required rules about the number and type of approvers before work can merge. + Specify a list of users who act as code owners for specific files, +and require their approval before work can merge. + For GitLab Premium and GitLab Ultimate, configure approvals +for the entire instance. + + +You can configure merge request approvals on a per-project basis, and some approvals can be configured +on the group level. Support for +group-level settings for merge request approval rules is tracked in this +epic. 
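The rules and settings described on this page can also be managed through the Merge request approvals API rather than the UI. A minimal sketch, where the host, token, project ID, rule name, and values are all placeholders:

# Update project-level approval settings, for example to prevent authors
# from approving their own merge requests and to reset approvals on push.
curl --request POST --header 'PRIVATE-TOKEN: <your_access_token>' \
  'https://gitlab.example.com/api/v4/projects/42/approvals?merge_requests_author_approval=false&reset_approvals_on_push=true'

# Add an approval rule that requires two approvals.
curl --request POST --header 'PRIVATE-TOKEN: <your_access_token>' \
  --data 'name=Backend&approvals_required=2' \
  'https://gitlab.example.com/api/v4/projects/42/approval_rules'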
+ +How approvals work + + +With merge request approval rules, you can set the minimum number of +required approvals before work can merge into your project. You can also extend these +rules to define what types of users can approve work. Some examples of rules you can create include: + + + Users with specific permissions can always approve work. + +Code owners can approve work for files they own. + Users with specific permissions can approve work, +even if they don’t have merge rights +to the repository. + Users with specific permissions can be allowed or denied the ability +to override approval rules on a specific merge request. + + +You can also configure: + + + Additional settings for merge request approvals for more control of the +level of oversight and security your project needs. + Merge request approval rules and settings through the GitLab UI or with the +Merge request approvals API. + + +Approvals cannot be changed after a merge request is merged. + +Approve a merge request + + +When an eligible approver visits an open merge request, +GitLab displays one of these buttons after the body of the merge request: + + + +Approve: The merge request doesn’t yet have the required number of approvals. + +Approve additionally: The merge request has the required number of approvals. + +Revoke approval: The user viewing the merge request has already approved +the merge request. + + +Eligible approvers can also use the /approve +quick action when adding a comment to +a merge request. Users in the reviewer list who have approved a merge request display +a green check mark ( ) next to their name. + +After a merge request receives the number and type of approvals you configure, it can merge +unless it’s blocked for another reason. Merge requests can be blocked by other problems, +such as merge conflicts, unresolved threads, +or a failed CI/CD pipeline. + +To prevent merge request authors from approving their own merge requests, +enable Prevent author approval +in your project’s settings. + +If you enable approval rule overrides, +merge requests created before a change to default approval rules are not affected. +The only exceptions are changes to the target branch +of the rule. + +Optional approvals + + +GitLab allows all users with Developer or greater permissions +to approve merge requests. Approvals in GitLab Free are optional, and don’t prevent +a merge request from merging without approval. + +Required approvals + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + Moved to GitLab Premium in 13.9. + + + + + + +Required approvals enforce code reviews by the number and type of users you specify. +Without the approvals, the work cannot merge. Required approvals enable multiple use cases: + + + Enforce review of all code that gets merged into a repository. + Specify reviewers for a given proposed code change, and a minimum number +of reviewers, through Approval rules. + Specify categories of reviewers, such as backend, frontend, quality assurance, or +database, for all proposed code changes. + Use the code owners of changed files, +to determine who should review the work. + Require an approval before merging code that causes test coverage to decline. + Users on GitLab Ultimate can also require approval from a security team +before merging code that could introduce a vulnerability. + + +Invalid rules + + + +History + + + + + +Introduced in GitLab 15.1. + +Changed in GitLab 15.11 with a flag named invalid_scan_result_policy_prevents_merge. 
Disabled by default. + +Generally available in GitLab 16.2. Feature flag invalid_scan_result_policy_prevents_merge removed. + + + + + + + + On self-managed GitLab, by default this feature is available. To hide the feature, +an administrator can disable the feature flag named invalid_scan_result_policy_prevents_merge. + + +Whenever an approval rule cannot be satisfied, the rule is displayed as Auto approved. This applies to the following conditions: + + + The only eligible approver is the author of the merge request. + No eligible approvers (either groups or users) have been assigned to the approval rule. + The number of required approvals is more than the number of eligible approvers. + + +These rules are automatically approved to unblock their respective merge requests, unless they were +created through a merge request approval policy. +Invalid approval rules created through a merge request approval policy are presented with +Action required and are not automatically approved, blocking their respective merge requests. + +Related topics + + + + Merge request approvals API + +Instance-level approval rules for self-managed installations + + + + + +" +what is the difference between gitlab ultimate and gitlab premium? do i get more features? which ones? is it worth it for a business of 1000 ppl?,,"1. GitLab self-managed subscription + + + +GitLab self-managed subscription + + + +Tier: Premium, Ultimate +Offering: Self-managed + +After you subscribe to GitLab, you can manage the details of your self-managed subscription. + +Obtain a self-managed subscription + + +To subscribe to GitLab for a GitLab self-managed installation: + + + Go to the Customers Portal and purchase a GitLab self-managed plan. + After purchase, an activation code is sent to the email address associated with the Customers Portal account. +You must add this code to your GitLab instance. + + + + note If you’re purchasing a subscription for an existing Free GitLab self-managed +instance, ensure you’re purchasing enough seats to +cover your users. + + +Subscription seats + + +A GitLab self-managed subscription uses a hybrid model. You pay for a subscription +according to the maximum number of users enabled during the subscription period. +For instances that aren’t offline or on a closed network, the maximum number of +simultaneous users in the GitLab self-managed installation is checked each quarter. + +If an instance is unable to generate a quarterly usage report, the existing true up model is used. +Prorated charges are not possible without a quarterly usage report. + +View user totals + + +View the amount of users in your instance to determine if they exceed the amount +paid for in your subscription. + + + On the left sidebar, at the bottom, select Admin Area. + Select Users. + + +The lists of users are displayed. + +Billable users + + +Billable users count toward the number of subscription seats purchased in your subscription. + +A user is not counted as a billable user if: + + + They are deactivated or +blocked. + They are pending approval. + They have only the Minimal Access role on self-managed Ultimate subscriptions or any GitLab.com subscriptions. + They have only the Guest or Minimal Access roles on an Ultimate subscription. + They do not have project or group memberships on an Ultimate subscription. + The account is a GitLab-created service account: + + +Ghost User. + Bots such as: + + +Support Bot. + +Bot users for projects. + +Bot users for groups. + Other internal users. 
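If you are an administrator and want a rough sense of how many accounts are likely to count toward seats, you can query the Users API with filters that approximate the exclusions above. This is only an approximation; tier-specific rules such as the Guest-role exclusion on Ultimate are not reflected in these filters, and the host and token are placeholders:

# Read the X-Total header for a count of active, non-internal users.
curl --silent --include --header 'PRIVATE-TOKEN: <admin_access_token>' \
  'https://gitlab.example.com/api/v4/users?active=true&exclude_internal=true&per_page=1' \
  | grep -i '^x-total:'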
+ + + + + + +The amount of Billable users is reported once a day in the Admin Area. + +Maximum users + + +The number of maximum users reflects the highest peak of billable users for the current license period. + +Users over subscription + + +The number of users over subscription shows how many users are in excess of the number allowed by the subscription. This number reflects the current subscription period. + +For example, if: + + + The subscription allows 100 users and + +Maximum users is 150, + + +Then this value would be 50. + +If the Maximum users value is less than or equal to 100, then this value is 0. + +A trial license always displays zero for Users over subscription. + +If you add more users to your GitLab instance than you are licensed for, payment for the additional users is due at the time of renewal. + +If you do not add these users during the renewal process, your license key will not work. + +Free Guest users + + + +Tier: Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +In the Ultimate tier, users who are assigned the Guest role do not consume a seat. +The user must not be assigned any other role, anywhere in the instance. + + + If your project is private or internal, a user with the Guest role has +a set of permissions. + If your project is public, all users, including those with the Guest role +can access your project. + A user’s highest assigned role is updated asynchronously and may take some time to update. + + + + note If a user creates a project, they are assigned the Maintainer or Owner role. +To prevent a user from creating projects, as an administrator, you can mark the user +as external. + + +Tips for managing users and subscription seats + + +Managing the number of users against the number of subscription seats can be a challenge: + + + If LDAP integration is enabled, anyone in the configured domain can sign up for a GitLab account. +This can result in an unexpected bill at time of renewal. + If sign-up is enabled on your instance, anyone who can access the instance can sign up for an +account. + + +GitLab has several features which can help you manage the number of users: + + + Enable the Require administrator approval for new sign ups +option. + Enable block_auto_created_users for new sign-ups via LDAP or OmniAuth. + Enable the User cap +option. Available in GitLab 13.7 and later. + +Disable new sign-ups, and instead manage new +users manually. + View a breakdown of users by role in the Users statistics page. + + +Subscription data synchronization + + + +History + + + + + Introduced in GitLab 14.1. + + + + + + +Subscription data can be automatically synchronized between your self-managed instance and GitLab. +To enable subscription data synchronization you must have: + + + GitLab Enterprise Edition (EE), version 14.1 or later. + Connection to the internet, and must not have an offline environment. + +Activated your instance with an activation code. + + +When your instance is activated, and data is synchronized, the following processes are automated: + + + +Quarterly subscription reconciliation. + Subscription renewals. + Subscription updates, such as adding more seats or upgrading a GitLab tier. + + +At approximately 03:00 UTC, a daily synchronization job sends subscription data to the Customers +Portal. For this reason, updates and renewals may not apply immediately. + +The data is sent securely through an encrypted HTTPS connection to customers.gitlab.com on port +443. If the job fails, it retries up to 12 times over approximately 17 hours. 
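If synchronization jobs keep failing, a quick check from the GitLab instance can confirm whether it can reach the Customers Portal at all over HTTPS on port 443:

# A 2xx or 3xx response here means outbound connectivity to the Customers
# Portal is working; anything else points at network or proxy issues.
curl --silent --show-error --output /dev/null \
  --write-out 'customers.gitlab.com responded with HTTP %{http_code}\n' \
  https://customers.gitlab.com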
+ +Subscription data + + +The daily synchronization job sends only the following information to the Customers Portal: + + + Date + Timestamp + License key + + Company name (encrypted within license key) + Licensee name (encrypted within license key) + Licensee email (encrypted within license key) + + + Historical maximum user count + Billable users count + GitLab version + Hostname + Instance ID + + +Example of a license sync request: + +{ + ""gitlab_version"": ""14.1.0-pre"", + ""timestamp"": ""2021-06-14T12:00:09Z"", + ""date"": ""2021-06-14"", + ""license_key"": ""eyJkYXRhIjoiYlR2MFBPSEJPSnNOc1plbGtFRGZ6M + Ex1mWWhyM1Y3NWFOU0Zj\nak1xTmtLZHU1YzJJUWJzZzVxT3FQRU1PXG5 + KRzErL2ZNd0JuKzBwZmQ3YnY4\nTkFrTDFsMFZyQi9NcG5DVEdkTXQyNT + R3NlR0ZEc0MjBoTTVna2VORlVcbjAz\nbUgrNGl5N0NuenRhZlljd096R + nUzd2JIWEZ3NzV2V2lqb3FuQ3RYZWppWVFU\neDdESkgwSUIybFJhZlxu + Y2k0Mzl3RWlKYjltMkJoUzExeGIwWjN3Uk90ZGp1\nNXNNT3dtL0Vtc3l + zWVowSHE3ekFILzBjZ2FXSXVQXG5ENWJwcHhOZzRlcFhr\neFg0K3d6Zk + w3cHRQTTJMTGdGb2Vwai90S0VJL0ZleXhxTEhvaUc2NzVIbHRp\nVlRcb + nYzY090bmhsdTMrc0VGZURJQ3VmcXFFUS9ISVBqUXRhL3ZTbW9SeUNh\n + SjdDTkU4YVJnQTlBMEF5OFBiZlxuT0VORWY5WENQVkREdUMvTTVCb25Re + ENv\nK0FrekFEWWJ6VGZLZ1dBRjgzUXhyelJWUVJGTTErWm9TeTQ4XG5V + aWdXV0d4\nQ2graGtoSXQ1eXdTaUFaQzBtZGd2aG1YMnl1KzltcU9WMUx + RWXE4a2VSOHVn\nV3BMN1VFNThcbnMvU3BtTk1JZk5YUHhOSmFlVHZqUz + lXdjlqMVZ6ODFQQnFx\nL1phaTd6MFBpdG5NREFOVnpPK3h4TE5CQ1xub + GtacHNRdUxTZmtWWEZVUnB3\nWTZtWGdhWE5GdXhURjFndWhyVDRlTE92 + bTR3bW1ac0pCQnBkVWJIRGNyXG5z\nUjVsTWJxZEVUTXJNRXNDdUlWVlZ + CTnJZVTA2M2dHblc4eVNXZTc0enFUcW1V\nNDBrMUZpN3RTdzBaZjBcbm + 16UGNYV0RoelpkVk02cWR1dTl0Q1VqU05tWWlU\nOXlwRGZFaEhXZWhjb + m50RzA5UWVjWEM5em52Y1BjU1xueFU0MDMvVml5R3du\nQXNMTHkyajN5 + b3hhTkJUSWpWQ1BMUjdGeThRSEVnNGdBd0x6RkRHVWg1M0Qz\nMHFRXG5 + 5eWtXdHNHN3VBREdCNmhPODFJanNSZnEreDhyb2ZpVU5JVXo4NCtD\nem + Z1V1Q0K1l1VndPTngyc1l0TU5cbi9WTzlaaVdPMFhtMkZzM2g1NlVXcGI + y\nSUQzRnRlbW5vZHdLOWU4L0tiYWRESVRPQmgzQnIxbDNTS2tHN1xuQ3 + hpc29D\nNGh4UW5mUmJFSmVoQkh6eHV1dkY5aG11SUsyVmVDQm1zTXZCY + nZQNGdDbHZL\ndUExWnBEREpDXG41eEhEclFUd3E1clRYS2VuTjhkd3BU + SnVLQXgvUjlQVGpy\ncHJLNEIzdGNMK0xIN2JKcmhDOTlabnAvLzZcblZ + HbXk5SzJSZERIcXp3U2c3\nQjFwSmFPcFBFUHhOUFJxOUtnY2hVR0xWMF + d0Rk9vPVxuIiwia2V5IjoiUURM\nNU5paUdoRlVwZzkwNC9lQWg5bFY0Q + 3pkc2tSQjBDeXJUbG1ZNDE2eEpPUzdM\nVXkrYXRhTFdpb0lTXG5sTWlR + WEU3MVY4djFJaENnZHJGTzJsTUpHbUR5VHY0\ndWlSc1FobXZVWEhpL3h + vb1J4bW9XbzlxK2Z1OGFcblB6anp1TExhTEdUQVdJ\nUDA5Z28zY3JCcz + ZGOEVLV28xVzRGWWtUUVh2TzM0STlOSjVHR1RUeXkzVkRB\nc1xubUdRe + jA2eCtNNkFBM1VxTUJLZXRMUXRuNUN2R3l3T1VkbUx0eXZNQ3JX\nSWVQ + TElrZkJwZHhPOUN5Z1dCXG44UkpBdjRSQ1dkMlFhWVdKVmxUMllRTXc5\ + nL29LL2hFNWRQZ1pLdWEyVVZNRWMwRkNlZzg5UFZrQS9mdDVcbmlETWlh + YUZz\nakRVTUl5SjZSQjlHT2ovZUdTRTU5NVBBMExKcFFiVzFvZz09XG4 + iLCJpdiI6\nImRGSjl0YXlZWit2OGlzbGgyS2ZxYWc9PVxuIn0=\n"", + ""max_historical_user_count"": 75, + ""billable_users_count"": 75, + ""hostname"": ""gitlab.example.com"", + ""instance_id"": ""9367590b-82ad-48cb-9da7-938134c29088"" +} + + +Manually synchronize your subscription details + + +You can manually synchronize your subscription details at any time. + + + On the left sidebar, at the bottom, select Admin Area. + Select Subscription. + In the Subscription details section, select Sync subscription details. + + +A job is queued. When the job finishes, the subscription details are updated. + +View your subscription + + +If you are an administrator, you can view the status of your subscription: + + + On the left sidebar, at the bottom, select Admin Area. + Select Subscription. 
+ + +The Subscription page includes the following details: + + + Licensee + Plan + When it was uploaded, started, and when it expires + + +It also displays the following information: + + + + + Field + Description + + + + + Users in License + The number of users you’ve paid for in the current license loaded on the system. The number does not change unless you add seats during your current subscription period. + + + Billable users + The daily count of billable users on your system. The count may change as you block, deactivate, or add users to your instance. + + + Maximum users + The highest number of billable users on your system during the term of the loaded license. + + + Users over subscription + Calculated as Maximum users - Users in subscription for the current license term. This number incurs a retroactive charge that must be paid before renewal. + + + + +Export your license usage + + + +History + + + + + +Introduced in GitLab 14.2. + + + + + + +If you are an administrator, you can export your license usage into a CSV: + + + On the left sidebar, at the bottom, select Admin Area. + Select Subscription. + In the upper-right corner, select Export license usage file. + + +This file contains the information GitLab uses to manually process quarterly reconciliations or renewals. If your instance is firewalled or an offline environment, you must provide GitLab with this information. + +The License Usage CSV includes the following details: + + + License key + Licensee email + License start date + License end date + Company + Generated at (the timestamp for when the file was exported) + Table of historical user counts for each day in the period: + + Timestamp the count was recorded + Billable user count + + + + +NOTES: + + + All date timestamps are displayed in UTC. + A custom format is used for dates and times in CSV files. + + + + caution Do not open the license usage file. If you open the file, failures might occur when you submit your license usage data. + + +Renew your subscription + + +You can renew your subscription starting from 15 days before your subscription expires. To renew your subscription: + + + Prepare for renewal by reviewing your account. + Renew your GitLab self-managed subscription. + + +Prepare for renewal by reviewing your account + + +The Customers Portal is your +tool for renewing and modifying your subscription. Before going ahead with renewal, +sign in and go to Billing account settings. Verify or update: + + + The credit card on file under the Payment methods section. + The invoice contact details in the Company information section. + + + + note Contact our support team +if you need assistance accessing the Customers Portal or if you need to change +the contact person who manages your subscription. + + +It’s important to regularly review your user accounts, because: + + + Stale user accounts may count as billable users. You may pay more than you should +if you renew for too many users. + Stale user accounts can be a security risk. A regular review helps reduce this risk. + + +Users over subscription + + +A GitLab subscription is valid for a specific number of seats. The number of users over subscription +is the number of maximum users that exceed the users in subscription for the current subscription term. +You must pay for this number of users either before renewal, or at the time of renewal. This is +called the true up process. + +To view the number of users over subscription go to the Admin Area. 
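
If you prefer to estimate this from the command line before renewal, you can combine the Rails console helper for the historical maximum (shown in the troubleshooting section later on this page) with the Users in License figure from the Admin Area. The snippet below is an illustrative sketch only: the value 100 is a placeholder for your licensed seat count, and the one-year range copies the documented example, so adjust it to match your current license term.

# Illustrative sketch only. Replace 100 with the Users in License value from
# the Admin Area, and adjust the date range to your current license term.
max_users = ::HistoricalData.max_historical_user_count(from: 1.year.ago.beginning_of_day, to: Time.current.end_of_day)
users_in_license = 100
users_over_subscription = [max_users - users_in_license, 0].max
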
+ +Users over subscription example + + +You purchase a subscription for 10 users. + + + + + Event + Billable users + Maximum users + + + + + Ten users occupy all 10 seats. + 10 + 10 + + + Two new users join. + 12 + 12 + + + Three users leave and their accounts are blocked. + 9 + 12 + + + Four new users join. + 13 + 13 + + + + +Users over subscription = 13 - 10 (Maximum users - users in license) + +Add seats to a subscription + + +The users in license count can be increased by adding seats to a subscription any time during the +subscription period. The cost of seats added during the subscription +period is prorated from the date of purchase through the end of the subscription period. + +To add seats to a subscription: + + + Sign in to the Customers Portal. + Go to the Manage Purchases page. + Select Add more seats on the relevant subscription card. + Enter the number of additional users. + Review the Purchase summary section. The system lists the total price for all users on the system and a credit for what you’ve already paid. You are only charged for the net change. + Enter your payment information. + Select Purchase seats. + + +A payment receipt is emailed to you, which you can also access in the Customers Portal under View invoices. + +If your subscription was activated with an activation code, the additional seats are reflected in +your instance immediately. If you’re using a license file, you receive an updated file. +To add the seats, add the license file +to your instance. + +Renew subscription manually + + +Starting 30 days before a subscription expires, a banner with the expiry date displays for administrators in the GitLab user interface. + +You should follow these steps during renewal: + + + Prior to the renewal date, prune any inactive or unwanted users by blocking them. + Determine if you have a need for user growth in the upcoming subscription. + + Sign in to the Customers Portal and beneath your existing subscription, select Renew. The Renew button displays only 15 days before a subscription expires. If there are more than 15 days before the subscription expires, select Subscription actions ( ), then select Renew subscription to view the date when you can renew. + + + note If you need to change your GitLab tier, contact our sales team with the sales contact form for assistance as this can’t be done in the Customers Portal. + + + In the first box, enter the total number of user licenses you’ll need for the upcoming year. Be sure this number is at least equal to, or greater than the number of billable users in the system at the time of performing the renewal. + Enter the number of users over subscription in the second box for the user overage incurred in your previous subscription term. + Review your renewal details and complete the payment process. + An activation code for the renewal term is available on the Manage Purchases page on the relevant subscription card. Select Copy activation code to get a copy. + +Add the activation code to your instance. + + +An invoice is generated for the renewal and available for viewing or download on the View invoices page. If you have difficulty during the renewal process, contact our support team for assistance. + +Automatic subscription renewal + + +When a subscription is set to auto-renew, it renews automatically on the expiration date (at midnight UTC) without a gap in available service. Subscriptions purchased through Customers Portal are set to auto-renew by default. 
+ +The number of user licenses is adjusted to fit the number of billable users in your instance at the time of renewal, if that number is higher than the current subscription quantity. Before auto-renewal you should prepare for the renewal at least 2 days before the renewal date, so that your changes synchronize to GitLab in time for your renewal. To auto-renew your subscription, +you must have enabled the synchronization of subscription data. + +You can view and download your renewal invoice on the Customers Portal View invoices page. If your account has a saved credit card, the card is charged for the invoice amount. If we are unable to process a payment or the auto-renewal fails for any other reason, you have 14 days to renew your subscription, after which your GitLab tier is downgraded. + +Email notifications + + +15 days before a subscription automatically renews, an email is sent with information about the renewal. + + + If your credit card is expired, the email tells you how to update it. + If you have any outstanding overages or subscription isn’t able to auto-renew for any other reason, the email tells you to contact our Sales team or renew in Customers Portal. + If there are no issues, the email specifies the names and quantity of the products being renewed. The email also includes the total amount you owe. If your usage increases or decreases before renewal, this amount can change. + + +Enable or disable automatic subscription renewal + + +To view or change automatic subscription renewal (at the same tier as the +previous period), sign in to the Customers Portal, and: + + + If the subscription card displays Expires on DATE, your subscription is not set to automatically renew. To enable automatic renewal, in Subscription actions ( ), select Turn on auto-renew. + If the subscription card displays Autorenews on DATE, your subscription is set to automatically renew at the end of the subscription period. To cancel automatic renewal, in Subscription actions ( ), select Cancel subscription. + + +If you have difficulty during the renewal process, contact the +Support team for assistance. + +Renew for fewer seats + + +There are several options to renew a subscription for fewer seats, as long as the seat total is equal to or greater than the billable user quantity at the time of renewal: + + + +Turn off auto-renewal to avoid renewing at a higher seat quantity. + +Manually renew within 15 days of subscription renewal date, and specify the desired seat quantity. + Work with the Sales team to renew your subscription. + + +Upgrade your subscription tier + + +To upgrade your GitLab tier: + + + Sign in to the Customers Portal. + Select Upgrade on the relevant subscription card. + Select the desired upgrade. + Confirm the active form of payment, or add a new form of payment. + Select the I accept the Privacy Policy and Terms of Service checkbox. + Select Purchase. + + +The following is emailed to you: + + + A payment receipt. You can also access this information in the Customers Portal under +View invoices. + A new activation code for your license. + + +Add the activation code to your instance. +The new tier takes effect when the new license is activated. + +Add or change the contacts for your subscription + + +Contacts can renew a subscription, cancel a subscription, or transfer the subscription to a different namespace. + +For information about how to transfer ownership of the Customers Portal account to another person, see +Change profile owner information. 
+ +To add a secondary contact for your subscription: + + + Ensure an account exists in the +Customers Portal for the user you want to add. + +Create a ticket with the Support team. Include any relevant material in your request. + + +Subscription expiry + + +When your license expires, GitLab locks down features, like Git pushes +and issue creation. Then, your instance becomes read-only and +an expiration message is displayed to all administrators. + +For GitLab self-managed instances, you have a 14-day grace period +before this occurs. + + + To resume functionality, activate a new license. + To fall back to Free features, delete the expired license. + + +Activate a license file or key + + +If you have a license file or key, you can activate it in the Admin Area. + +Contact Support + + + + See the tiers of GitLab Support. + +Submit a request through the Support Portal. + + +We also encourage all users to search our project trackers for known issues and +existing feature requests in the GitLab project. + +These issues are the best avenue for getting updates on specific product plans +and for communicating directly with the relevant GitLab team members. + +Storage + + +The amount of storage and transfer for self-managed instances has no application limits. Administrators are responsible for the underlying infrastructure costs and can set repository size limits. + +Troubleshooting + + +Subscription data fails to synchronize + + +If the synchronization job is not working, ensure you allow network traffic from your GitLab +instance to IP addresses 172.64.146.11:443 and 104.18.41.245:443 (customers.gitlab.com). + +Credit card declined + + +If your credit card is declined when purchasing a GitLab subscription, possible reasons include: + + + The credit card details provided are incorrect. + The credit card account has insufficient funds. + You are using a virtual credit card and it has insufficient funds, or has expired. + The transaction exceeds the credit limit. + The transaction exceeds the credit card’s maximum transaction amount. + + +Check with your financial institution to confirm if any of these reasons apply. If they don’t +apply, contact GitLab Support. + +Check daily and historical billable users + + +Administrators can get a list of daily and historical billable users in your GitLab instance. + + + +Start a Rails console session. + + Count the number of users in the instance: + + +User.billable.count + + + + Get the historical maximum number of users on the instance from the past year: + + +::HistoricalData.max_historical_user_count(from: 1.year.ago.beginning_of_day, to: Time.current.end_of_day) + + + + +Update daily billable and historical users + + +Administrators can trigger a manual update of the daily and historical billable users in your GitLab instance. + + + +Start a Rails console session. + + Force an update of the daily billable users: + + +identifier = Analytics::UsageTrends::Measurement.identifiers[:billable_users] +::Analytics::UsageTrends::CounterJobWorker.new.perform(identifier, User.minimum(:id), User.maximum(:id), Time.zone.now) + + + + Force an update of the historical max billable users: + + +::HistoricalDataWorker.new.perform + + + + + +2. Issue boards + + + +Issue boards + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +The issue board is a software project management tool used to plan, +organize, and visualize a workflow for a feature or product release. + +You can use it as a Kanban or a +Scrum board. 
+ +Issue boards pair issue tracking and project management, keeping everything together, +so you can organize your workflow on a single platform. + +Issue boards use issues and labels. +Your issues appear as cards in vertical lists, organized by their assigned +labels, milestones, or assignees. + +Issue boards help you to visualize and manage your entire process in GitLab. +You add your labels, and then create the corresponding list for your existing issues. +When you’re ready, you can drag your issue cards from one step to another one. + +An issue board can show you the issues your team is working on, who is assigned to each, +and where the issues are in the workflow. + +To let your team members organize their own workflows, use +multiple issue boards. This allows creating multiple issue +boards in the same project. + + + +Different issue board features are available in different GitLab tiers: + + + + + Tier + Number of project issue boards + Number of group issue boards + + Configurable issue boards + Assignee lists + + + + + Free + Multiple + 1 + + No + + No + + + Premium + Multiple + Multiple + + Yes + + Yes + + + Ultimate + Multiple + Multiple + + Yes + + Yes + + + + +Read more about GitLab Enterprise features for issue boards. + + + + +Watch a video presentation (April 2020) of +the issue board feature. + +Multiple issue boards + + + +History + + + + + Multiple issue boards per project moved to GitLab Free in 12.1. + Multiple issue boards per group are available in GitLab Premium. + + + + + + +Multiple issue boards allow for more than one issue board for: + + + A project in all tiers + A group in the Premium and Ultimate tier + + +This is great for large projects with more than one team or when a repository hosts the code of multiple products. + +Using the search box at the top of the menu, you can filter the listed boards. + +When you have ten or more boards available, a Recent section is also shown in the menu, with +shortcuts to your last four visited boards. + + + +When you’re revisiting an issue board in a project or group with multiple boards, +GitLab automatically loads the last board you visited. + +Create an issue board + + +Prerequisites: + + + You must have at least the Reporter role for the project. + + +To create a new issue board: + + + In the upper-left corner of the issue board page, select the dropdown list with the current board name. + Select Create new board. + Enter the new board’s name and select its scope: milestone, labels, assignee, or weight. + + +Delete an issue board + + +Prerequisites: + + + You must have at least the Reporter role for the project. + + +To delete the currently active issue board: + + + In the upper-left corner of the issue board page, select the dropdown list with the current board name. + Select Delete board. + Select Delete to confirm. + + +Issue boards use cases + + +You can tailor GitLab issue boards to your own preferred workflow. +Here are some common use cases for issue boards. + +For examples of using issue boards along with epics, +issue health status, and +scoped labels for various Agile frameworks, see: + + + +How to use GitLab for Agile portfolio planning and project management blog post (November 2020) + + +Cross-project Agile work management with GitLab (15 min, July 2020) + + +Use cases for a single issue board + + +With the GitLab Flow you can +discuss proposals in issues, label them, and organize and prioritize them with issue boards. 
+ +For example, let’s consider this simplified development workflow: + + + You have a repository that hosts your application’s codebase, and your team actively contributes code. + Your backend team starts working on a new implementation, gathers feedback and approval, and +passes it over to the frontend team. + When frontend is complete, the new feature is deployed to a staging environment to be tested. + When successful, it’s deployed to production. + + +If you have the labels Backend, Frontend, Staging, and +Production, and an issue board with a list for each, you can: + + + Visualize the entire flow of implementations since the beginning of the development life cycle +until deployed to production. + Prioritize the issues in a list by moving them vertically. + Move issues between lists to organize them according to the labels you’ve set. + Add multiple issues to lists in the board by selecting one or more existing issues. + + + + +Use cases for multiple issue boards + + +With multiple issue boards, +each team can have their own board to organize their workflow individually. + + + See the video: Portfolio Planning - Portfolio Management. + + + + + +Scrum team + + +With multiple issue boards, each team has one board. Now you can move issues through each +part of the process. For example: To Do, Doing, and Done. + +Organization of topics + + +Create lists to order issues by topic and quickly change them between topics or groups, +such as between UX, Frontend, and Backend. The changes are reflected across boards, +as changing lists updates the labels on each issue accordingly. + +Issue board workflow between teams + + +For example, suppose we have a UX team with an issue board that contains: + + + To Do + Doing + Frontend + + +When finished with something, they move the card to Frontend. The Frontend team’s board looks like: + + + Frontend + Doing + Done + + +Cards finished by the UX team automatically appear in the Frontend column when they are ready +for them. + +For a tutorial how to set up your boards in a similar way with scoped labels, see +Tutorial: Set up issue boards for team hand-off. + + + note For a broader use case, see the blog post +What is GitLab Flow?. +For a real use case example, you can read why +Codepen decided to adopt issue boards +to improve their workflow with multiple boards. + + +Quick assignments + + +To quickly assign issues to your team members: + + + Create assignee lists for each team member. + Drag an issue onto the team member’s list. + + +Issue board terminology + + +An issue board represents a unique view of your issues. It can have multiple lists with each +list consisting of issues represented by cards. + +A list is a column on the issue board that displays issues matching certain attributes. +In addition to the default “Open” and “Closed” lists, each additional list shows issues matching +your chosen label, assignee, or milestone. On the top of each list you can see the number of issues +that belong to it. Types of lists include: + + + +Open (default): all open issues that do not belong to one of the other lists. +Always appears as the leftmost list. + +Closed (default): all closed issues. Always appears as the rightmost list. + +Label list: all open issues for a label. + +Assignee list: all open issues assigned to a user. + +Milestone list: all open issues for a milestone. + + +A Card is a box on a list, and it represents an issue. You can drag cards from one list to +another to change their label, assignee, or milestone. 
The information you can see on a +card includes: + + + Issue title + Associated labels + Issue number + Assignee + + +Ordering issues in a list + + +Prerequisites: + + + You must have at least the Reporter role for the project. + + +When an issue is created, the system assigns a relative order value that is greater than the maximum value +of that issue’s project or root group. This means the issue is at the bottom of any issue list that +it appears in. + +When you visit a board, issues appear ordered in any list. You’re able to change +that order by dragging the issues. The changed order is saved, so that anybody who visits the same +board later sees the reordering, with some exceptions. + +Any time you drag and reorder the issue, its relative order value changes accordingly. +Then, any time that issue appears in any board, the ordering is done according to +the updated relative order value. If a user in your GitLab instance +drags issue A above issue B, the ordering is maintained when these two issues are subsequently +loaded in any board in the same instance. +This could be a different project board or a different group +board, for example. + +This ordering also affects issue lists. +Changing the order in an issue board changes the ordering in an issue list, +and vice versa. + +Focus mode + + +To enable or disable focus mode, in the upper-right corner, select Toggle focus mode ( ). +In focus mode, the navigation UI is hidden, allowing you to focus on issues in the board. + +Group issue boards + + +Accessible at the group navigation level, a group issue board offers the same features as a project-level board. +It can display issues from all projects that fall under the group and its descendant subgroups. + +Users on GitLab Free can use a single group issue board. + +GitLab Enterprise features for issue boards + + +GitLab issue boards are available on the GitLab Free tier, but some +advanced functionality is present in higher tiers only. + +Configurable issue boards + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + Setting current iteration as scope introduced in GitLab 13.8. + Moved to GitLab Premium in 13.9. + + + + + + +An issue board can be associated with a milestone, +labels, assignee, weight, and current iteration, +which automatically filter the board issues accordingly. +This allows you to create unique boards according to your team’s need. + + + +You can define the scope of your board when creating it or by selecting the Edit board button. +After a milestone, iteration, assignee, or weight is assigned to an issue board, you can no longer +filter through these in the search bar. To do that, you need to remove the desired scope +(for example, milestone, assignee, or weight) from the issue board. + +If you don’t have editing permission in a board, you’re still able to see the configuration by +selecting View scope. + + +Watch a video presentation of +the configurable issue board feature. + +Sum of issue weights + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + Moved to GitLab Premium in 13.9. + + + + + + +The top of each list indicates the sum of issue weights for the issues that +belong to that list. This is useful when using boards for capacity allocation, +especially in combination with assignee lists. 
+ + + +Assignee lists + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +As in a regular list showing all issues with a chosen label, you can add +an assignee list that shows all issues assigned to a user. +You can have a board with both label lists and assignee lists. + +Prerequisites: + + + You must have at least the Reporter role for the project. + + +To add an assignee list: + + + Select Create list. + Select Assignee. + In the dropdown list, select a user. + Select Add to board. + + +Now that the assignee list is added, you can assign or unassign issues to that user +by moving issues to and from an assignee list. +To remove an assignee list, just as with a label list, select the trash icon. + + + +Milestone lists + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +You’re also able to create lists of a milestone. These are lists that filter issues by the assigned +milestone, giving you more freedom and visibility on the issue board. + +Prerequisites: + + + You must have at least the Reporter role for the project. + + +To add a milestone list: + + + Select Create list. + Select Milestone. + In the dropdown list, select a milestone. + Select Add to board. + + +Like the assignee lists, you’re able to drag issues +to and from a milestone list to manipulate the milestone of the dragged issues. +As in other list types, select the trash icon to remove a list. + + + +Iteration lists + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + +Introduced in GitLab 13.11 with a flag named iteration_board_lists. Enabled by default. + +Generally available in GitLab 14.6. Feature flag iteration_board_lists removed. + + + + + + +You can create lists of issues in an iteration. + +Prerequisites: + + + You must have at least the Reporter role for the project. + + +To add an iteration list: + + + Select Create list. + Select Iteration. + In the dropdown list, select an iteration. + Select Add to board. + + +Like the milestone lists, you’re able to drag issues +to and from a iteration list to manipulate the iteration of the dragged issues. + + + +Group issues in swimlanes + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + Grouping by epic introduced in GitLab 13.6. + Editing issue titles in the issue sidebar introduced in GitLab 13.8. + Editing iteration in the issue sidebar introduced in GitLab 13.9. + + + + + + +With swimlanes you can visualize issues grouped by epic. +Your issue board keeps all the other features, but with a different visual organization of issues. +This feature is available both at the project and group level. + + +For a video overview, see Epics Swimlanes Walkthrough - 13.6 (November 2020). + +Prerequisites: + + + You must have at least the Reporter role for the project. + + +To group issues by epic in an issue board: + + + Select Group by. + Select Epic. + + + + +To edit an issue without leaving this view, select the issue card (not its title), and a sidebar +appears on the right. There you can see and edit the issue’s: + + + Title + Assignees + Epic + Milestone + Time tracking value (view only) + Due date + Labels + Weight + Notifications setting + + +You can also drag issues to change their position and epic assignment: + + + To reorder an issue, drag it to the new position within a list. + To assign an issue to another epic, drag it to the epic’s horizontal lane. 
+ To unassign an issue from an epic, drag it to the Issues with no epic assigned lane. + To move an issue to another epic and another list, at the same time, drag the issue diagonally. + + + + +Work in progress limits + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + Moved to GitLab Premium in 13.9. + + + + + + +You can set a work in progress (WIP) limit for each issue list on an issue board. When a limit is +set, the list’s header shows the number of issues in the list and the soft limit of issues. A line in the list separates items within the limit from those in excess of the limit. +You cannot set a WIP limit on the default lists (Open and Closed). + +Examples: + + + When you have a list with four issues and a limit of five, the header shows 4/5. +If you exceed the limit, the current number of issues is shown in red. + You have a list with five issues with a limit of five. When you move another issue to that list, +the list’s header displays 6/5, with the six shown in red. The work in progress line is shown before the sixth issue. + + +Prerequisites: + + + You must have at least the Reporter role for the project. + + +To set a WIP limit for a list, in an issue board: + + + On the top of the list you want to edit, select Edit list settings ( ). +The list settings sidebar opens on the right. + Next to Work in progress limit, select Edit. + Enter the maximum number of issues. + Press Enter to save. + + +Blocked issues + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + +Introduced in GitLab 13.10: View blocking issues when hovering over the “blocked” icon. + + + + + + +If an issue is blocked by another issue, an icon appears next to its title to indicate its blocked +status. + +When you hover over the blocked icon ( ), a detailed information popover is displayed. + + + +Actions you can take on an issue board + + + + +Edit an issue. + +Create a new list. + +Remove an existing list. + +Remove an issue from a list. + +Filter issues that appear across your issue board. + +Move issues and lists. + +Multi-select issue cards. + Drag and reorder the lists. + Change issue labels (by dragging an issue between lists). + Close an issue (by dragging it to the Closed list). + + +Edit an issue + + + +History + + + + + Editing title, iteration, and confidentiality introduced in GitLab 14.1. + + + + + + +You can edit an issue without leaving the board view. +To open the right sidebar, select an issue card (not its title). + +Prerequisites: + + + You must have at least the Reporter role for the project. + + +You can edit the following issue attributes in the right sidebar: + + + Assignees + Confidentiality + Due date + Epic + Health status + Iteration + Labels + Milestone + Notifications setting + Title + Weight + + +Additionally, you can also see the time tracking value. + +Create a new list + + +To create a new list, in the upper-right corner of the issue board, select Create. + + + +Then, choose the label, user or milestone to base the new list on. The new list is inserted +at the end of the lists, before Closed. To move and reorder lists, drag them around. + +Remove a list + + +Removing a list doesn’t have any effect on issues and labels, as it’s just the +list view that’s removed. You can always create it again later if you need. + +Prerequisites: + + + You must have at least the Reporter role for the project. 
+ + +To remove a list from an issue board: + + + On the top of the list you want to remove, select Edit list settings ( ). +The list settings sidebar opens on the right. + Select Remove list. + On the confirmation dialog, select Remove list again. + + +Add issues to a list + + +Prerequisites: + + + You must have at least the Reporter role for the project. + + +If your board is scoped to one or more attributes, go to the issues you want to add and apply the +same attributes as your board scope. + +For example, to add an issue to a list scoped to the Doing label, in a group issue board: + + + Go to an issue in the group or one of the subgroups or projects. + Add the Doing label. + + +The issue should now show in the Doing list on your issue board. + +Remove an issue from a list + + +When an issue should no longer belong to a list, you can remove it. + +Prerequisites: + + + You must have at least the Reporter role for the project. + + +The steps depend on the scope of the list: + + + To open the right sidebar, select the issue card. + Remove what’s keeping the issue in the list. +If it’s a label list, remove the label. If it’s an assignee list, unassign the user. + + +Filter issues + + + +History + + + + + Filtering by iteration introduced in GitLab 13.6. + Filtering by issue type introduced in GitLab 14.6. + + + + + + +You can use the filters on top of your issue board to show only +the results you want. It’s similar to the filtering used in the issue tracker. + +Prerequisites: + + + You must have at least the Reporter role for the project. + + +You can filter by the following: + + + Assignee + Author + Epic + Iteration + Label + Milestone + My Reaction + Release + Type (issue/incident) + Weight + + +Filtering issues in a group board + + +When filtering issues in a group board, keep this behavior in mind: + + + Milestones: you can filter by the milestones belonging to the group and its descendant groups. + Labels: you can only filter by the labels belonging to the group but not its descendant groups. + + +When you edit issues individually using the right sidebar, you can additionally select the +milestones and labels from the project that the issue is from. + +Move issues and lists + + +You can move issues and lists by dragging them. + +Prerequisites: + + + You must have at least the Reporter role for a project in GitLab. + + +To move an issue, select the issue card and drag it to another position in its current list or +into a different list. Learn about possible effects in Dragging issues between lists. + +To move a list, select its top bar, and drag it horizontally. +You can’t move the Open and Closed lists, but you can hide them when editing an issue board. + +Move an issue to the start of the list + + + +History + + + + + +Introduced in GitLab 15.4. + + + + + + +You can move issues to the top of the list with a menu shortcut. + +Your issue is moved to the top of the list even if other issues are hidden by a filter. + +Prerequisites: + + + You must at least have the Reporter role for the project. + + +To move an issue to the start of the list: + + + In an issue board, hover over the card of the issue you want to move. + Select Card options ( ), then Move to start of list. + + +Move an issue to the end of the list + + + +History + + + + + +Introduced in GitLab 15.4. + + + + + + +You can move issues to the bottom of the list with a menu shortcut. + +Your issue is moved to the bottom of the list even if other issues are hidden by a filter. 
+ +Prerequisites: + + + You must at least have the Reporter role for the project. + + +To move an issue to the end of the list: + + + In an issue board, hover over the card of the issue you want to move. + Select Card options ( ), then Move to end of list. + + +Dragging issues between lists + + +To move an issue to another list, select the issue card and drag it onto that list. + +When you drag issues between lists, the result is different depending on the source list +and the target list. + + + + +   + To Open + To Closed + To label B list + To assignee Bob list + + + + + From Open + - + Close issue + Add label B + Assign Bob + + + From Closed + Reopen issue + - + Reopen issue and add label B + Reopen issue and assign Bob + + + From label A list + Remove label A + Close issue + Remove label A and add label B + Assign Bob + + + From assignee Alice list + Unassign Alice + Close issue + Add label B + Unassign Alice and assign Bob + + + + +Multi-select issue cards + + + +History + + + + + +Moved behind a feature flag named board_multi_select in GitLab 14.0. Disabled by default. + + + + + + + + On self-managed GitLab, by default this feature is not available. To make it available, ask an +administrator to enable the feature flag named board_multi_select. +On GitLab.com and GitLab Dedicated, this feature is not available. +The feature is not ready for production use. + + +You can select multiple issue cards, then drag the group to another position within the list, or to +another list. This makes it faster to reorder many issues at once. + +Prerequisites: + + + You must have at least the Reporter role for the project. + + +To select and move multiple cards: + + + Select each card with Control+Click on Windows or Linux, or Command+Click on MacOS. + Drag one of the selected cards to another position or list and all selected cards are moved. + + + + +Tips + + +A few things to remember: + + + Moving an issue between lists removes the label from the list it came from +and adds the label from the list it goes to. + An issue can exist in multiple lists if it has more than one label. + Lists are populated with issues automatically if the issues are labeled. + Selecting the issue title inside a card takes you to that issue. + Selecting a label inside a card quickly filters the entire issue board +and show only the issues from all lists that have that label. + For performance and visibility reasons, each list shows the first 20 issues +by default. If you have more than 20 issues, start scrolling down and the next +20 appear. + + +Troubleshooting issue boards + + + +There was a problem fetching users on group issue board when filtering by Author or Assignee + + +If you get a banner with There was a problem fetching users error when filtering by author or assignee on +group issue board, make sure that you are added as a member to the current group. +Non-members do not have permission to list group members when filtering by author or assignee on issue boards. + +To fix this error, you should add all of your users to the top-level group with at least the Guest role. + +Use Rails console to fix issue boards not loading and timing out + + +If you see issue board not loading and timing out in UI, use Rails console to call the Issue Rebalancing service to fix it: + + + +Start a Rails console session. + + Run these commands: + + +p = Project.find_by_full_path('/') + +Issues::RelativePositionRebalancingService.new(p.root_namespace.all_projects).execute + + + To exit the Rails console, type quit. 
+ + + +" +what can GitLab Duo Chat do with ai,,"1. GitLab Duo + + + +GitLab Duo + + + +History + + + + + +First GitLab Duo features introduced in GitLab 16.0. + +Removed third-party AI setting in GitLab 16.6. + +Removed support for OpenAI from all GitLab Duo features in GitLab 16.6. + + + + + + +GitLab is creating AI-assisted features across our DevSecOps platform. These features aim to help increase velocity and solve key pain points across the software development lifecycle. + +Some features are still in development. View details about support for each status (Experiment, Beta, Generally Available). + +As features become Generally Available, GitLab is transparent and updates the documentation to clearly state how and where you can access these capabilities. + + + + + Goal + Feature + Tier/Offering/Status + + + + + Helps you write code more efficiently by showing code suggestions as you type. Watch overview + + Code Suggestions + +Tier: Premium or Ultimate with GitLab Duo Pro Offering: GitLab.com, Self-managed, GitLab Dedicated + + + Processes and generates text and code in a conversational manner. Helps you quickly identify useful information in large volumes of text in issues, epics, code, and GitLab documentation. + Chat + +Tier: Premium, Ultimate Offering: GitLab.com, Self-managed, GitLab Dedicated Status: Beta (Subject to the Testing Agreement) + + + Helps you discover or recall Git commands when and where you need them. + Git suggestions + +Tier: Ultimate Offering: GitLab.com Status: Experiment + + + Assists with quickly getting everyone up to speed on lengthy conversations to help ensure you are all on the same page. + Discussion summary + +Tier: Ultimate Offering: GitLab.com Status: Experiment + + + Generates issue descriptions. + Issue description generation + +Tier: UltimateOffering: GitLab.com Status: Experiment + + + Automates repetitive tasks and helps catch bugs early. + Test generation + +Tier: Ultimate Offering: GitLab.com, Self-managed, GitLab Dedicated Status: Beta + + + Generates a description for the merge request based on the contents of the template. + Merge request template population + +Tier: UltimateOffering: GitLab.com Status: Experiment + + + Assists in creating faster and higher-quality reviews by automatically suggesting reviewers for your merge request. Watch overview + + Suggested Reviewers + +Tier: Ultimate Offering: GitLab.comStatus: Generally Available + + + Efficiently communicates the impact of your merge request changes. + Merge request summary + +Tier: Ultimate Offering: GitLab.com Status: Experiment + + + Helps ease merge request handoff between authors and reviewers and help reviewers efficiently understand suggestions. + Code review summary + +Tier: Ultimate Offering: GitLab.com Status: Experiment + + + Helps you remediate vulnerabilities more efficiently, boost your skills, and write more secure code. Watch overview + + Vulnerability explanation + +Tier: Ultimate Offering: GitLab.com Status: Beta + + + Generates a merge request containing the changes required to mitigate a vulnerability. + Vulnerability resolution + +Tier: Ultimate Offering: GitLab.com Status: Experiment + + + Helps you understand code by explaining it in English language. Watch overview + + Code explanation + +Tier: Ultimate Offering: GitLab.com Status: Experiment + + + Assists you in determining the root cause for a pipeline failure and failed CI/CD build. 
+ Root cause analysis + +Tier: Ultimate Offering: GitLab.com Status: Experiment + + + Assists you with predicting productivity metrics and identifying anomalies across your software development lifecycle. + Value stream forecasting + +Tier: Ultimate Offering: GitLab.com, Self-managed, GitLab Dedicated Status: Experiment + + + + +Enable AI/ML features + + +For features listed as Experiment and Beta: + + + These features are disabled by default. + To enable, a user with the Owner role for the group must turn on this setting. +On GitLab.com, this setting is available for Ultimate subscriptions only. + These features are subject to the +Testing Terms of Use. + + +For all self-managed features: + + + Your firewalls and HTTP proxy servers must allow outbound connections +to https://cloud.gitlab.com:443. To use an HTTP/S proxy, both gitLab_workhorse and gitLab_rails must have the necessary +web proxy environment variables set. + + +For other features: + + + +Code Suggestions is enabled when you purchase the +GitLab Duo Pro add-on and assign seats to users. + +Chat + + View how to enable for self-managed. + View how to enable for GitLab.com. + + + + +Disable GitLab Duo features + + + +History + + + + + +Settings to disable AI features were introduced in GitLab 16.10. + + + + + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +You can disable GitLab Duo AI features for a group, project, or instance. +When it’s disabled, any attempt to use GitLab Duo features on the group, project, or instance is blocked and an error is displayed. +GitLab Duo features are also blocked for resources in the group or project, like epics, +issues, and vulnerabilities. + +For the group or project + + +Prerequisites: + + + You must have the Owner role for the group or project. + + +To disable GitLab Duo: + + + Use the GitLab GraphQL API +groupUpdate or projectSettingsUpdate mutation. + Disable GitLab Duo for the project or group by setting the duo_features_enabled setting to false. +(The default is true.) + Optional. To make all groups or projects in the hierarchy inherit the value for a top-level group, +set lock_duo_features_enabled to true. (The default is false.) +The child groups and projects cannot override this value. + + +For an instance + + +Prerequisites: + + + You must be an administrator. + + +To disable GitLab Duo: + + + Use the application settings API. + Disable GitLab Duo for the instance by setting the duo_features_enabled setting to false. +(The default is true.) + Optional. To ensure this setting cannot be overridden at the group or project level, +set lock_duo_features_enabled to true. (The default is false.) +The child groups and projects cannot override this value. + + +Future plans + + + + An issue exists for making this setting +available in the UI. + An issue exists for making the setting +cascade to all groups and projects. Right now the projects and groups do not +display the setting of the top-level group. To ensure the setting cascades, +ensure lock_duo_features_enabled is set to true. + + +Experimental AI features and how to use them + + +The following subsections describe the experimental AI features in more detail. + +Explain code in the Web UI with Code explanation + + + +Tier: Ultimate +Offering: GitLab.com +Status: Experiment + + +History + + + + + Introduced in GitLab 15.11 as an Experiment on GitLab.com. + + + + + + +To use this feature: + + + The parent group of the project must: + + Enable the experiment and beta features setting. 
+ + + You must be a member of the project with sufficient permissions to view the repository. + + +GitLab can help you get up to speed faster if you: + + + Spend a lot of time trying to understand pieces of code that others have created, or + Struggle to understand code written in a language that you are not familiar with. + + +By using a large language model, GitLab can explain the code in natural language. + +To explain your code: + + + On the left sidebar, select Search or go to and find your project. + Select any file in your project that contains code. + On the file, select the lines that you want to have explained. + On the left side, select the question mark ( ). You might have to scroll to the first line of your selection to view it. This sends the selected code, together with a prompt, to provide an explanation to the large language model. + A drawer is displayed on the right side of the page. Wait a moment for the explanation to be generated. + Provide feedback about how satisfied you are with the explanation, so we can improve the results. + + +You can also have code explained in the context of a merge request. To explain +code in a merge request: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests, then select your merge request. + On the secondary menu, select Changes. + + On the file you would like explained, select the three dots ( ) and select View File @ $SHA. + + A separate browser tab opens and shows the full file with the latest changes. + + On the new tab, select the lines that you want to have explained. + On the left side, select the question mark ( ). You might have to scroll to the first line of your selection to view it. This sends the selected code, together with a prompt, to provide an explanation to the large language model. + A drawer is displayed on the right side of the page. Wait a moment for the explanation to be generated. + Provide feedback about how satisfied you are with the explanation, so we can improve the results. + + + + +We cannot guarantee that the large language model produces results that are correct. Use the explanation with caution. + +Summarize issue discussions with Discussion summary + + + +Tier: Ultimate +Offering: GitLab.com +Status: Experiment + + +History + + + + + +Introduced in GitLab 16.0 as an Experiment. + + + + + + +To use this feature: + + + The parent group of the issue must: + + Enable the experiment and beta features setting. + + + You must be a member of the project with sufficient permissions to view the issue. + + +You can generate a summary of discussions on an issue: + + + In an issue, scroll to the Activity section. + Select View summary. + + +The comments in the issue are summarized in as many as 10 list items. +The summary is displayed only for you. + +Provide feedback on this experimental feature in issue 407779. + +Data usage: When you use this feature, the text of all comments on the issue are sent to the large +language model referenced above. + +Forecast deployment frequency with Value stream forecasting + + + +Tier: Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated +Status: Experiment + + +History + + + + + +Introduced in GitLab 16.2 as an Experiment. + + + + + + +To use this feature: + + + The parent group of the project must: + + Enable the experiment and beta features setting. + + + You must be a member of the project with sufficient permissions to view the CI/CD analytics. 
+ + +In CI/CD Analytics, you can view a forecast of deployment frequency: + + + On the left sidebar, select Search or go to and find your project. + Select Analyze > CI/CD analytics. + Select the Deployment frequency tab. + Turn on the Show forecast toggle. + On the confirmation dialog, select Accept testing terms. + + +The forecast is displayed as a dotted line on the chart. Data is forecasted for a duration that is half of the selected date range. +For example, if you select a 30-day range, a forecast for the following 15 days is displayed. + + + +Provide feedback on this experimental feature in issue 416833. + +Root cause analysis + + + +Tier: Ultimate +Offering: GitLab.com +Status: Experiment + + +History + + + + + +Introduced in GitLab 16.2 as an Experiment. + + + + + + +To use this feature: + + + The parent group of the project must: + + Enable the experiment and beta features setting. + + + You must be a member of the project with sufficient permissions to view the CI/CD job. + + +When the feature is available, the “Root cause analysis” button will appears on +a failed CI/CD job. Selecting this button generates an analysis regarding the +reason for the failure. + +Summarize an issue with Issue description generation + + + +Tier: Ultimate +Offering: GitLab.com +Status: Experiment + + +History + + + + + +Introduced in GitLab 16.3 as an Experiment. + + + + + + +To use this feature: + + + The parent group of the project must: + + Enable the experiment and beta features setting. + + + You must be a member of the project with sufficient permissions to view the issue. + + +You can generate the description for an issue from a short summary. + + + Create a new issue. + Above the Description field, select AI actions > Generate issue description. + Write a short description and select Submit. + + +The issue description is replaced with AI-generated text. + +Provide feedback on this experimental feature in issue 409844. + +Data usage: When you use this feature, the text you enter is sent to the large +language model referenced above. + +Language models + + + + + + Feature + Large Language Model + + + + + Git suggestions + Vertex AI Codey codechat-bison + + + + Discussion summary + Anthropic Claude-2 + + + + Issue description generation + Anthropic Claude-2 + + + + Code Suggestions + For Code Completion: Vertex AI Codey code-gecko For Code Generation: Anthropic Claude-2 + + + + Test generation + Anthropic Claude-2 + + + + Merge request template population + Vertex AI Codey text-bison + + + + Suggested Reviewers + GitLab creates a machine learning model for each project, which is used to generate reviewers View the issue + + + + Merge request summary + Vertex AI Codey text-bison + + + + Code review summary + Vertex AI Codey text-bison + + + + Vulnerability explanation + Vertex AI Codey text-bison Anthropic Claude-2 if degraded performance + + + Vulnerability resolution + Vertex AI Codey code-bison + + + + Code explanation + Vertex AI Codey codechat-bison + + + + GitLab Duo Chat + Anthropic Claude-2 Vertex AI Codey textembedding-gecko + + + + Root cause analysis + Vertex AI Codey text-bison + + + + Value stream forecasting + Statistical forecasting + + + + +Data usage + + +GitLab AI features leverage generative AI to help increase velocity and aim to help make you more productive. Each feature operates independently of other features and is not required for other features to function. GitLab selects the best-in-class large-language models for specific tasks. 
We use Google Vertex AI Models and Anthropic Claude. + +Progressive enhancement + + +These features are designed as a progressive enhancement to existing GitLab features across our DevSecOps platform. They are designed to fail gracefully and should not prevent the core functionality of the underlying feature. You should note each feature is subject to its expected functionality as defined by the relevant feature support policy. + +Stability and performance + + +These features are in a variety of feature support levels. Due to the nature of these features, there may be high demand for usage which may cause degraded performance or unexpected downtime of the feature. We have built these features to gracefully degrade and have controls in place to allow us to mitigate abuse or misuse. GitLab may disable beta and experimental features for any or all customers at any time at our discretion. + +Data privacy + + +GitLab Duo AI features are powered by a generative AI models. The processing of any personal data is in accordance with our Privacy Statement. You may also visit the Sub-Processors page to see the list of our Sub-Processors that we use to provide these features. + +Data retention + + +The below reflects the current retention periods of GitLab AI model Sub-Processors: + + + Anthropic discards model input and output data immediately after the output is provided. Anthropic currently does not store data for abuse monitoring. Model input and output is not used to train models. + Google discards model input and output data immediately after the output is provided. Google currently does not store data for abuse monitoring. Model input and output is not used to train models. + + +All of these AI providers are under data protection agreements with GitLab that prohibit the use of Customer Content for their own purposes, except to perform their independent legal obligations. + +GitLab retains input and output for up to 30 days for the purpose of troubleshooting, debugging, and addressing latency issues. + +Telemetry + + +GitLab Duo collects aggregated or de-identified first-party usage data through our Snowplow collector. This usage data includes the following metrics: + + + Number of unique users + Number of unique instances + Prompt lengths + Model used + Status code responses + API responses times + + +Training data + + +GitLab does not train generative AI models based on private (non-public) data. The vendors we work with also do not train models based on private data. + +For more information on our AI sub-processors, see: + + + Google Vertex AI Models APIs data governance and responsible AI. + Anthropic Claude’s constitution. + + +Model accuracy and quality + + +Generative AI may produce unexpected results that may be: + + + Low-quality + Incoherent + Incomplete + Produce failed pipelines + Insecure code + Offensive or insensitive + Out of date information + + +GitLab is actively iterating on all our AI-assisted capabilities to improve the quality of the generated content. We improve the quality through prompt engineering, evaluating new AI/ML models to power these features, and through novel heuristics built into these features directly. + + +2. GitLab Duo Chat + + + +GitLab Duo Chat + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated +Status: Beta + + +History + + + + + +Introduced as an Experiment for SaaS in GitLab 16.0. + Changed to Beta for SaaS in GitLab 16.6. + +Introduced as a Beta for self-managed in GitLab 16.8. 
+ Changed from Ultimate to Premium tier in GitLab 16.9. + + + + + + +GitLab Duo Chat is your personal AI-powered assistant for boosting productivity. +It can assist various tasks of your daily work with the AI-generated content. +Here are the examples of use cases: + + + + + Feature + Use case example + Supported interfaces + Supported deployments + + + + + Ask about GitLab + I want to know how to create an issue in GitLab. + GitLab, VS Code, and Web IDE 1 + + GitLab.com + + + Ask about a specific issue + I want to summarize this issue. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Ask about a specific epic + I want to summarize this epic. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Ask about code + I want to understand how this code works. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Ask about CI/CD + I want to create a new CI/CD pipeline configuration. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Explain code in the IDE + I want to understand how this code works. + VS Code and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Refactor code in the IDE + I want to refactor this code. + VS Code and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Write tests in the IDE + I want to write a test for this code. + VS Code and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + + + +Footnotes: + + GitLab Duo Chat is not available in Web IDE on self-managed + + + + + + note This is a Beta feature. We’re continuously extending the capabilities and reliability of the responses. + + +Watch a demo + + + + View how to setup and use GitLab Duo Chat. + + + + + +What GitLab Duo Chat can help with + + +GitLab Duo Chat can help in a variety of areas. + +Ask about GitLab + + + +History + + + + + +Introduced for SaaS in GitLab 16.0. + + + + + + +You can ask questions about how GitLab works. Things like: + + + Explain the concept of a 'fork' in a concise manner. + Provide step-by-step instructions on how to reset a user's password. + + + + note This feature is not currently supported in self-managed instances. +See this epic for more information. + + +Ask about a specific issue + + + +History + + + + + +Introduced for SaaS in GitLab 16.0. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can ask about a specific GitLab issue. For example: + + + Generate a summary for the issue identified via this link: + When you are viewing an issue in GitLab, you can ask Generate a concise summary of the current issue. + + How can I improve the description of so that readers understand the value and problems to be solved? + + +Ask about a specific epic + + + +History + + + + + +Introduced for SaaS in GitLab 16.3. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can ask about a specific GitLab epic. For example: + + + Generate a summary for the epic identified via this link: + When you are viewing an epic in GitLab, you can ask Generate a concise summary of the opened epic. + + What are the unique use cases raised by commenters in ? + + +Ask about code + + + +History + + + + + +Introduced for SaaS in GitLab 16.1. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can also ask GitLab Duo Chat to generate code: + + + Write a Ruby function that prints 'Hello, World!' when called. + Develop a JavaScript program that simulates a two-player Tic-Tac-Toe game. 
Provide both game logic and user interface, if applicable. + Create a regular expression for parsing IPv4 and IPv6 addresses in Python. + Generate code for parsing a syslog log file in Java. Use regular expressions when possible, and store the results in a hash map. + Create a product-consumer example with threads and shared memory in C++. Use atomic locks when possible. + Generate Rust code for high performance gRPC calls. Provide a source code example for a server and client. + + +And you can ask GitLab Duo Chat to explain code: + + + Provide a clear explanation of the given Ruby code: def sum(a, b) a + b end. Describe what this code does and how it works. + + +Alternatively, you can use the /explain command to explain the selected code in your editor. + +For more practical examples, see the GitLab Duo examples. + +Ask about errors + + +Programming languages that require compiling the source code may throw cryptic error messages. Similarly, a script or a web application could throw a stack trace. You can ask GitLab Duo Chat by prefixing the copied error message with, for example, Please explain this error message:. Add the specific context, like the programming language. + + + Explain this error message in Java: Int and system cannot be resolved to a type + Explain when this C function would cause a segmentation fault: sqlite3_prepare_v2() + Explain what would cause this error in Python: ValueError: invalid literal for int() + Why is ""this"" undefined in VueJS? Provide common error cases, and explain how to avoid them. + How to debug a Ruby on Rails stacktrace? Share common strategies and an example exception. + + +For more practical examples, see the GitLab Duo examples. + +Ask about CI/CD + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can ask GitLab Duo Chat to create a CI/CD configuration: + + + Create a .gitlab-ci.yml configuration file for testing and building a Ruby on Rails application in a GitLab CI/CD pipeline. + Create a CI/CD configuration for building and linting a Python application. + Create a CI/CD configuration to build and test Rust code. + Create a CI/CD configuration for C++. Use gcc as compiler, and cmake as build tool. + Create a CI/CD configuration for VueJS. Use npm, and add SAST security scanning. + Generate a security scanning pipeline configuration, optimized for Java. + + +You can also ask to explain specific job errors by copy-pasting the error message, prefixed with Please explain this CI/CD job error message, in the context of :: + + + Please explain this CI/CD job error message in the context of a Go project: build.sh: line 14: go command not found + + +Alternatively, you can use root cause analysis in CI/CD. + +For more practical examples, see the GitLab Duo examples. + +Explain code in the IDE + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + + + note This feature is available in VS Code and the Web IDE only. + + +/explain is a special command to explain the selected code in your editor. +You can also add additional instructions to be considered, for example: /explain the performance +See Use GitLab Duo Chat in VS Code for more information. 
+ + + /explain focus on the algorithm + /explain the performance gains or losses using this code + +/explain the object inheritance (classes, object-oriented) + +/explain why a static variable is used here (C++) + +/explain how this function would cause a segmentation fault (C) + +/explain how concurrency works in this context (Go) + +/explain how the request reaches the client (REST API, database) + + +For more practical examples, see the GitLab Duo examples. + +Refactor code in the IDE + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + + + note This feature is available in VS Code and the Web IDE only. + + +/refactor is a special command to generate a refactoring suggestion for the selected code in your editor. +You can include additional instructions to be considered. For example: + + + Use a specific coding pattern, for example /refactor with ActiveRecord or /refactor into a class providing static functions. + Use a specific library, for example /refactor using mysql. + Use a specific function/algorithm, for example /refactor into a stringstream with multiple lines in C++. + Refactor to a different programming language, for example /refactor to TypeScript. + Focus on performance, for example /refactor improving performance. + Focus on potential vulnerabilities, for example /refactor avoiding memory leaks and exploits. + + +See Use GitLab Duo Chat in the VS Code for more information. + +For more practical examples, see the GitLab Duo examples. + +Write tests in the IDE + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + + + note This feature is available in VS Code and the Web IDE only. + + +/tests is a special command to generate a testing suggestion for the selected code in your editor. +You can also add additional instructions to be considered, for example: /tests using the Boost.Test framework +See Use GitLab Duo Chat in the VS Code for more information. + + + Use a specific test framework, for example /tests using the Boost.test framework (C++) or /tests using Jest (JavaScript). + Focus on extreme test cases, for example /tests focus on extreme cases, force regression testing. + Focus on performance, for example /tests focus on performance. + Focus on regressions and potential exploits, for example /tests focus on regressions and potential exploits. + + +For more practical examples, see the GitLab Duo examples. + +Ask follow up questions + + +You can ask follow-up questions to delve deeper into the topic or task at hand. +This helps you get more detailed and precise responses tailored to your specific needs, +whether it’s for further clarification, elaboration, or additional assistance. + +A follow-up to the question Write a Ruby function that prints 'Hello, World!' when called could be: + + + Can you also explain how I can call and execute this Ruby function in a typical Ruby environment, such as the command line? + + +A follow-up to the question How to start a C# project? could be: + + + Can you also please explain how to add a .gitignore and .gitlab-ci.yml file for C#? + + +For more practical examples, see the GitLab Duo examples. + +Enable GitLab Duo Chat + + +For SaaS users + + +To use this feature, at least one group you’re a member of must +have the experiment and beta features setting enabled. + +You can ask questions about resources that belong only to groups where this setting is enabled. 
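GitLab.com administrators can also check the setting for one specific top-level group from the Rails console. The sketch below relies on assumptions about the underlying schema (the namespace_settings association and the experiment_features_enabled attribute are not a documented interface), so treat it as illustrative only; the snippet in the Troubleshoot Chat access section below is the documented way to verify a user's access.

# Illustrative sketch only: look up a top-level group by its full path and
# read the experiment/beta features flag from its namespace settings.
# The experiment_features_enabled attribute name is an assumption.
group = Group.find_by_full_path('my-top-level-group')
group.namespace_settings&.experiment_features_enabled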
+ +Troubleshoot Chat access + + +If you have access to chat responses you did not expect, you might be part of +a group that has the Use Experiment and Beta features setting enabled. +Review the list of your groups and verify which ones you have access to. + +GitLab.com administrators can verify your access by running this snippet in the Rails console: + +u = User.find_by_username($USERNAME) +u.member_namespaces.namespace_settings_with_ai_features_enabled.with_ai_supported_plan(:ai_chat) + + +You can ask specific questions about group resources (like “summarize this issue”) when this feature is enabled. + +For self-managed users + + + + note Usage of GitLab Duo Chat is governed by the GitLab Testing Agreement. +Learn about data usage when using GitLab Duo Chat. + + +Prerequisites: + + + You have GitLab version 16.8 or later. + The Premium or Ultimate license is activated in your GitLab instance by using cloud licensing. + Your firewalls and HTTP proxy servers allow outbound connections +to cloud.gitlab.com. To use an HTTP proxy, both +gitLab _workhorse and gitLab_rails have the necessary +web proxy environment variables set. + All of the users in your instance have the latest version of their IDE extension. + You are an administrator. + + +To enable GitLab Duo Chat for your self-managed GitLab instance: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > General. + Expand AI-powered features and select Enable Experiment and Beta AI-powered features. + Select Save changes. + To make sure GitLab Duo Chat works immediately, you must +manually synchronize your subscription. + + +Manually synchronize your subscription + + +You must manually synchronize your subscription if either: + + + You have just purchased a subscription for the Premium or Ultimate tier and have upgraded to GitLab 16.8. + You already have a subscription for the Premium or Ultimate tier and have upgraded to GitLab 16.8. + + +Without the manual synchronization, it might take up to 24 hours to activate GitLab Duo Chat on your instance. + +Use GitLab Duo Chat in the GitLab UI + + + + In the lower-left corner, select the Help icon. +The new left sidebar must be enabled. + Select GitLab Duo Chat. A drawer opens on the right side of your screen. + Enter your question in the chat input box and press Enter or select Send. It may take a few seconds for the interactive AI chat to produce an answer. + You can ask a follow-up question. + If you want to ask a new question unrelated to the previous conversation, you may receive better answers if you clear the context by typing /reset into the input box and selecting Send. + + + + note Only the last 50 messages are retained in the chat history. The chat history expires 3 days after last use. + + +Delete all conversations + + +To delete all previous conversations: + + + In the text box, type /clean and select Send. + + +Use GitLab Duo Chat in the Web IDE + + + +Tier: Premium, Ultimate +Status: Experiment + + +History + + + + + Introduced in GitLab 16.6 as an Experiment + + + + + + + +To use GitLab Duo Chat in the Web IDE on GitLab: + + + Open the Web IDE: + + On the left sidebar, select Search or go to and find your project. + Select a file. Then in the upper right, select Edit > Open in Web IDE. + + + Then open Chat by using one of the following methods: + + On the left sidebar, select GitLab Duo Chat. + In the file that you have open in the editor, select some code. + + Right-click and select GitLab Duo Chat. + Select Explain selected code or Generate Tests. 
+ + + Use the keyboard shortcut: ALT+d (on Windows and Linux) or Option+d (on Mac) + + + In the message box, enter your question and press Enter or select Send. + + +If you have selected code in the editor, this selection is sent along with your question to the AI. This way you can ask questions about this code selection. For instance, Could you simplify this?. + + + note GitLab Duo Chat is not available in the Web IDE on self-managed. + + +Use GitLab Duo Chat in VS Code + + + +Tier: Premium, Ultimate +Status: Experiment + + +History + + + + + Introduced in GitLab 16.6 as an Experiment. + + + + + + +To use GitLab Duo Chat in GitLab Workflow extension for VS Code: + + + Install and set up the Workflow extension for VS Code: + + In VS Code, download and Install the GitLab Workflow extension for VS Code. + Configure the GitLab Workflow extension. + + + In VS Code, open a file. The file does not need to be a file in a Git repository. + Open Chat by using one of the following methods: + + On the left sidebar, select GitLab Duo Chat. + In the file that you have open in the editor, select some code. + + Right-click and select GitLab Duo Chat. + Select Explain selected code or Generate Tests. + + + Use the keyboard shortcut: ALT+d (on Windows and Linux) or Option+d (on Mac) + + + In the message box, enter your question and press Enter or select Send. + + +If you have selected code in the editor, this selection is sent along with your question to the AI. This way you can ask questions about this code selection. For instance, Could you simplify this?. + +Perform standard task in the IDE from the context menu or by using slash commands + + +Get code explained, code refactored or get tests generated for code. To do so: + + + Select code in your editor in VS Code or in the Web IDE. + Type one the following slash commands into the chat field: /explain, /refactor or /tests. Alternatively, use the context menu to perform these tasks. + + +When you use one of the slash commands you can also add additional instructions to be considered, for example: /tests using the Boost.Test framework + +Disable Chat in VS Code + + +To disable GitLab Duo Chat in VS Code: + + + Go to Settings > Extensions > GitLab Workflow (GitLab VS Code Extension). + Clear the Enable GitLab Duo Chat assistant checkbox. + + +Give feedback + + +Your feedback is important to us as we continually enhance your GitLab Duo Chat experience. +Leaving feedback helps us customize the Chat for your needs and improve its performance for everyone. + +To give feedback about a specific response, use the feedback buttons in the response message. +Or, you can add a comment in the feedback issue. + + +" +can i use GitLab Duo Chat to search for code on gitlab?,,"1. GitLab Duo Chat + + + +GitLab Duo Chat + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated +Status: Beta + + +History + + + + + +Introduced as an Experiment for SaaS in GitLab 16.0. + Changed to Beta for SaaS in GitLab 16.6. + +Introduced as a Beta for self-managed in GitLab 16.8. + Changed from Ultimate to Premium tier in GitLab 16.9. + + + + + + +GitLab Duo Chat is your personal AI-powered assistant for boosting productivity. +It can assist various tasks of your daily work with the AI-generated content. +Here are the examples of use cases: + + + + + Feature + Use case example + Supported interfaces + Supported deployments + + + + + Ask about GitLab + I want to know how to create an issue in GitLab. 
+ GitLab, VS Code, and Web IDE 1 + + GitLab.com + + + Ask about a specific issue + I want to summarize this issue. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Ask about a specific epic + I want to summarize this epic. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Ask about code + I want to understand how this code works. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Ask about CI/CD + I want to create a new CI/CD pipeline configuration. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Explain code in the IDE + I want to understand how this code works. + VS Code and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Refactor code in the IDE + I want to refactor this code. + VS Code and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Write tests in the IDE + I want to write a test for this code. + VS Code and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + + + +Footnotes: + + GitLab Duo Chat is not available in Web IDE on self-managed + + + + + + note This is a Beta feature. We’re continuously extending the capabilities and reliability of the responses. + + +Watch a demo + + + + View how to setup and use GitLab Duo Chat. + + + + + +What GitLab Duo Chat can help with + + +GitLab Duo Chat can help in a variety of areas. + +Ask about GitLab + + + +History + + + + + +Introduced for SaaS in GitLab 16.0. + + + + + + +You can ask questions about how GitLab works. Things like: + + + Explain the concept of a 'fork' in a concise manner. + Provide step-by-step instructions on how to reset a user's password. + + + + note This feature is not currently supported in self-managed instances. +See this epic for more information. + + +Ask about a specific issue + + + +History + + + + + +Introduced for SaaS in GitLab 16.0. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can ask about a specific GitLab issue. For example: + + + Generate a summary for the issue identified via this link: + When you are viewing an issue in GitLab, you can ask Generate a concise summary of the current issue. + + How can I improve the description of so that readers understand the value and problems to be solved? + + +Ask about a specific epic + + + +History + + + + + +Introduced for SaaS in GitLab 16.3. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can ask about a specific GitLab epic. For example: + + + Generate a summary for the epic identified via this link: + When you are viewing an epic in GitLab, you can ask Generate a concise summary of the opened epic. + + What are the unique use cases raised by commenters in ? + + +Ask about code + + + +History + + + + + +Introduced for SaaS in GitLab 16.1. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can also ask GitLab Duo Chat to generate code: + + + Write a Ruby function that prints 'Hello, World!' when called. + Develop a JavaScript program that simulates a two-player Tic-Tac-Toe game. Provide both game logic and user interface, if applicable. + Create a regular expression for parsing IPv4 and IPv6 addresses in Python. + Generate code for parsing a syslog log file in Java. Use regular expressions when possible, and store the results in a hash map. + Create a product-consumer example with threads and shared memory in C++. Use atomic locks when possible. + Generate Rust code for high performance gRPC calls. 
Provide a source code example for a server and client. + + +And you can ask GitLab Duo Chat to explain code: + + + Provide a clear explanation of the given Ruby code: def sum(a, b) a + b end. Describe what this code does and how it works. + + +Alternatively, you can use the /explain command to explain the selected code in your editor. + +For more practical examples, see the GitLab Duo examples. + +Ask about errors + + +Programming languages that require compiling the source code may throw cryptic error messages. Similarly, a script or a web application could throw a stack trace. You can ask GitLab Duo Chat by prefixing the copied error message with, for example, Please explain this error message:. Add the specific context, like the programming language. + + + Explain this error message in Java: Int and system cannot be resolved to a type + Explain when this C function would cause a segmentation fault: sqlite3_prepare_v2() + Explain what would cause this error in Python: ValueError: invalid literal for int() + Why is ""this"" undefined in VueJS? Provide common error cases, and explain how to avoid them. + How to debug a Ruby on Rails stacktrace? Share common strategies and an example exception. + + +For more practical examples, see the GitLab Duo examples. + +Ask about CI/CD + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can ask GitLab Duo Chat to create a CI/CD configuration: + + + Create a .gitlab-ci.yml configuration file for testing and building a Ruby on Rails application in a GitLab CI/CD pipeline. + Create a CI/CD configuration for building and linting a Python application. + Create a CI/CD configuration to build and test Rust code. + Create a CI/CD configuration for C++. Use gcc as compiler, and cmake as build tool. + Create a CI/CD configuration for VueJS. Use npm, and add SAST security scanning. + Generate a security scanning pipeline configuration, optimized for Java. + + +You can also ask to explain specific job errors by copy-pasting the error message, prefixed with Please explain this CI/CD job error message, in the context of :: + + + Please explain this CI/CD job error message in the context of a Go project: build.sh: line 14: go command not found + + +Alternatively, you can use root cause analysis in CI/CD. + +For more practical examples, see the GitLab Duo examples. + +Explain code in the IDE + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + + + note This feature is available in VS Code and the Web IDE only. + + +/explain is a special command to explain the selected code in your editor. +You can also add additional instructions to be considered, for example: /explain the performance +See Use GitLab Duo Chat in VS Code for more information. + + + /explain focus on the algorithm + /explain the performance gains or losses using this code + +/explain the object inheritance (classes, object-oriented) + +/explain why a static variable is used here (C++) + +/explain how this function would cause a segmentation fault (C) + +/explain how concurrency works in this context (Go) + +/explain how the request reaches the client (REST API, database) + + +For more practical examples, see the GitLab Duo examples. + +Refactor code in the IDE + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + + + note This feature is available in VS Code and the Web IDE only. 
+ + +/refactor is a special command to generate a refactoring suggestion for the selected code in your editor. +You can include additional instructions to be considered. For example: + + + Use a specific coding pattern, for example /refactor with ActiveRecord or /refactor into a class providing static functions. + Use a specific library, for example /refactor using mysql. + Use a specific function/algorithm, for example /refactor into a stringstream with multiple lines in C++. + Refactor to a different programming language, for example /refactor to TypeScript. + Focus on performance, for example /refactor improving performance. + Focus on potential vulnerabilities, for example /refactor avoiding memory leaks and exploits. + + +See Use GitLab Duo Chat in the VS Code for more information. + +For more practical examples, see the GitLab Duo examples. + +Write tests in the IDE + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + + + note This feature is available in VS Code and the Web IDE only. + + +/tests is a special command to generate a testing suggestion for the selected code in your editor. +You can also add additional instructions to be considered, for example: /tests using the Boost.Test framework +See Use GitLab Duo Chat in the VS Code for more information. + + + Use a specific test framework, for example /tests using the Boost.test framework (C++) or /tests using Jest (JavaScript). + Focus on extreme test cases, for example /tests focus on extreme cases, force regression testing. + Focus on performance, for example /tests focus on performance. + Focus on regressions and potential exploits, for example /tests focus on regressions and potential exploits. + + +For more practical examples, see the GitLab Duo examples. + +Ask follow up questions + + +You can ask follow-up questions to delve deeper into the topic or task at hand. +This helps you get more detailed and precise responses tailored to your specific needs, +whether it’s for further clarification, elaboration, or additional assistance. + +A follow-up to the question Write a Ruby function that prints 'Hello, World!' when called could be: + + + Can you also explain how I can call and execute this Ruby function in a typical Ruby environment, such as the command line? + + +A follow-up to the question How to start a C# project? could be: + + + Can you also please explain how to add a .gitignore and .gitlab-ci.yml file for C#? + + +For more practical examples, see the GitLab Duo examples. + +Enable GitLab Duo Chat + + +For SaaS users + + +To use this feature, at least one group you’re a member of must +have the experiment and beta features setting enabled. + +You can ask questions about resources that belong only to groups where this setting is enabled. + +Troubleshoot Chat access + + +If you have access to chat responses you did not expect, you might be part of +a group that has the Use Experiment and Beta features setting enabled. +Review the list of your groups and verify which ones you have access to. + +GitLab.com administrators can verify your access by running this snippet in the Rails console: + +u = User.find_by_username($USERNAME) +u.member_namespaces.namespace_settings_with_ai_features_enabled.with_ai_supported_plan(:ai_chat) + + +You can ask specific questions about group resources (like “summarize this issue”) when this feature is enabled. + +For self-managed users + + + + note Usage of GitLab Duo Chat is governed by the GitLab Testing Agreement. 
+Learn about data usage when using GitLab Duo Chat. + + +Prerequisites: + + + You have GitLab version 16.8 or later. + The Premium or Ultimate license is activated in your GitLab instance by using cloud licensing. + Your firewalls and HTTP proxy servers allow outbound connections +to cloud.gitlab.com. To use an HTTP proxy, both +gitLab _workhorse and gitLab_rails have the necessary +web proxy environment variables set. + All of the users in your instance have the latest version of their IDE extension. + You are an administrator. + + +To enable GitLab Duo Chat for your self-managed GitLab instance: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > General. + Expand AI-powered features and select Enable Experiment and Beta AI-powered features. + Select Save changes. + To make sure GitLab Duo Chat works immediately, you must +manually synchronize your subscription. + + +Manually synchronize your subscription + + +You must manually synchronize your subscription if either: + + + You have just purchased a subscription for the Premium or Ultimate tier and have upgraded to GitLab 16.8. + You already have a subscription for the Premium or Ultimate tier and have upgraded to GitLab 16.8. + + +Without the manual synchronization, it might take up to 24 hours to activate GitLab Duo Chat on your instance. + +Use GitLab Duo Chat in the GitLab UI + + + + In the lower-left corner, select the Help icon. +The new left sidebar must be enabled. + Select GitLab Duo Chat. A drawer opens on the right side of your screen. + Enter your question in the chat input box and press Enter or select Send. It may take a few seconds for the interactive AI chat to produce an answer. + You can ask a follow-up question. + If you want to ask a new question unrelated to the previous conversation, you may receive better answers if you clear the context by typing /reset into the input box and selecting Send. + + + + note Only the last 50 messages are retained in the chat history. The chat history expires 3 days after last use. + + +Delete all conversations + + +To delete all previous conversations: + + + In the text box, type /clean and select Send. + + +Use GitLab Duo Chat in the Web IDE + + + +Tier: Premium, Ultimate +Status: Experiment + + +History + + + + + Introduced in GitLab 16.6 as an Experiment + + + + + + + +To use GitLab Duo Chat in the Web IDE on GitLab: + + + Open the Web IDE: + + On the left sidebar, select Search or go to and find your project. + Select a file. Then in the upper right, select Edit > Open in Web IDE. + + + Then open Chat by using one of the following methods: + + On the left sidebar, select GitLab Duo Chat. + In the file that you have open in the editor, select some code. + + Right-click and select GitLab Duo Chat. + Select Explain selected code or Generate Tests. + + + Use the keyboard shortcut: ALT+d (on Windows and Linux) or Option+d (on Mac) + + + In the message box, enter your question and press Enter or select Send. + + +If you have selected code in the editor, this selection is sent along with your question to the AI. This way you can ask questions about this code selection. For instance, Could you simplify this?. + + + note GitLab Duo Chat is not available in the Web IDE on self-managed. + + +Use GitLab Duo Chat in VS Code + + + +Tier: Premium, Ultimate +Status: Experiment + + +History + + + + + Introduced in GitLab 16.6 as an Experiment. 
+ + + + + + +To use GitLab Duo Chat in GitLab Workflow extension for VS Code: + + + Install and set up the Workflow extension for VS Code: + + In VS Code, download and Install the GitLab Workflow extension for VS Code. + Configure the GitLab Workflow extension. + + + In VS Code, open a file. The file does not need to be a file in a Git repository. + Open Chat by using one of the following methods: + + On the left sidebar, select GitLab Duo Chat. + In the file that you have open in the editor, select some code. + + Right-click and select GitLab Duo Chat. + Select Explain selected code or Generate Tests. + + + Use the keyboard shortcut: ALT+d (on Windows and Linux) or Option+d (on Mac) + + + In the message box, enter your question and press Enter or select Send. + + +If you have selected code in the editor, this selection is sent along with your question to the AI. This way you can ask questions about this code selection. For instance, Could you simplify this?. + +Perform standard task in the IDE from the context menu or by using slash commands + + +Get code explained, code refactored or get tests generated for code. To do so: + + + Select code in your editor in VS Code or in the Web IDE. + Type one the following slash commands into the chat field: /explain, /refactor or /tests. Alternatively, use the context menu to perform these tasks. + + +When you use one of the slash commands you can also add additional instructions to be considered, for example: /tests using the Boost.Test framework + +Disable Chat in VS Code + + +To disable GitLab Duo Chat in VS Code: + + + Go to Settings > Extensions > GitLab Workflow (GitLab VS Code Extension). + Clear the Enable GitLab Duo Chat assistant checkbox. + + +Give feedback + + +Your feedback is important to us as we continually enhance your GitLab Duo Chat experience. +Leaving feedback helps us customize the Chat for your needs and improve its performance for everyone. + +To give feedback about a specific response, use the feedback buttons in the response message. +Or, you can add a comment in the feedback issue. + + +2. GitLab Duo + + + +GitLab Duo + + + +History + + + + + +First GitLab Duo features introduced in GitLab 16.0. + +Removed third-party AI setting in GitLab 16.6. + +Removed support for OpenAI from all GitLab Duo features in GitLab 16.6. + + + + + + +GitLab is creating AI-assisted features across our DevSecOps platform. These features aim to help increase velocity and solve key pain points across the software development lifecycle. + +Some features are still in development. View details about support for each status (Experiment, Beta, Generally Available). + +As features become Generally Available, GitLab is transparent and updates the documentation to clearly state how and where you can access these capabilities. + + + + + Goal + Feature + Tier/Offering/Status + + + + + Helps you write code more efficiently by showing code suggestions as you type. Watch overview + + Code Suggestions + +Tier: Premium or Ultimate with GitLab Duo Pro Offering: GitLab.com, Self-managed, GitLab Dedicated + + + Processes and generates text and code in a conversational manner. Helps you quickly identify useful information in large volumes of text in issues, epics, code, and GitLab documentation. + Chat + +Tier: Premium, Ultimate Offering: GitLab.com, Self-managed, GitLab Dedicated Status: Beta (Subject to the Testing Agreement) + + + Helps you discover or recall Git commands when and where you need them. 
+ Git suggestions + +Tier: Ultimate Offering: GitLab.com Status: Experiment + + + Assists with quickly getting everyone up to speed on lengthy conversations to help ensure you are all on the same page. + Discussion summary + +Tier: Ultimate Offering: GitLab.com Status: Experiment + + + Generates issue descriptions. + Issue description generation + +Tier: UltimateOffering: GitLab.com Status: Experiment + + + Automates repetitive tasks and helps catch bugs early. + Test generation + +Tier: Ultimate Offering: GitLab.com, Self-managed, GitLab Dedicated Status: Beta + + + Generates a description for the merge request based on the contents of the template. + Merge request template population + +Tier: UltimateOffering: GitLab.com Status: Experiment + + + Assists in creating faster and higher-quality reviews by automatically suggesting reviewers for your merge request. Watch overview + + Suggested Reviewers + +Tier: Ultimate Offering: GitLab.comStatus: Generally Available + + + Efficiently communicates the impact of your merge request changes. + Merge request summary + +Tier: Ultimate Offering: GitLab.com Status: Experiment + + + Helps ease merge request handoff between authors and reviewers and help reviewers efficiently understand suggestions. + Code review summary + +Tier: Ultimate Offering: GitLab.com Status: Experiment + + + Helps you remediate vulnerabilities more efficiently, boost your skills, and write more secure code. Watch overview + + Vulnerability explanation + +Tier: Ultimate Offering: GitLab.com Status: Beta + + + Generates a merge request containing the changes required to mitigate a vulnerability. + Vulnerability resolution + +Tier: Ultimate Offering: GitLab.com Status: Experiment + + + Helps you understand code by explaining it in English language. Watch overview + + Code explanation + +Tier: Ultimate Offering: GitLab.com Status: Experiment + + + Assists you in determining the root cause for a pipeline failure and failed CI/CD build. + Root cause analysis + +Tier: Ultimate Offering: GitLab.com Status: Experiment + + + Assists you with predicting productivity metrics and identifying anomalies across your software development lifecycle. + Value stream forecasting + +Tier: Ultimate Offering: GitLab.com, Self-managed, GitLab Dedicated Status: Experiment + + + + +Enable AI/ML features + + +For features listed as Experiment and Beta: + + + These features are disabled by default. + To enable, a user with the Owner role for the group must turn on this setting. +On GitLab.com, this setting is available for Ultimate subscriptions only. + These features are subject to the +Testing Terms of Use. + + +For all self-managed features: + + + Your firewalls and HTTP proxy servers must allow outbound connections +to https://cloud.gitlab.com:443. To use an HTTP/S proxy, both gitLab_workhorse and gitLab_rails must have the necessary +web proxy environment variables set. + + +For other features: + + + +Code Suggestions is enabled when you purchase the +GitLab Duo Pro add-on and assign seats to users. + +Chat + + View how to enable for self-managed. + View how to enable for GitLab.com. + + + + +Disable GitLab Duo features + + + +History + + + + + +Settings to disable AI features were introduced in GitLab 16.10. + + + + + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +You can disable GitLab Duo AI features for a group, project, or instance. 
+When it’s disabled, any attempt to use GitLab Duo features on the group, project, or instance is blocked and an error is displayed. +GitLab Duo features are also blocked for resources in the group or project, like epics, +issues, and vulnerabilities. + +For the group or project + + +Prerequisites: + + + You must have the Owner role for the group or project. + + +To disable GitLab Duo: + + + Use the GitLab GraphQL API +groupUpdate or projectSettingsUpdate mutation. + Disable GitLab Duo for the project or group by setting the duo_features_enabled setting to false. +(The default is true.) + Optional. To make all groups or projects in the hierarchy inherit the value for a top-level group, +set lock_duo_features_enabled to true. (The default is false.) +The child groups and projects cannot override this value. + + +For an instance + + +Prerequisites: + + + You must be an administrator. + + +To disable GitLab Duo: + + + Use the application settings API. + Disable GitLab Duo for the instance by setting the duo_features_enabled setting to false. +(The default is true.) + Optional. To ensure this setting cannot be overridden at the group or project level, +set lock_duo_features_enabled to true. (The default is false.) +The child groups and projects cannot override this value. + + +Future plans + + + + An issue exists for making this setting +available in the UI. + An issue exists for making the setting +cascade to all groups and projects. Right now the projects and groups do not +display the setting of the top-level group. To ensure the setting cascades, +ensure lock_duo_features_enabled is set to true. + + +Experimental AI features and how to use them + + +The following subsections describe the experimental AI features in more detail. + +Explain code in the Web UI with Code explanation + + + +Tier: Ultimate +Offering: GitLab.com +Status: Experiment + + +History + + + + + Introduced in GitLab 15.11 as an Experiment on GitLab.com. + + + + + + +To use this feature: + + + The parent group of the project must: + + Enable the experiment and beta features setting. + + + You must be a member of the project with sufficient permissions to view the repository. + + +GitLab can help you get up to speed faster if you: + + + Spend a lot of time trying to understand pieces of code that others have created, or + Struggle to understand code written in a language that you are not familiar with. + + +By using a large language model, GitLab can explain the code in natural language. + +To explain your code: + + + On the left sidebar, select Search or go to and find your project. + Select any file in your project that contains code. + On the file, select the lines that you want to have explained. + On the left side, select the question mark ( ). You might have to scroll to the first line of your selection to view it. This sends the selected code, together with a prompt, to provide an explanation to the large language model. + A drawer is displayed on the right side of the page. Wait a moment for the explanation to be generated. + Provide feedback about how satisfied you are with the explanation, so we can improve the results. + + +You can also have code explained in the context of a merge request. To explain +code in a merge request: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests, then select your merge request. + On the secondary menu, select Changes. + + On the file you would like explained, select the three dots ( ) and select View File @ $SHA. 
+ + A separate browser tab opens and shows the full file with the latest changes. + + On the new tab, select the lines that you want to have explained. + On the left side, select the question mark ( ). You might have to scroll to the first line of your selection to view it. This sends the selected code, together with a prompt, to provide an explanation to the large language model. + A drawer is displayed on the right side of the page. Wait a moment for the explanation to be generated. + Provide feedback about how satisfied you are with the explanation, so we can improve the results. + + + + +We cannot guarantee that the large language model produces results that are correct. Use the explanation with caution. + +Summarize issue discussions with Discussion summary + + + +Tier: Ultimate +Offering: GitLab.com +Status: Experiment + + +History + + + + + +Introduced in GitLab 16.0 as an Experiment. + + + + + + +To use this feature: + + + The parent group of the issue must: + + Enable the experiment and beta features setting. + + + You must be a member of the project with sufficient permissions to view the issue. + + +You can generate a summary of discussions on an issue: + + + In an issue, scroll to the Activity section. + Select View summary. + + +The comments in the issue are summarized in as many as 10 list items. +The summary is displayed only for you. + +Provide feedback on this experimental feature in issue 407779. + +Data usage: When you use this feature, the text of all comments on the issue are sent to the large +language model referenced above. + +Forecast deployment frequency with Value stream forecasting + + + +Tier: Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated +Status: Experiment + + +History + + + + + +Introduced in GitLab 16.2 as an Experiment. + + + + + + +To use this feature: + + + The parent group of the project must: + + Enable the experiment and beta features setting. + + + You must be a member of the project with sufficient permissions to view the CI/CD analytics. + + +In CI/CD Analytics, you can view a forecast of deployment frequency: + + + On the left sidebar, select Search or go to and find your project. + Select Analyze > CI/CD analytics. + Select the Deployment frequency tab. + Turn on the Show forecast toggle. + On the confirmation dialog, select Accept testing terms. + + +The forecast is displayed as a dotted line on the chart. Data is forecasted for a duration that is half of the selected date range. +For example, if you select a 30-day range, a forecast for the following 15 days is displayed. + + + +Provide feedback on this experimental feature in issue 416833. + +Root cause analysis + + + +Tier: Ultimate +Offering: GitLab.com +Status: Experiment + + +History + + + + + +Introduced in GitLab 16.2 as an Experiment. + + + + + + +To use this feature: + + + The parent group of the project must: + + Enable the experiment and beta features setting. + + + You must be a member of the project with sufficient permissions to view the CI/CD job. + + +When the feature is available, the “Root cause analysis” button will appears on +a failed CI/CD job. Selecting this button generates an analysis regarding the +reason for the failure. + +Summarize an issue with Issue description generation + + + +Tier: Ultimate +Offering: GitLab.com +Status: Experiment + + +History + + + + + +Introduced in GitLab 16.3 as an Experiment. + + + + + + +To use this feature: + + + The parent group of the project must: + + Enable the experiment and beta features setting. 
+ + + You must be a member of the project with sufficient permissions to view the issue. + + +You can generate the description for an issue from a short summary. + + + Create a new issue. + Above the Description field, select AI actions > Generate issue description. + Write a short description and select Submit. + + +The issue description is replaced with AI-generated text. + +Provide feedback on this experimental feature in issue 409844. + +Data usage: When you use this feature, the text you enter is sent to the large +language model referenced above. + +Language models + + + + + + Feature + Large Language Model + + + + + Git suggestions + Vertex AI Codey codechat-bison + + + + Discussion summary + Anthropic Claude-2 + + + + Issue description generation + Anthropic Claude-2 + + + + Code Suggestions + For Code Completion: Vertex AI Codey code-gecko For Code Generation: Anthropic Claude-2 + + + + Test generation + Anthropic Claude-2 + + + + Merge request template population + Vertex AI Codey text-bison + + + + Suggested Reviewers + GitLab creates a machine learning model for each project, which is used to generate reviewers View the issue + + + + Merge request summary + Vertex AI Codey text-bison + + + + Code review summary + Vertex AI Codey text-bison + + + + Vulnerability explanation + Vertex AI Codey text-bison Anthropic Claude-2 if degraded performance + + + Vulnerability resolution + Vertex AI Codey code-bison + + + + Code explanation + Vertex AI Codey codechat-bison + + + + GitLab Duo Chat + Anthropic Claude-2 Vertex AI Codey textembedding-gecko + + + + Root cause analysis + Vertex AI Codey text-bison + + + + Value stream forecasting + Statistical forecasting + + + + +Data usage + + +GitLab AI features leverage generative AI to help increase velocity and aim to help make you more productive. Each feature operates independently of other features and is not required for other features to function. GitLab selects the best-in-class large-language models for specific tasks. We use Google Vertex AI Models and Anthropic Claude. + +Progressive enhancement + + +These features are designed as a progressive enhancement to existing GitLab features across our DevSecOps platform. They are designed to fail gracefully and should not prevent the core functionality of the underlying feature. You should note each feature is subject to its expected functionality as defined by the relevant feature support policy. + +Stability and performance + + +These features are in a variety of feature support levels. Due to the nature of these features, there may be high demand for usage which may cause degraded performance or unexpected downtime of the feature. We have built these features to gracefully degrade and have controls in place to allow us to mitigate abuse or misuse. GitLab may disable beta and experimental features for any or all customers at any time at our discretion. + +Data privacy + + +GitLab Duo AI features are powered by a generative AI models. The processing of any personal data is in accordance with our Privacy Statement. You may also visit the Sub-Processors page to see the list of our Sub-Processors that we use to provide these features. + +Data retention + + +The below reflects the current retention periods of GitLab AI model Sub-Processors: + + + Anthropic discards model input and output data immediately after the output is provided. Anthropic currently does not store data for abuse monitoring. Model input and output is not used to train models. 
+ Google discards model input and output data immediately after the output is provided. Google currently does not store data for abuse monitoring. Model input and output is not used to train models. + + +All of these AI providers are under data protection agreements with GitLab that prohibit the use of Customer Content for their own purposes, except to perform their independent legal obligations. + +GitLab retains input and output for up to 30 days for the purpose of troubleshooting, debugging, and addressing latency issues. + +Telemetry + + +GitLab Duo collects aggregated or de-identified first-party usage data through our Snowplow collector. This usage data includes the following metrics: + + + Number of unique users + Number of unique instances + Prompt lengths + Model used + Status code responses + API responses times + + +Training data + + +GitLab does not train generative AI models based on private (non-public) data. The vendors we work with also do not train models based on private data. + +For more information on our AI sub-processors, see: + + + Google Vertex AI Models APIs data governance and responsible AI. + Anthropic Claude’s constitution. + + +Model accuracy and quality + + +Generative AI may produce unexpected results that may be: + + + Low-quality + Incoherent + Incomplete + Produce failed pipelines + Insecure code + Offensive or insensitive + Out of date information + + +GitLab is actively iterating on all our AI-assisted capabilities to improve the quality of the generated content. We improve the quality through prompt engineering, evaluating new AI/ML models to power these features, and through novel heuristics built into these features directly. + + +" +what can GitLab Duo Chat do with it?,,"1. GitLab Duo Chat + + + +GitLab Duo Chat + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated +Status: Beta + + +History + + + + + +Introduced as an Experiment for SaaS in GitLab 16.0. + Changed to Beta for SaaS in GitLab 16.6. + +Introduced as a Beta for self-managed in GitLab 16.8. + Changed from Ultimate to Premium tier in GitLab 16.9. + + + + + + +GitLab Duo Chat is your personal AI-powered assistant for boosting productivity. +It can assist various tasks of your daily work with the AI-generated content. +Here are the examples of use cases: + + + + + Feature + Use case example + Supported interfaces + Supported deployments + + + + + Ask about GitLab + I want to know how to create an issue in GitLab. + GitLab, VS Code, and Web IDE 1 + + GitLab.com + + + Ask about a specific issue + I want to summarize this issue. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Ask about a specific epic + I want to summarize this epic. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Ask about code + I want to understand how this code works. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Ask about CI/CD + I want to create a new CI/CD pipeline configuration. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Explain code in the IDE + I want to understand how this code works. + VS Code and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Refactor code in the IDE + I want to refactor this code. + VS Code and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Write tests in the IDE + I want to write a test for this code. 
+ VS Code and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + + + +Footnotes: + + GitLab Duo Chat is not available in Web IDE on self-managed + + + + + + note This is a Beta feature. We’re continuously extending the capabilities and reliability of the responses. + + +Watch a demo + + + + View how to setup and use GitLab Duo Chat. + + + + + +What GitLab Duo Chat can help with + + +GitLab Duo Chat can help in a variety of areas. + +Ask about GitLab + + + +History + + + + + +Introduced for SaaS in GitLab 16.0. + + + + + + +You can ask questions about how GitLab works. Things like: + + + Explain the concept of a 'fork' in a concise manner. + Provide step-by-step instructions on how to reset a user's password. + + + + note This feature is not currently supported in self-managed instances. +See this epic for more information. + + +Ask about a specific issue + + + +History + + + + + +Introduced for SaaS in GitLab 16.0. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can ask about a specific GitLab issue. For example: + + + Generate a summary for the issue identified via this link: + When you are viewing an issue in GitLab, you can ask Generate a concise summary of the current issue. + + How can I improve the description of so that readers understand the value and problems to be solved? + + +Ask about a specific epic + + + +History + + + + + +Introduced for SaaS in GitLab 16.3. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can ask about a specific GitLab epic. For example: + + + Generate a summary for the epic identified via this link: + When you are viewing an epic in GitLab, you can ask Generate a concise summary of the opened epic. + + What are the unique use cases raised by commenters in ? + + +Ask about code + + + +History + + + + + +Introduced for SaaS in GitLab 16.1. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can also ask GitLab Duo Chat to generate code: + + + Write a Ruby function that prints 'Hello, World!' when called. + Develop a JavaScript program that simulates a two-player Tic-Tac-Toe game. Provide both game logic and user interface, if applicable. + Create a regular expression for parsing IPv4 and IPv6 addresses in Python. + Generate code for parsing a syslog log file in Java. Use regular expressions when possible, and store the results in a hash map. + Create a product-consumer example with threads and shared memory in C++. Use atomic locks when possible. + Generate Rust code for high performance gRPC calls. Provide a source code example for a server and client. + + +And you can ask GitLab Duo Chat to explain code: + + + Provide a clear explanation of the given Ruby code: def sum(a, b) a + b end. Describe what this code does and how it works. + + +Alternatively, you can use the /explain command to explain the selected code in your editor. + +For more practical examples, see the GitLab Duo examples. + +Ask about errors + + +Programming languages that require compiling the source code may throw cryptic error messages. Similarly, a script or a web application could throw a stack trace. You can ask GitLab Duo Chat by prefixing the copied error message with, for example, Please explain this error message:. Add the specific context, like the programming language. 
+ + + Explain this error message in Java: Int and system cannot be resolved to a type + Explain when this C function would cause a segmentation fault: sqlite3_prepare_v2() + Explain what would cause this error in Python: ValueError: invalid literal for int() + Why is ""this"" undefined in VueJS? Provide common error cases, and explain how to avoid them. + How to debug a Ruby on Rails stacktrace? Share common strategies and an example exception. + + +For more practical examples, see the GitLab Duo examples. + +Ask about CI/CD + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can ask GitLab Duo Chat to create a CI/CD configuration: + + + Create a .gitlab-ci.yml configuration file for testing and building a Ruby on Rails application in a GitLab CI/CD pipeline. + Create a CI/CD configuration for building and linting a Python application. + Create a CI/CD configuration to build and test Rust code. + Create a CI/CD configuration for C++. Use gcc as compiler, and cmake as build tool. + Create a CI/CD configuration for VueJS. Use npm, and add SAST security scanning. + Generate a security scanning pipeline configuration, optimized for Java. + + +You can also ask to explain specific job errors by copy-pasting the error message, prefixed with Please explain this CI/CD job error message, in the context of :: + + + Please explain this CI/CD job error message in the context of a Go project: build.sh: line 14: go command not found + + +Alternatively, you can use root cause analysis in CI/CD. + +For more practical examples, see the GitLab Duo examples. + +Explain code in the IDE + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + + + note This feature is available in VS Code and the Web IDE only. + + +/explain is a special command to explain the selected code in your editor. +You can also add additional instructions to be considered, for example: /explain the performance +See Use GitLab Duo Chat in VS Code for more information. + + + /explain focus on the algorithm + /explain the performance gains or losses using this code + +/explain the object inheritance (classes, object-oriented) + +/explain why a static variable is used here (C++) + +/explain how this function would cause a segmentation fault (C) + +/explain how concurrency works in this context (Go) + +/explain how the request reaches the client (REST API, database) + + +For more practical examples, see the GitLab Duo examples. + +Refactor code in the IDE + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + + + note This feature is available in VS Code and the Web IDE only. + + +/refactor is a special command to generate a refactoring suggestion for the selected code in your editor. +You can include additional instructions to be considered. For example: + + + Use a specific coding pattern, for example /refactor with ActiveRecord or /refactor into a class providing static functions. + Use a specific library, for example /refactor using mysql. + Use a specific function/algorithm, for example /refactor into a stringstream with multiple lines in C++. + Refactor to a different programming language, for example /refactor to TypeScript. + Focus on performance, for example /refactor improving performance. + Focus on potential vulnerabilities, for example /refactor avoiding memory leaks and exploits. 
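For illustration only (this is a sketch of the kind of suggestion such prompts ask for, not actual Chat output, and the method and class names are invented for the example), selecting a small procedural Ruby method and sending /refactor into a class providing static functions might produce a suggestion shaped roughly like this:

# Original selection: a bare top-level method.
def full_name(first, last)
  [first, last].join(' ').strip
end

# Possible shape of the suggestion: the same logic exposed as a
# class-level (static) method on a small utility class.
class NameFormatter
  def self.full_name(first, last)
    [first, last].join(' ').strip
  end
end

NameFormatter.full_name('Ada', 'Lovelace') # => 'Ada Lovelace'

The exact wording and structure of the suggestion varies with the selected code and the model, so review it before applying.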
+ + +See Use GitLab Duo Chat in the VS Code for more information. + +For more practical examples, see the GitLab Duo examples. + +Write tests in the IDE + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + + + note This feature is available in VS Code and the Web IDE only. + + +/tests is a special command to generate a testing suggestion for the selected code in your editor. +You can also add additional instructions to be considered, for example: /tests using the Boost.Test framework +See Use GitLab Duo Chat in the VS Code for more information. + + + Use a specific test framework, for example /tests using the Boost.test framework (C++) or /tests using Jest (JavaScript). + Focus on extreme test cases, for example /tests focus on extreme cases, force regression testing. + Focus on performance, for example /tests focus on performance. + Focus on regressions and potential exploits, for example /tests focus on regressions and potential exploits. + + +For more practical examples, see the GitLab Duo examples. + +Ask follow up questions + + +You can ask follow-up questions to delve deeper into the topic or task at hand. +This helps you get more detailed and precise responses tailored to your specific needs, +whether it’s for further clarification, elaboration, or additional assistance. + +A follow-up to the question Write a Ruby function that prints 'Hello, World!' when called could be: + + + Can you also explain how I can call and execute this Ruby function in a typical Ruby environment, such as the command line? + + +A follow-up to the question How to start a C# project? could be: + + + Can you also please explain how to add a .gitignore and .gitlab-ci.yml file for C#? + + +For more practical examples, see the GitLab Duo examples. + +Enable GitLab Duo Chat + + +For SaaS users + + +To use this feature, at least one group you’re a member of must +have the experiment and beta features setting enabled. + +You can ask questions about resources that belong only to groups where this setting is enabled. + +Troubleshoot Chat access + + +If you have access to chat responses you did not expect, you might be part of +a group that has the Use Experiment and Beta features setting enabled. +Review the list of your groups and verify which ones you have access to. + +GitLab.com administrators can verify your access by running this snippet in the Rails console: + +u = User.find_by_username($USERNAME) +u.member_namespaces.namespace_settings_with_ai_features_enabled.with_ai_supported_plan(:ai_chat) + + +You can ask specific questions about group resources (like “summarize this issue”) when this feature is enabled. + +For self-managed users + + + + note Usage of GitLab Duo Chat is governed by the GitLab Testing Agreement. +Learn about data usage when using GitLab Duo Chat. + + +Prerequisites: + + + You have GitLab version 16.8 or later. + The Premium or Ultimate license is activated in your GitLab instance by using cloud licensing. + Your firewalls and HTTP proxy servers allow outbound connections +to cloud.gitlab.com. To use an HTTP proxy, both +gitLab _workhorse and gitLab_rails have the necessary +web proxy environment variables set. + All of the users in your instance have the latest version of their IDE extension. + You are an administrator. + + +To enable GitLab Duo Chat for your self-managed GitLab instance: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > General. 
+ Expand AI-powered features and select Enable Experiment and Beta AI-powered features. + Select Save changes. + To make sure GitLab Duo Chat works immediately, you must +manually synchronize your subscription. + + +Manually synchronize your subscription + + +You must manually synchronize your subscription if either: + + + You have just purchased a subscription for the Premium or Ultimate tier and have upgraded to GitLab 16.8. + You already have a subscription for the Premium or Ultimate tier and have upgraded to GitLab 16.8. + + +Without the manual synchronization, it might take up to 24 hours to activate GitLab Duo Chat on your instance. + +Use GitLab Duo Chat in the GitLab UI + + + + In the lower-left corner, select the Help icon. +The new left sidebar must be enabled. + Select GitLab Duo Chat. A drawer opens on the right side of your screen. + Enter your question in the chat input box and press Enter or select Send. It may take a few seconds for the interactive AI chat to produce an answer. + You can ask a follow-up question. + If you want to ask a new question unrelated to the previous conversation, you may receive better answers if you clear the context by typing /reset into the input box and selecting Send. + + + + note Only the last 50 messages are retained in the chat history. The chat history expires 3 days after last use. + + +Delete all conversations + + +To delete all previous conversations: + + + In the text box, type /clean and select Send. + + +Use GitLab Duo Chat in the Web IDE + + + +Tier: Premium, Ultimate +Status: Experiment + + +History + + + + + Introduced in GitLab 16.6 as an Experiment + + + + + + + +To use GitLab Duo Chat in the Web IDE on GitLab: + + + Open the Web IDE: + + On the left sidebar, select Search or go to and find your project. + Select a file. Then in the upper right, select Edit > Open in Web IDE. + + + Then open Chat by using one of the following methods: + + On the left sidebar, select GitLab Duo Chat. + In the file that you have open in the editor, select some code. + + Right-click and select GitLab Duo Chat. + Select Explain selected code or Generate Tests. + + + Use the keyboard shortcut: ALT+d (on Windows and Linux) or Option+d (on Mac) + + + In the message box, enter your question and press Enter or select Send. + + +If you have selected code in the editor, this selection is sent along with your question to the AI. This way you can ask questions about this code selection. For instance, Could you simplify this?. + + + note GitLab Duo Chat is not available in the Web IDE on self-managed. + + +Use GitLab Duo Chat in VS Code + + + +Tier: Premium, Ultimate +Status: Experiment + + +History + + + + + Introduced in GitLab 16.6 as an Experiment. + + + + + + +To use GitLab Duo Chat in GitLab Workflow extension for VS Code: + + + Install and set up the Workflow extension for VS Code: + + In VS Code, download and Install the GitLab Workflow extension for VS Code. + Configure the GitLab Workflow extension. + + + In VS Code, open a file. The file does not need to be a file in a Git repository. + Open Chat by using one of the following methods: + + On the left sidebar, select GitLab Duo Chat. + In the file that you have open in the editor, select some code. + + Right-click and select GitLab Duo Chat. + Select Explain selected code or Generate Tests. + + + Use the keyboard shortcut: ALT+d (on Windows and Linux) or Option+d (on Mac) + + + In the message box, enter your question and press Enter or select Send. 
If you have selected code in the editor, this selection is sent along with your question to the AI. This way you can ask questions about this code selection. For instance, Could you simplify this?

Perform standard tasks in the IDE from the context menu or by using slash commands

Get code explained, get code refactored, or get tests generated for selected code. To do so:

 Select code in your editor in VS Code or in the Web IDE.
 Type one of the following slash commands into the chat field: /explain, /refactor, or /tests. Alternatively, use the context menu to perform these tasks.

When you use one of the slash commands you can also add additional instructions to be considered, for example: /tests using the Boost.Test framework

Disable Chat in VS Code

To disable GitLab Duo Chat in VS Code:

 Go to Settings > Extensions > GitLab Workflow (GitLab VS Code Extension).
 Clear the Enable GitLab Duo Chat assistant checkbox.

Give feedback

Your feedback is important to us as we continually enhance your GitLab Duo Chat experience. Leaving feedback helps us customize the Chat for your needs and improve its performance for everyone.

To give feedback about a specific response, use the feedback buttons in the response message. Or, you can add a comment in the feedback issue.


2. GitLab Duo Chat

GitLab Duo Chat Contribute

Chat is a part of the GitLab Duo offering.

How Chat describes itself: “I am GitLab Duo Chat, an AI assistant focused on helping developers with DevSecOps, software development, source code, project management, CI/CD, and GitLab. Please feel free to engage me in these areas.”

Chat can answer different questions and perform certain tasks. It’s done with the help of prompts and tools.

To answer a user’s question asked in the Chat interface, GitLab sends a GraphQL request to the Rails backend. The Rails backend then sends instructions to the Large Language Model (LLM) via the AI Gateway.

Set up GitLab Duo Chat

The setup differs for SaaS and self-managed instances. We recommend starting with the process described for SaaS-only AI features.

 Setup SaaS-only AI features.
 Setup self-managed AI features.

Working with GitLab Duo Chat

Prompts are the most vital part of the GitLab Duo Chat system. Prompts are the instructions sent to the LLM to perform certain tasks.

The state of the prompts is the result of weeks of iteration. If you want to change any prompt in the current tool, you must put it behind a feature flag.

If you have any new or updated prompts, ask members of the AI Framework team to review them, because they have significant experience with prompts.

Troubleshooting

When working with Chat locally, you might run into an error. The most common problems are documented in this section. If you find an undocumented issue, you should document it in this section after you find a solution.

 Problem: There is no Chat button in the GitLab UI.
 Solution: Make sure your user is a part of a group with the Experiment and Beta features setting enabled.

 Problem: Chat replies with a “Forbidden by auth provider” error.
 Solution: The backend can’t access LLMs. Make sure your AI Gateway is set up correctly.

 Problem: Requests take too long to appear in the UI.
 Solution: Consider restarting Sidekiq by running gdk restart rails-background-jobs. If that doesn’t work, try gdk kill and then gdk start. Alternatively, you can bypass Sidekiq entirely.
To do that, temporarily replace Llm::CompletionWorker.perform_async statements with Llm::CompletionWorker.perform_inline.

Contributing to GitLab Duo Chat

From the code perspective, Chat is implemented in a similar fashion to other AI features. Read more about the GitLab AI Abstraction layer.

The Chat feature uses a zero-shot agent that includes a system prompt explaining how the large language model should interpret the question and provide an answer. The system prompt defines available tools that can be used to gather information to answer the user’s question.

The zero-shot agent receives the user’s question and decides which tools to use to gather information to answer it. It then makes a request to the large language model, which decides if it can answer directly or if it needs to use one of the defined tools.

The tools each have their own prompt that provides instructions to the large language model on how to use that tool to gather information. The tools are designed to be self-sufficient and avoid multiple requests back and forth to the large language model.

After the tools have gathered the required information, it is returned to the zero-shot agent, which asks the large language model if enough information has been gathered to provide the final answer to the user’s question.

Adding a new tool

To add a new tool:

 Create files for the tool in the ee/lib/gitlab/llm/chain/tools/ folder. Use existing tools like issue_identifier or resource_reader as a template.
 Write a class for the tool that includes:
  Name and description of what the tool does
  Example questions that would use this tool
  Instructions for the large language model on how to use the tool to gather information - so the main prompts that this tool is using.
 Test and iterate on the prompt using RSpec tests that make real requests to the large language model.
  Prompts require trial and error; the non-deterministic nature of working with LLMs can be surprising.
  Anthropic provides a good guide on working on prompts.
  GitLab guide on working with prompts.
 Implement code in the tool to parse the response from the large language model and return it to the zero-shot agent.
 Add the new tool name to the tools array in ee/lib/gitlab/llm/completions/chat.rb so the zero-shot agent knows about it.
 Add tests by adding questions to the test suite to which the new tool should respond. Iterate on the prompts as needed.

The key things to keep in mind are properly instructing the large language model through prompts and tool descriptions, keeping tools self-sufficient, and returning responses to the zero-shot agent. With some trial and error on prompts, adding new tools can expand the capabilities of the Chat feature.

Short videos covering this topic are available.

Debugging

To gather more insights about the full request, use the Gitlab::Llm::Logger file to debug logs. The default logging level on production is INFO and must not be used to log any data that could contain personal identifying information.

To follow the debugging messages related to the AI requests on the abstraction layer, you can use:

export LLM_DEBUG=1
gdk start
tail -f log/llm.log

Tracing with LangSmith

Tracing is a powerful tool for understanding the behavior of your LLM application. LangSmith has best-in-class tracing capabilities, and it’s integrated with GitLab Duo Chat.
Tracing can help you track down issues like:

 I’m new to GitLab Duo Chat and would like to understand what’s going on under the hood.
 Where exactly the process failed when you got an unexpected answer.
 Which process was a bottleneck for the latency.
 What tool was used for an ambiguous question.

Tracing is especially useful for evaluation that runs GitLab Duo Chat against a large dataset. LangSmith integration works with any tools, including Prompt Library and RSpec tests.

Use tracing with LangSmith

 note Tracing is available in Development and Testing environments only. It’s not available in the Production environment.

 Go to the LangSmith site and create an account.
 Create an API key.
 Set the following environment variables in GDK. You can define them in env.runit or export them directly in the terminal.

 export LANGCHAIN_TRACING_V2=true
 export LANGCHAIN_API_KEY=''
 export LANGCHAIN_PROJECT=''
 export LANGCHAIN_ENDPOINT='api.smith.langchain.com'
 export GITLAB_RAILS_RACK_TIMEOUT=180 # Extending puma timeout for using LangSmith with Prompt Library as the evaluation tool.

 Restart GDK.

Testing GitLab Duo Chat

Because the success of answers to user questions in GitLab Duo Chat heavily depends on the toolchain and prompts of each tool, it’s common that even a minor change in a prompt or a tool impacts processing of some questions.

To make sure that a change in the toolchain doesn’t break existing functionality, you can use the following RSpec tests to validate answers to some predefined questions when using real LLMs:

 ee/spec/lib/gitlab/llm/completions/chat_real_requests_spec.rb
This test validates that the zero-shot agent is selecting the correct tools for a set of Chat questions. It checks the tool selection but does not evaluate the quality of the Chat response.
 ee/spec/lib/gitlab/llm/chain/agents/zero_shot/qa_evaluation_spec.rb
This test evaluates the quality of a Chat response by passing the question asked along with the Chat-provided answer and context to at least two other LLMs for evaluation. This evaluation is limited to questions about issues and epics only. Learn more about the GitLab Duo Chat QA Evaluation Test.

If you are working on any changes to the GitLab Duo Chat logic, be sure to run the GitLab Duo Chat CI jobs in the merge request that contains your changes. Some of the CI jobs must be manually triggered.

Testing locally

To run the QA Evaluation test locally, the following environment variables must be exported:

export VERTEX_AI_EMBEDDINGS='true' # if using Vertex embeddings
export ANTHROPIC_API_KEY='' # can use dev value of Gitlab::CurrentSettings
export VERTEX_AI_CREDENTIALS='' # can set as dev value of Gitlab::CurrentSettings.vertex_ai_credentials
export VERTEX_AI_PROJECT='' # can use dev value of Gitlab::CurrentSettings.vertex_ai_project

REAL_AI_REQUEST=1 bundle exec rspec ee/spec/lib/gitlab/llm/completions/chat_real_requests_spec.rb

When you update the test questions that require documentation embeddings, make sure you generate a new fixture and commit it together with the change.

Testing with CI

The following CI jobs for the GitLab project run the tests tagged with real_ai_request:

 rspec-ee unit gitlab-duo-chat-zeroshot:
The job runs ee/spec/lib/gitlab/llm/completions/chat_real_requests_spec.rb. The job must be manually triggered and is allowed to fail.
 rspec-ee unit gitlab-duo-chat-qa:
The job runs the QA evaluation tests in ee/spec/lib/gitlab/llm/chain/agents/zero_shot/qa_evaluation_spec.rb. The job must be manually triggered and is allowed to fail. Read about the GitLab Duo Chat QA Evaluation Test.

 rspec-ee unit gitlab-duo-chat-qa-fast:
The job runs a single QA evaluation test from ee/spec/lib/gitlab/llm/chain/agents/zero_shot/qa_evaluation_spec.rb. The job is always run and not allowed to fail. Although there’s a chance that the QA test still might fail, it is cheap and fast to run and intended to prevent a regression in the QA test helpers.

 rspec-ee unit gitlab-duo pg14:
This job runs tests to ensure that the GitLab Duo features are functional without running into system errors. The job is always run and not allowed to fail. This job does NOT conduct evaluations. The quality of the feature is tested in the other jobs, such as the QA jobs.

Management of credentials and API keys for CI jobs

All API keys required to run the RSpec tests should be masked.

The exception is GCP credentials, as they contain characters that prevent them from being masked. Because the CI jobs need to run on MR branches, GCP credentials cannot be added as a protected variable and must be added as a regular CI variable. For security, the GCP credentials and the associated project added to the GitLab project’s CI must not be able to access any production infrastructure and must be sandboxed.

GitLab Duo Chat QA Evaluation Test

Evaluation of a natural language generation (NLG) system such as GitLab Duo Chat is a rapidly evolving area with many unanswered questions and ambiguities.

A practical working assumption is that LLMs can generate a reasonable answer when given a clear question and a context. With that assumption, we are exploring using LLMs as evaluators to determine the correctness of a sample of questions, to track the overall accuracy of GitLab Duo Chat’s responses, and to detect regressions in the feature.

For the discussions related to the topic, see the merge request and the issue.

The current QA evaluation test consists of the following components.

Epic and issue fixtures

The fixtures are replicas of the public issues and epics from projects and groups owned by GitLab. The internal notes were excluded when they were sampled. The fixtures have been committed into the canonical gitlab repository. See the snippet used to create the fixtures.

RSpec and helpers

 The RSpec file and the included helpers invoke the Chat service, an internal interface, with the question.
 After collecting the Chat service’s answer, the answer is injected into a prompt, also known as an “evaluation prompt”, that instructs an LLM to grade the correctness of the answer based on the question and a context. The context is simply a JSON serialization of the issue or epic being asked about in each question.
 The evaluation prompt is sent to two LLMs, Claude and Vertex.
 The evaluation responses of the LLMs are saved as JSON files.
 For each question, RSpec regex-matches for CORRECT or INCORRECT.

Collection and tracking of QA evaluation with CI/CD automation

The gitlab project’s CI configurations have been set up to run the RSpec tests, collect the evaluation responses as artifacts, and execute a reporter script that automates collection and tracking of evaluations.
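As a side note on the grading step listed above, where each evaluation response is regex-matched for CORRECT or INCORRECT: a minimal sketch of that kind of check might look like the following (the method name is hypothetical; the real logic lives in the GitLab test helpers):

 # Hypothetical illustration only: classify an LLM evaluation response.
 # INCORRECT is matched first because the pattern CORRECT would also match inside INCORRECT.
 def evaluation_verdict(response)
   return :incorrect if response.match?(/INCORRECT/)
   return :correct if response.match?(/CORRECT/)
   :unknown
 end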
When the rspec-ee unit gitlab-duo-chat-qa job runs in a pipeline for a merge request, the reporter script uses the evaluations saved as CI artifacts to generate a Markdown report and posts it as a note in the merge request.

To keep track of and compare QA test results over time, you must manually run the rspec-ee unit gitlab-duo-chat-qa job on the master branch:

 Visit the new pipeline page.
 Select “Run pipeline” to run a pipeline against the master branch.
 When the pipeline first starts, the rspec-ee unit gitlab-duo-chat-qa job under the “Test” stage will not be available. Wait a few minutes for other CI jobs to run and then manually kick off this job by selecting the “Play” icon.

When the test runs on master, the reporter script posts the generated report as an issue, saves the evaluation artifacts as a snippet, and updates the tracking issue in GitLab-org/ai-powered/ai-framework/qa-evaluation#1 in the project GitLab-org/ai-powered/ai-framework/qa-evaluation.

GraphQL Subscription

The GraphQL Subscription for Chat behaves slightly differently because it’s user-centric. A user could have Chat open on multiple browser tabs, or also in their IDE. We therefore need to broadcast messages to multiple clients to keep them in sync. The aiAction mutation with the chat action behaves as follows:

 All complete Chat messages (including messages from the user) are broadcast with the userId, aiAction: ""chat"" as identifier.
 Chunks from streamed Chat messages and currently used tools are broadcast with the userId, resourceId, and the clientSubscriptionId from the mutation as identifier.

Note that we still broadcast chat messages and currently used tools using the userId and resourceId as identifier. However, this is deprecated and should no longer be used. We want to remove resourceId on the subscription as part of this issue.

Testing GitLab Duo Chat in production-like environments

GitLab Duo Chat is enabled in the Staging and Staging Ref GitLab environments.

Because GitLab Duo Chat is currently only available to members of groups in the Premium and Ultimate tiers, Staging Ref may be an easier place to test changes as a GitLab team member, because you can make yourself an instance Admin in Staging Ref and, as an Admin, easily create licensed groups for testing.

Product Analysis

To better understand how the feature is used, each production user input message is analyzed using an LLM and Ruby, and the analysis is tracked as a Snowplow event.

The analysis can contain any of the attributes defined in the latest iglu schema.

 All possible “category” and “detailed_category” values are listed here.
 The following is yet to be implemented:
  “is_proper_sentence”
 The following are deprecated:
  “number_of_questions_in_history”
  “length_of_questions_in_history”
  “time_since_first_question”

Dashboards can be created to visualize the collected data.

How access_duo_chat policy works

The table below shows the requirements that must be fulfilled for the access_duo_chat policy to return true in different contexts.
+ + + + +   + on SaaS + on Self-managed + + + + + for user (user.can?(:access_duo_chat)) + User need to belong to at least one group on Ultimate tier and with experiment_and_beta_features group setting switched on + Instance need to be on Ultimate tier and instance need to have instance_level_ai_beta_features_enabled setting switched on + + + for user in group context (user.can?(:access_duo_chat, group)) + User need to be a member of that group, root ancestor group of this group needs to be on Ultimate tier and with experiment_and_beta_features group setting switched on + Instance need to be on Ultimate tier and instance need to have instance_level_ai_beta_features_enabled setting switched on, user needs to have at least read permission to group + + + for user in project context (user.can?(:access_duo_chat, project)) + User need to be a member of that project, project needs to have root ancestor group on Ultimate tier and with experiment_and_beta_features group setting switched on + Instance need to be on Ultimate tier and instance need to have instance_level_ai_beta_features_enabled setting switched on, user needs to have at least read permission to project + + + + + +" +what kinds of things can GitLab Duo Chat help with?,,"1. GitLab Duo Chat + + + +GitLab Duo Chat + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated +Status: Beta + + +History + + + + + +Introduced as an Experiment for SaaS in GitLab 16.0. + Changed to Beta for SaaS in GitLab 16.6. + +Introduced as a Beta for self-managed in GitLab 16.8. + Changed from Ultimate to Premium tier in GitLab 16.9. + + + + + + +GitLab Duo Chat is your personal AI-powered assistant for boosting productivity. +It can assist various tasks of your daily work with the AI-generated content. +Here are the examples of use cases: + + + + + Feature + Use case example + Supported interfaces + Supported deployments + + + + + Ask about GitLab + I want to know how to create an issue in GitLab. + GitLab, VS Code, and Web IDE 1 + + GitLab.com + + + Ask about a specific issue + I want to summarize this issue. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Ask about a specific epic + I want to summarize this epic. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Ask about code + I want to understand how this code works. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Ask about CI/CD + I want to create a new CI/CD pipeline configuration. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Explain code in the IDE + I want to understand how this code works. + VS Code and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Refactor code in the IDE + I want to refactor this code. + VS Code and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Write tests in the IDE + I want to write a test for this code. + VS Code and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + + + +Footnotes: + + GitLab Duo Chat is not available in Web IDE on self-managed + + + + + + note This is a Beta feature. We’re continuously extending the capabilities and reliability of the responses. + + +Watch a demo + + + + View how to setup and use GitLab Duo Chat. + + + + + +What GitLab Duo Chat can help with + + +GitLab Duo Chat can help in a variety of areas. + +Ask about GitLab + + + +History + + + + + +Introduced for SaaS in GitLab 16.0. 
+ + + + + + +You can ask questions about how GitLab works. Things like: + + + Explain the concept of a 'fork' in a concise manner. + Provide step-by-step instructions on how to reset a user's password. + + + + note This feature is not currently supported in self-managed instances. +See this epic for more information. + + +Ask about a specific issue + + + +History + + + + + +Introduced for SaaS in GitLab 16.0. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can ask about a specific GitLab issue. For example: + + + Generate a summary for the issue identified via this link: + When you are viewing an issue in GitLab, you can ask Generate a concise summary of the current issue. + + How can I improve the description of so that readers understand the value and problems to be solved? + + +Ask about a specific epic + + + +History + + + + + +Introduced for SaaS in GitLab 16.3. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can ask about a specific GitLab epic. For example: + + + Generate a summary for the epic identified via this link: + When you are viewing an epic in GitLab, you can ask Generate a concise summary of the opened epic. + + What are the unique use cases raised by commenters in ? + + +Ask about code + + + +History + + + + + +Introduced for SaaS in GitLab 16.1. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can also ask GitLab Duo Chat to generate code: + + + Write a Ruby function that prints 'Hello, World!' when called. + Develop a JavaScript program that simulates a two-player Tic-Tac-Toe game. Provide both game logic and user interface, if applicable. + Create a regular expression for parsing IPv4 and IPv6 addresses in Python. + Generate code for parsing a syslog log file in Java. Use regular expressions when possible, and store the results in a hash map. + Create a product-consumer example with threads and shared memory in C++. Use atomic locks when possible. + Generate Rust code for high performance gRPC calls. Provide a source code example for a server and client. + + +And you can ask GitLab Duo Chat to explain code: + + + Provide a clear explanation of the given Ruby code: def sum(a, b) a + b end. Describe what this code does and how it works. + + +Alternatively, you can use the /explain command to explain the selected code in your editor. + +For more practical examples, see the GitLab Duo examples. + +Ask about errors + + +Programming languages that require compiling the source code may throw cryptic error messages. Similarly, a script or a web application could throw a stack trace. You can ask GitLab Duo Chat by prefixing the copied error message with, for example, Please explain this error message:. Add the specific context, like the programming language. + + + Explain this error message in Java: Int and system cannot be resolved to a type + Explain when this C function would cause a segmentation fault: sqlite3_prepare_v2() + Explain what would cause this error in Python: ValueError: invalid literal for int() + Why is ""this"" undefined in VueJS? Provide common error cases, and explain how to avoid them. + How to debug a Ruby on Rails stacktrace? Share common strategies and an example exception. + + +For more practical examples, see the GitLab Duo examples. + +Ask about CI/CD + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. 
+ + + + + + +You can ask GitLab Duo Chat to create a CI/CD configuration: + + + Create a .gitlab-ci.yml configuration file for testing and building a Ruby on Rails application in a GitLab CI/CD pipeline. + Create a CI/CD configuration for building and linting a Python application. + Create a CI/CD configuration to build and test Rust code. + Create a CI/CD configuration for C++. Use gcc as compiler, and cmake as build tool. + Create a CI/CD configuration for VueJS. Use npm, and add SAST security scanning. + Generate a security scanning pipeline configuration, optimized for Java. + + +You can also ask to explain specific job errors by copy-pasting the error message, prefixed with Please explain this CI/CD job error message, in the context of :: + + + Please explain this CI/CD job error message in the context of a Go project: build.sh: line 14: go command not found + + +Alternatively, you can use root cause analysis in CI/CD. + +For more practical examples, see the GitLab Duo examples. + +Explain code in the IDE + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + + + note This feature is available in VS Code and the Web IDE only. + + +/explain is a special command to explain the selected code in your editor. +You can also add additional instructions to be considered, for example: /explain the performance +See Use GitLab Duo Chat in VS Code for more information. + + + /explain focus on the algorithm + /explain the performance gains or losses using this code + +/explain the object inheritance (classes, object-oriented) + +/explain why a static variable is used here (C++) + +/explain how this function would cause a segmentation fault (C) + +/explain how concurrency works in this context (Go) + +/explain how the request reaches the client (REST API, database) + + +For more practical examples, see the GitLab Duo examples. + +Refactor code in the IDE + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + + + note This feature is available in VS Code and the Web IDE only. + + +/refactor is a special command to generate a refactoring suggestion for the selected code in your editor. +You can include additional instructions to be considered. For example: + + + Use a specific coding pattern, for example /refactor with ActiveRecord or /refactor into a class providing static functions. + Use a specific library, for example /refactor using mysql. + Use a specific function/algorithm, for example /refactor into a stringstream with multiple lines in C++. + Refactor to a different programming language, for example /refactor to TypeScript. + Focus on performance, for example /refactor improving performance. + Focus on potential vulnerabilities, for example /refactor avoiding memory leaks and exploits. + + +See Use GitLab Duo Chat in the VS Code for more information. + +For more practical examples, see the GitLab Duo examples. + +Write tests in the IDE + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + + + note This feature is available in VS Code and the Web IDE only. + + +/tests is a special command to generate a testing suggestion for the selected code in your editor. +You can also add additional instructions to be considered, for example: /tests using the Boost.Test framework +See Use GitLab Duo Chat in the VS Code for more information. 
+ + + Use a specific test framework, for example /tests using the Boost.test framework (C++) or /tests using Jest (JavaScript). + Focus on extreme test cases, for example /tests focus on extreme cases, force regression testing. + Focus on performance, for example /tests focus on performance. + Focus on regressions and potential exploits, for example /tests focus on regressions and potential exploits. + + +For more practical examples, see the GitLab Duo examples. + +Ask follow up questions + + +You can ask follow-up questions to delve deeper into the topic or task at hand. +This helps you get more detailed and precise responses tailored to your specific needs, +whether it’s for further clarification, elaboration, or additional assistance. + +A follow-up to the question Write a Ruby function that prints 'Hello, World!' when called could be: + + + Can you also explain how I can call and execute this Ruby function in a typical Ruby environment, such as the command line? + + +A follow-up to the question How to start a C# project? could be: + + + Can you also please explain how to add a .gitignore and .gitlab-ci.yml file for C#? + + +For more practical examples, see the GitLab Duo examples. + +Enable GitLab Duo Chat + + +For SaaS users + + +To use this feature, at least one group you’re a member of must +have the experiment and beta features setting enabled. + +You can ask questions about resources that belong only to groups where this setting is enabled. + +Troubleshoot Chat access + + +If you have access to chat responses you did not expect, you might be part of +a group that has the Use Experiment and Beta features setting enabled. +Review the list of your groups and verify which ones you have access to. + +GitLab.com administrators can verify your access by running this snippet in the Rails console: + +u = User.find_by_username($USERNAME) +u.member_namespaces.namespace_settings_with_ai_features_enabled.with_ai_supported_plan(:ai_chat) + + +You can ask specific questions about group resources (like “summarize this issue”) when this feature is enabled. + +For self-managed users + + + + note Usage of GitLab Duo Chat is governed by the GitLab Testing Agreement. +Learn about data usage when using GitLab Duo Chat. + + +Prerequisites: + + + You have GitLab version 16.8 or later. + The Premium or Ultimate license is activated in your GitLab instance by using cloud licensing. + Your firewalls and HTTP proxy servers allow outbound connections +to cloud.gitlab.com. To use an HTTP proxy, both +gitLab _workhorse and gitLab_rails have the necessary +web proxy environment variables set. + All of the users in your instance have the latest version of their IDE extension. + You are an administrator. + + +To enable GitLab Duo Chat for your self-managed GitLab instance: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > General. + Expand AI-powered features and select Enable Experiment and Beta AI-powered features. + Select Save changes. + To make sure GitLab Duo Chat works immediately, you must +manually synchronize your subscription. + + +Manually synchronize your subscription + + +You must manually synchronize your subscription if either: + + + You have just purchased a subscription for the Premium or Ultimate tier and have upgraded to GitLab 16.8. + You already have a subscription for the Premium or Ultimate tier and have upgraded to GitLab 16.8. + + +Without the manual synchronization, it might take up to 24 hours to activate GitLab Duo Chat on your instance. 
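If your instance reaches cloud.gitlab.com through an HTTP proxy (one of the prerequisites above), the web proxy environment variables for gitlab_rails and gitlab_workhorse are typically set in /etc/gitlab/gitlab.rb on Linux package installations. A minimal sketch with illustrative values only (the proxy address is an assumption; check the Linux package documentation for the exact settings that apply to your version):

 # /etc/gitlab/gitlab.rb - illustrative proxy settings, adjust to your environment
 gitlab_rails['env'] = { 'http_proxy' => 'http://proxy.example.com:8080', 'https_proxy' => 'http://proxy.example.com:8080', 'no_proxy' => 'localhost,127.0.0.1' }
 gitlab_workhorse['env'] = { 'http_proxy' => 'http://proxy.example.com:8080', 'https_proxy' => 'http://proxy.example.com:8080', 'no_proxy' => 'localhost,127.0.0.1' }
 # Apply the change with: sudo gitlab-ctl reconfigure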
+ +Use GitLab Duo Chat in the GitLab UI + + + + In the lower-left corner, select the Help icon. +The new left sidebar must be enabled. + Select GitLab Duo Chat. A drawer opens on the right side of your screen. + Enter your question in the chat input box and press Enter or select Send. It may take a few seconds for the interactive AI chat to produce an answer. + You can ask a follow-up question. + If you want to ask a new question unrelated to the previous conversation, you may receive better answers if you clear the context by typing /reset into the input box and selecting Send. + + + + note Only the last 50 messages are retained in the chat history. The chat history expires 3 days after last use. + + +Delete all conversations + + +To delete all previous conversations: + + + In the text box, type /clean and select Send. + + +Use GitLab Duo Chat in the Web IDE + + + +Tier: Premium, Ultimate +Status: Experiment + + +History + + + + + Introduced in GitLab 16.6 as an Experiment + + + + + + + +To use GitLab Duo Chat in the Web IDE on GitLab: + + + Open the Web IDE: + + On the left sidebar, select Search or go to and find your project. + Select a file. Then in the upper right, select Edit > Open in Web IDE. + + + Then open Chat by using one of the following methods: + + On the left sidebar, select GitLab Duo Chat. + In the file that you have open in the editor, select some code. + + Right-click and select GitLab Duo Chat. + Select Explain selected code or Generate Tests. + + + Use the keyboard shortcut: ALT+d (on Windows and Linux) or Option+d (on Mac) + + + In the message box, enter your question and press Enter or select Send. + + +If you have selected code in the editor, this selection is sent along with your question to the AI. This way you can ask questions about this code selection. For instance, Could you simplify this?. + + + note GitLab Duo Chat is not available in the Web IDE on self-managed. + + +Use GitLab Duo Chat in VS Code + + + +Tier: Premium, Ultimate +Status: Experiment + + +History + + + + + Introduced in GitLab 16.6 as an Experiment. + + + + + + +To use GitLab Duo Chat in GitLab Workflow extension for VS Code: + + + Install and set up the Workflow extension for VS Code: + + In VS Code, download and Install the GitLab Workflow extension for VS Code. + Configure the GitLab Workflow extension. + + + In VS Code, open a file. The file does not need to be a file in a Git repository. + Open Chat by using one of the following methods: + + On the left sidebar, select GitLab Duo Chat. + In the file that you have open in the editor, select some code. + + Right-click and select GitLab Duo Chat. + Select Explain selected code or Generate Tests. + + + Use the keyboard shortcut: ALT+d (on Windows and Linux) or Option+d (on Mac) + + + In the message box, enter your question and press Enter or select Send. + + +If you have selected code in the editor, this selection is sent along with your question to the AI. This way you can ask questions about this code selection. For instance, Could you simplify this?. + +Perform standard task in the IDE from the context menu or by using slash commands + + +Get code explained, code refactored or get tests generated for code. To do so: + + + Select code in your editor in VS Code or in the Web IDE. + Type one the following slash commands into the chat field: /explain, /refactor or /tests. Alternatively, use the context menu to perform these tasks. 
+ + +When you use one of the slash commands you can also add additional instructions to be considered, for example: /tests using the Boost.Test framework + +Disable Chat in VS Code + + +To disable GitLab Duo Chat in VS Code: + + + Go to Settings > Extensions > GitLab Workflow (GitLab VS Code Extension). + Clear the Enable GitLab Duo Chat assistant checkbox. + + +Give feedback + + +Your feedback is important to us as we continually enhance your GitLab Duo Chat experience. +Leaving feedback helps us customize the Chat for your needs and improve its performance for everyone. + +To give feedback about a specific response, use the feedback buttons in the response message. +Or, you can add a comment in the feedback issue. + + +2. GitLab Duo Chat + + + +GitLab Duo Chat Contribute + + +Chat is a part of the GitLab Duo offering. + +How Chat describes itself: “I am GitLab Duo Chat, an AI assistant focused on helping developers with DevSecOps, +software development, source code, project management, CI/CD, and GitLab. Please feel free to engage me in these areas.” + +Chat can answer different questions and perform certain tasks. It’s done with the help of prompts and tools. + +To answer a user’s question asked in the Chat interface, GitLab sends a GraphQL request to the Rails backend. +Rails backend sends then instructions to the Large Language Model (LLM) via the AI Gateway. + +Set up GitLab Duo Chat + + +There is a difference in the setup for Saas and self-managed instances. +We recommend to start with a process described for SaaS-only AI features. + + + +Setup SaaS-only AI features. + +Setup self-managed AI features. + + +Working with GitLab Duo Chat + + +Prompts are the most vital part of GitLab Duo Chat system. Prompts are the instructions sent to the LLM to perform certain tasks. + +The state of the prompts is the result of weeks of iteration. If you want to change any prompt in the current tool, you must put it behind a feature flag. + +If you have any new or updated prompts, ask members of AI Framework team to review, because they have significant experience with them. + +Troubleshooting + + +When working with Chat locally, you might run into an error. Most commons problems are documented in this section. +If you find an undocumented issue, you should document it in this section after you find a solution. + + + + + Problem + Solution + + + + + There is no Chat button in the GitLab UI. + Make sure your user is a part of a group with enabled Experimental and Beta features. + + + Chat replies with “Forbidden by auth provider” error. + Backend can’t access LLMs. Make sure your AI Gateway is setup correctly. + + + Requests takes too long to appear in UI + Consider restarting Sidekiq by running gdk restart rails-background-jobs. If that doesn’t work, try gdk kill and then gdk start. Alternatively, you can bypass Sidekiq entirely. To do that temporary alter Llm::CompletionWorker.perform_async statements with Llm::CompletionWorker.perform_inline + + + + + +Contributing to GitLab Duo Chat + + +From the code perspective, Chat is implemented in the similar fashion as other AI features. Read more about GitLab AI Abstraction layer. + +The Chat feature uses a zero-shot agent that includes a system prompt explaining how the large language model should interpret the question and provide an +answer. The system prompt defines available tools that can be used to gather +information to answer the user’s question. 
+ +The zero-shot agent receives the user’s question and decides which tools to use to gather information to answer it. +It then makes a request to the large language model, which decides if it can answer directly or if it needs to use one +of the defined tools. + +The tools each have their own prompt that provides instructions to the large language model on how to use that tool to +gather information. The tools are designed to be self-sufficient and avoid multiple requests back and forth to +the large language model. + +After the tools have gathered the required information, it is returned to the zero-shot agent, which asks the large language +model if enough information has been gathered to provide the final answer to the user’s question. + +Adding a new tool + + +To add a new tool: + + + + Create files for the tool in the ee/lib/gitlab/llm/chain/tools/ folder. Use existing tools like issue_identifier or +resource_reader as a template. + + + Write a class for the tool that includes: + + + Name and description of what the tool does + Example questions that would use this tool + Instructions for the large language model on how to use the tool to gather information - so the main prompts that +this tool is using. + + + Test and iterate on the prompt using RSpec tests that make real requests to the large language model. + + Prompts require trial and error, the non-deterministic nature of working with LLM can be surprising. + Anthropic provides good guide on working on prompts. + GitLab guide on working with prompts. + + + + Implement code in the tool to parse the response from the large language model and return it to the zero-shot agent. + + + Add the new tool name to the tools array in ee/lib/gitlab/llm/completions/chat.rb so the zero-shot agent knows about it. + + Add tests by adding questions to the test-suite for which the new tool should respond to. Iterate on the prompts as needed. + + +The key things to keep in mind are properly instructing the large language model through prompts and tool descriptions, +keeping tools self-sufficient, and returning responses to the zero-shot agent. With some trial and error on prompts, +adding new tools can expand the capabilities of the Chat feature. + +There are available short videos covering this topic. + +Debugging + + +To gather more insights about the full request, use the Gitlab::Llm::Logger file to debug logs. +The default logging level on production is INFO and must not be used to log any data that could contain personal identifying information. + +To follow the debugging messages related to the AI requests on the abstraction layer, you can use: + +export LLM_DEBUG=1 +gdk start +tail -f log/llm.log + + +Tracing with LangSmith + + +Tracing is a powerful tool for understanding the behavior of your LLM application. +LangSmith has best-in-class tracing capabilities, and it’s integrated with GitLab Duo Chat. Tracing can help you track down issues like: + + + I’m new to GitLab Duo Chat and would like to understand what’s going on under the hood. + Where exactly the process failed when you got an unexpected answer. + Which process was a bottle neck of the latency. + What tool was used for an ambiguous question. + + + + +Tracing is especially useful for evaluation that runs GitLab Duo Chat against large dataset. +LangSmith integration works with any tools, including Prompt Library +and RSpec tests. + +Use tracing with LangSmith + + + + note Tracing is available in Development and Testing environment only. 
+It’s not available in Production environment. + + + + Access to LangSmith site and create an account. + Create an API key. + + Set the following environment variables in GDK. You can define it in env.runit or directly export in the terminal. + + + export LANGCHAIN_TRACING_V2=true + export LANGCHAIN_API_KEY='' + export LANGCHAIN_PROJECT='' + export LANGCHAIN_ENDPOINT='api.smith.langchain.com' + export GITLAB_RAILS_RACK_TIMEOUT=180 # Extending puma timeout for using LangSmith with Prompt Library as the evaluation tool. + + + Restart GDK. + + +Testing GitLab Duo Chat + + +Because the success of answers to user questions in GitLab Duo Chat heavily depends +on toolchain and prompts of each tool, it’s common that even a minor change in a +prompt or a tool impacts processing of some questions. + +To make sure that a change in the toolchain doesn’t break existing +functionality, you can use the following RSpec tests to validate answers to some +predefined questions when using real LLMs: + + + +ee/spec/lib/gitlab/llm/completions/chat_real_requests_spec.rb +This test validates that the zero-shot agent is selecting the correct tools +for a set of Chat questions. It checks on the tool selection but does not +evaluate the quality of the Chat response. + +ee/spec/lib/gitlab/llm/chain/agents/zero_shot/qa_evaluation_spec.rb +This test evaluates the quality of a Chat response by passing the question +asked along with the Chat-provided answer and context to at least two other +LLMs for evaluation. This evaluation is limited to questions about issues and +epics only. Learn more about the GitLab Duo Chat QA Evaluation Test. + + +If you are working on any changes to the GitLab Duo Chat logic, be sure to run +the GitLab Duo Chat CI jobs the merge request that contains +your changes. Some of the CI jobs must be manually triggered. + +Testing locally + + +To run the QA Evaluation test locally, the following environment variables +must be exported: + +export VERTEX_AI_EMBEDDINGS='true' # if using Vertex embeddings +export ANTHROPIC_API_KEY='' # can use dev value of Gitlab::CurrentSettings +export VERTEX_AI_CREDENTIALS='' # can set as dev value of Gitlab::CurrentSettings.vertex_ai_credentials +export VERTEX_AI_PROJECT='' # can use dev value of Gitlab::CurrentSettings.vertex_ai_project + +REAL_AI_REQUEST=1 bundle exec rspec ee/spec/lib/gitlab/llm/completions/chat_real_requests_spec.rb + + +When you update the test questions that require documentation embeddings, +make sure you generate a new fixture and +commit it together with the change. + +Testing with CI + + +The following CI jobs for GitLab project run the tests tagged with real_ai_request: + + + + rspec-ee unit gitlab-duo-chat-zeroshot: +the job runs ee/spec/lib/gitlab/llm/completions/chat_real_requests_spec.rb. +The job must be manually triggered and is allowed to fail. + + + rspec-ee unit gitlab-duo-chat-qa: +The job runs the QA evaluation tests in +ee/spec/lib/gitlab/llm/chain/agents/zero_shot/qa_evaluation_spec.rb. +The job must be manually triggered and is allowed to fail. +Read about GitLab Duo Chat QA Evaluation Test. + + + rspec-ee unit gitlab-duo-chat-qa-fast: +The job runs a single QA evaluation test from ee/spec/lib/gitlab/llm/chain/agents/zero_shot/qa_evaluation_spec.rb. +The job is always run and not allowed to fail. Although there’s a chance that the QA test still might fail, +it is cheap and fast to run and intended to prevent a regression in the QA test helpers. 
+ + + rspec-ee unit gitlab-duo pg14: +This job runs tests to ensure that the GitLab Duo features are functional without running into system errors. +The job is always run and not allowed to fail. +This job does NOT conduct evaluations. The quality of the feature is tested in the other jobs such as QA jobs. + + + +Management of credentials and API keys for CI jobs + + +All API keys required to run the rspecs should be masked + +The exception is GCP credentials as they contain characters that prevent them from being masked. +Because the CI jobs need to run on MR branches, GCP credentials cannot be added as a protected variable +and must be added as a regular CI variable. +For security, the GCP credentials and the associated project added to +GitLab project’s CI must not be able to access any production infrastructure and sandboxed. + +GitLab Duo Chat QA Evaluation Test + + +Evaluation of a natural language generation (NLG) system such as +GitLab Duo Chat is a rapidly evolving area with many unanswered questions and ambiguities. + +A practical working assumption is LLMs can generate a reasonable answer when given a clear question and a context. +With the assumption, we are exploring using LLMs as evaluators +to determine the correctness of a sample of questions +to track the overall accuracy of GitLab Duo Chat’s responses and detect regressions in the feature. + +For the discussions related to the topic, +see the merge request +and the issue. + +The current QA evaluation test consists of the following components. + +Epic and issue fixtures + + +The fixtures are the replicas of the public issues and epics from projects and groups owned by GitLab. +The internal notes were excluded when they were sampled. The fixtures have been commited into the canonical gitlab repository. +See the snippet used to create the fixtures. + +RSpec and helpers + + + + + The RSpec file +and the included helpers invoke the Chat service, an internal interface with the question. + + + After collecting the Chat service’s answer, +the answer is injected into a prompt, also known as an “evaluation prompt”, that instructs +a LLM to grade the correctness of the answer based on the question and a context. +The context is simply a JSON serialization of the issue or epic being asked about in each question. + + + The evaluation prompt is sent to two LLMs, Claude and Vertex. + + + The evaluation responses of the LLMs are saved as JSON files. + + + For each question, RSpec will regex-match for CORRECT or INCORRECT. + + + +Collection and tracking of QA evaluation with CI/CD automation + + +The gitlab project’s CI configurations have been setup to run the RSpec, +collect the evaluation response as artifacts and execute +a reporter script +that automates collection and tracking of evaluations. + +When rspec-ee unit gitlab-duo-chat-qa job runs in a pipeline for a merge request, +the reporter script uses the evaluations saved as CI artifacts +to generate a Markdown report and posts it as a note in the merge request. + +To keep track of and compare QA test results over time, you must manually +run the rspec-ee unit gitlab-duo-chat-qa on the master the branch: + + + Visit the new pipeline page. + Select “Run pipeline” to run a pipeline against the master branch + When the pipeline first starts, the rspec-ee unit gitlab-duo-chat-qa job under the +“Test” stage will not be available. Wait a few minutes for other CI jobs to +run and then manually kick off this job by selecting the “Play” icon. 
When the test runs on master, the reporter script posts the generated report as an issue, saves the evaluation artifacts as a snippet, and updates the tracking issue in GitLab-org/ai-powered/ai-framework/qa-evaluation#1 in the project GitLab-org/ai-powered/ai-framework/qa-evaluation.

GraphQL Subscription

The GraphQL Subscription for Chat behaves slightly differently because it’s user-centric. A user could have Chat open on multiple browser tabs, or also in their IDE. We therefore need to broadcast messages to multiple clients to keep them in sync. The aiAction mutation with the chat action behaves as follows:

 All complete Chat messages (including messages from the user) are broadcast with the userId, aiAction: ""chat"" as identifier.
 Chunks from streamed Chat messages and currently used tools are broadcast with the userId, resourceId, and the clientSubscriptionId from the mutation as identifier.

Note that we still broadcast chat messages and currently used tools using the userId and resourceId as identifier. However, this is deprecated and should no longer be used. We want to remove resourceId on the subscription as part of this issue.

Testing GitLab Duo Chat in production-like environments

GitLab Duo Chat is enabled in the Staging and Staging Ref GitLab environments.

Because GitLab Duo Chat is currently only available to members of groups in the Premium and Ultimate tiers, Staging Ref may be an easier place to test changes as a GitLab team member, because you can make yourself an instance Admin in Staging Ref and, as an Admin, easily create licensed groups for testing.

Product Analysis

To better understand how the feature is used, each production user input message is analyzed using an LLM and Ruby, and the analysis is tracked as a Snowplow event.

The analysis can contain any of the attributes defined in the latest iglu schema.

 All possible “category” and “detailed_category” values are listed here.
 The following is yet to be implemented:
  “is_proper_sentence”
 The following are deprecated:
  “number_of_questions_in_history”
  “length_of_questions_in_history”
  “time_since_first_question”

Dashboards can be created to visualize the collected data.

How access_duo_chat policy works

The table below shows the requirements that must be fulfilled for the access_duo_chat policy to return true in different contexts.
+
 +
 +
   
 + on SaaS
 + on Self-managed
 +
 +
 +
 + for user (user.can?(:access_duo_chat))
 + User needs to belong to at least one group on the Ultimate tier with the experiment_and_beta_features group setting switched on
 + Instance needs to be on the Ultimate tier and needs to have the instance_level_ai_beta_features_enabled setting switched on
 +
 +
 + for user in group context (user.can?(:access_duo_chat, group))
 + User needs to be a member of that group, and the root ancestor group of this group needs to be on the Ultimate tier with the experiment_and_beta_features group setting switched on
 + Instance needs to be on the Ultimate tier and needs to have the instance_level_ai_beta_features_enabled setting switched on, and the user needs at least read permission for the group
 +
 +
 + for user in project context (user.can?(:access_duo_chat, project))
 + User needs to be a member of that project, and the project needs to have a root ancestor group on the Ultimate tier with the experiment_and_beta_features group setting switched on
 + Instance needs to be on the Ultimate tier and needs to have the instance_level_ai_beta_features_enabled setting switched on, and the user needs at least read permission for the project
 +
 +
 +
 +
"
"is the 50,000 compute units that come with ultimate for the entire namespace or per user?",,"1. Compute quota



Compute quota



Tier: Premium, Ultimate
Offering: GitLab.com, Self-managed


History



 Renamed from “CI/CD minutes” to “compute quota” or “compute minutes” in GitLab 16.1.




 
 note The term CI/CD minutes is being renamed to compute minutes. During this transition, you might see references in the UI and documentation to CI/CD minutes, CI minutes, pipeline minutes, CI pipeline minutes, pipeline minutes quota, compute credits, compute units, and compute minutes. For more information, see epic 2150.
 

Administrators can limit the amount of time that projects can use to run jobs on
instance runners each month. This limit
is tracked with a compute quota.

By default, one minute of execution time by a single job uses
one compute minute. The total execution time for a pipeline is
the sum of all its jobs’ durations.
Jobs can run concurrently, so the total usage can be higher than the
end-to-end duration of a pipeline.

On GitLab.com:


 Compute quotas are enabled for all projects, but certain
projects consume compute minutes at a slower rate.
 The base monthly compute quota for a GitLab.com namespace
is determined by its license tier.
 You can purchase additional compute minutes
if you need more than the amount of compute in your monthly quota.


On self-managed GitLab instances:


 Compute quotas are disabled by default.
 When enabled, compute quotas apply to private projects only.
 Administrators can assign more compute minutes
if a namespace uses all its monthly quota.


Trigger jobs do not execute on runners, so they do not
consume compute minutes, even when using strategy:depend
to wait for the downstream pipeline status.
The triggered downstream pipeline consumes compute minutes the same as other pipelines.

Project runners are not subject to a compute quota.

Set the compute quota for all namespaces


History



 
Moved to GitLab Premium in 13.9.




 

By default, GitLab instances do not have a compute quota.
The default value for the quota is 0, which is unlimited.
However, you can change this default value.

Prerequisites:

 You must be a GitLab administrator.
+ + +To change the default quota that applies to all namespaces: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Continuous Integration and Deployment. + In the Compute quota box, enter a limit. + Select Save changes. + + +If a quota is already defined for a specific namespace, this value does not change that quota. + +Set the compute quota for a specific namespace + + + +History + + + + + +Moved to GitLab Premium in 13.9. + + + + + + +You can override the global value and set a compute quota +for a specific namespace. + +Prerequisites: + + + You must be a GitLab administrator. + + +To set a compute quota for a namespace: + + + On the left sidebar, at the bottom, select Admin Area. + Select Overview > Groups. + For the group you want to update, select Edit. + In the Compute quota box, enter the maximum number of compute minutes. + Select Save changes. + + +You can also use the update group API or the +update user API instead. + + + note You can set a compute quota for only top-level groups or user namespaces. +If you set a quota for a subgroup, it is not used. + + +View compute usage + + +Prerequisites: + + + You must have access to the build to view the total usage and quota summary for a namespace associated with a build. + Access to Usage Quotas page is based on your role in the associated namespace or group. + + +View Usage Quota Reports for a group + + + +History + + + + + Displaying instance runners duration per project introduced in GitLab 15.0. + + + + + + +Prerequisites: + + + You must have the Owner role for the group. + + +To view compute usage for your group: + + + On the left sidebar, select Search or go to and +find your group. The group must not be a subgroup. + Select Settings > Usage Quotas. + Select the Pipelines tab. + + +The projects list shows projects with compute usage or instance runners usage +in the current month only. The list includes all projects in the namespace and its +subgroups, sorted in descending order of compute usage. + +View Usage Quota reports for a personal namespace + + + +History + + + + + Displaying instance runners duration introduced in GitLab 15.0. + + + + + + +Prerequisites: + + + The namespace must be your personal namespace. + + +You can view the compute usage for a personal namespace: + + + On the left sidebar, select your avatar. + Select Edit profile. + On the left sidebar, select Usage Quotas. + + +The projects list shows personal projects +with compute usage or instance runners usage in the current month only. The list +is sorted in descending order of compute usage. + +Purchase additional compute minutes + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com + +If you’re using GitLab SaaS, you can purchase additional packs of compute minutes. +These additional compute minutes: + + + Are used only after the monthly quota included in your subscription runs out. + Are carried over to the next month, if any remain at the end of the month. + Are valid for 12 months from date of purchase or until all compute minutes are consumed, whichever comes first. Expiry of compute minutes is not enforced. + + +For example, with a GitLab SaaS Premium license: + + + You have 10,000 monthly compute minutes. + You purchase an additional 5,000 compute minutes. + Your total limit is 15,000 compute minutes. + + +If you use 13,000 compute minutes during the month, the next month your additional compute minutes become +2,000. 
If you use 9,000 compute minutes during the month, your additional compute minutes remain the same. + +Additional compute minutes bought on a trial subscription are available after the trial ends or upgrading to a paid plan. + +You can find pricing for additional compute minutes on the +GitLab Pricing page. + +Purchase compute minutes for a group + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com + +Prerequisites: + + + You must have the Owner role for the group. + + +You can purchase additional compute minutes for your group. +You cannot transfer purchased compute minutes from one group to another, +so be sure to select the correct group. + + + On the left sidebar, select Search or go to and find your group. + Select Settings > Usage Quotas. + Select Pipelines. + Select Buy additional compute minutes. + Complete the details of the transaction. + + +After your payment is processed, the additional compute minutes are added to your group +namespace. + +Purchase compute minutes for a personal namespace + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com + +Prerequisites: + + + The namespace must be your personal namespace. + + +To purchase additional compute minutes for your personal namespace: + + + On the left sidebar, select your avatar. + Select Edit profile. + On the left sidebar, select Usage Quotas. + Select Buy additional compute minutes. GitLab redirects you to the Customers Portal. + Locate the subscription card that’s linked to your personal namespace on GitLab SaaS, select Buy more compute minutes, +and complete the details of the transaction. + + +After your payment is processed, the additional compute minutes are added to your personal +namespace. + +How compute usage is calculated + + +GitLab uses this formula to calculate the compute usage of a job: + +Job duration * Cost factor + + + + +Job duration: The time, in seconds, that a job took to run on a instance runner, +not including time spent in the created or pending statuses. + +Cost factor: A number based on project visibility. + + +The value is transformed into compute minutes and added to the count of used units +in the job’s top-level namespace. + +For example, if a user alice runs a pipeline: + + + Under the gitlab-org namespace, the compute minutes used by each job in the pipeline are +added to the overall consumption for the gitlab-org namespace, not the alice namespace. + For one of the personal projects in their namespace, the compute minutes are added +to the overall consumption for the alice namespace. + + +The compute used by one pipeline is the total compute minutes used by all the jobs +that ran in the pipeline. Jobs can run concurrently, so the total compute usage +can be higher than the end-to-end duration of a pipeline. + +Cost factor + + +The cost factors for jobs running on instance runners on GitLab.com are: + + + +1 for internal, public, and private projects. + Exceptions for public projects: + + +0.5 for projects in the GitLab for Open Source program. + +0.008 for forks of projects in the GitLab for Open Source program. For every 125 minutes of job execution time, +you use 1 compute minute. + + + Discounted dynamically for community contributions to GitLab projects. + + +The cost factors on self-managed instances are: + + + +0 for public projects, so they do not consume compute minutes. + +1 for internal and private projects. 
+ + +Cost factor for community contributions to GitLab projects + + +Community contributors can use up to 300,000 minutes on instance runners when contributing to open source projects +maintained by GitLab. The maximum of 300,000 minutes would only be possible if contributing exclusively to projects part of the GitLab product. The total number of minutes available on instance runners +is reduced by the compute minutes used by pipelines from other projects. +The 300,000 minutes applies to all SaaS tiers, and the cost factor calculation is: + + + Monthly compute quota / 300,000 job duration minutes = Cost factor + + +For example, with a monthly compute quota of 10,000 in the Premium tier: + + + 10,000 / 300,000 = 0.03333333333 cost factor. + + +For this reduced cost factor: + + + The merge request source project must be a fork of a GitLab-maintained project, +such as gitlab-com/www-gitlab-com, +or gitlab-org/gitlab. + The merge request target project must be the fork’s parent project. + The pipeline must be a merge request, merged results, or merge train pipeline. + + +GitLab administrators can add a namespace to the reduced cost factor +with a flag named ci_minimal_cost_factor_for_gitlab_namespaces. + +Additional costs on GitLab SaaS + + +GitLab SaaS runners have different cost factors, depending on the runner type (Linux, Windows, macOS) and the virtual machine configuration. + + + + + GitLab SaaS runner type + Machine Size + Cost factor + + + + + Linux OS amd64 + small + 1 + + + Linux OS amd64 + medium + 2 + + + Linux OS amd64 + large + 3 + + + Linux OS amd64 + xlarge + 6 + + + Linux OS amd64 + 2xlarge + 12 + + + Linux OS amd64 + GPU-enabled + +medium, GPU standard + 7 + + + macOS M1 + medium + 6 (Status: Beta) + + + Windows Server + - + 1 (Status: Beta) + + + + +Monthly reset of compute usage + + +On the first day of each calendar month, the accumulated compute usage is reset to 0 +for all namespaces that use instance runners. This means your full quota is available, and +calculations start again from 0. + +For example, if you have a monthly quota of 10,000 compute minutes: + + + On April 1, you have 10,000 compute minutes. + During April, you use only 6,000 of the 10,000 compute minutes. + On May 1, the accumulated compute usage resets to 0, and you have 10,000 compute minutes to use again +during May. + + +Usage data for the previous month is kept to show historical view of the consumption over time. + +Monthly rollover of purchased compute minutes + + +If you purchase additional compute minutes and don’t use the full amount, the remaining amount rolls over to +the next month. + +For example: + + + On April 1, you purchase 5,000 additional compute minutes. + During April, you use only 3,000 of the 5,000 additional compute minutes. + On May 1, the unused compute minutes roll over, so you have 2,000 additional compute minutes available for May. + + +Additional compute minutes are a one-time purchase and do not renew or refresh each month. + +What happens when you exceed the quota + + +When the compute quota is used for the current month, GitLab stops +processing new jobs. + + + Any non-running job that should be picked by instance runners is automatically dropped. + Any job being retried is automatically dropped. + Any running job can be dropped at any point if the overall namespace usage goes over-quota +by a grace period. + + +The grace period for running jobs is 1,000 compute minutes. + +Jobs on project runners are not affected by the compute quota. 
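For example, a compute-intensive job can be routed to a project runner with the tags keyword so that it does not count against the instance-runner quota. The following is a minimal sketch; the job name, the my-project-runner tag, and the script are placeholders that assume you have already registered a project runner with that tag:

load-tests:
  stage: test
  tags:
    - my-project-runner   # assumed tag of a runner registered for this project
  script:
    - ./scripts/run-load-tests.sh   # placeholder command

Because such a job is picked up by your own project runner, its duration is not added to the namespace compute usage, although you are then responsible for provisioning and maintaining that runner.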
+ +GitLab SaaS usage notifications + + +On GitLab SaaS an in-app banner is displayed and an email notification sent to the namespace owners when: + + + The remaining compute minutes is below 30% of the quota. + The remaining compute minutes is below 5% of the quota. + All the compute quota has been used. + + +Special quota limits + + +In some cases, the quota limit is replaced by one of the following labels: + + + +Unlimited: For namespaces with unlimited compute quota. + +Not supported: For namespaces where active instance runners are not enabled. + + +Reduce compute quota usage + + +If your project consumes too much compute quota, there are some strategies you can +use to reduce your usage: + + + If you are using project mirrors, ensure that pipelines for mirror updates +is disabled. + Reduce the frequency of scheduled pipelines. + +Skip pipelines when not needed. + Use interruptible jobs which can be auto-canceled +if a new pipeline starts. + If a job doesn’t have to run in every pipeline, use rules +to make it only run when it’s needed. + +Use private runners for some jobs. + If you are working from a fork and you submit a merge request to the parent project, +you can ask a maintainer to run a pipeline in the parent project. + + +If you manage an open source project, these improvements can also reduce compute quota +consumption for contributor fork projects, enabling more contributions. + +See our pipeline efficiency guide for more details. + +Reset compute usage + + + +Tier: Premium, Ultimate +Offering: Self-managed, GitLab Dedicated + +An administrator can reset the compute usage for a namespace for the current month. + +Reset usage for a personal namespace + + + + Find the user in the Admin Area. + Select Edit. + In Limits, select Reset compute usage. + + +Reset usage for a group namespace + + + + Find the group in the Admin Area. + Select Edit. + In Permissions and group features, select Reset compute usage. + + + +2. GitLab.com subscription + + + +GitLab.com subscription + + + +Tier: Premium, Ultimate +Offering: GitLab.com + + + note The GitLab SaaS subscription is being renamed to GitLab.com. During this transition, you might see references to GitLab SaaS and GitLab.com in the UI and documentation. + + +GitLab.com is the GitLab multi-tenant software-as-a-service (SaaS) offering. +You don’t need to install anything to use GitLab.com, you only need to +sign up. When you sign up, you choose: + + + +A subscription. + +The number of seats you want. + + +The subscription determines which features are available for your private projects. Organizations with public open source projects can actively apply to our GitLab for Open Source Program. + +Qualifying open source projects also get 50,000 compute minutes and free access to the Ultimate tier +through the GitLab for Open Source program. + +Obtain a GitLab.com subscription + + +A GitLab.com subscription applies to a top-level group. +Members of every subgroup and project in the group: + + + Can use the features of the subscription. + Consume seats in the subscription. + + +To subscribe to GitLab.com: + + + View the GitLab.com feature comparison +and decide which tier you want. + Create a user account for yourself by using the +sign up page. + Create a group. Your subscription tier applies to the top-level group, its subgroups, and projects. + Create additional users and +add them to the group. 
The users in this group, its subgroups, and projects can use +the features of your subscription tier, and they consume a seat in your subscription. + On the left sidebar, select Settings > Billing and choose a tier. + Fill out the form to complete your purchase. + + +View your GitLab.com subscription + + +Prerequisites: + + + You must have the Owner role for the group. + + +To see the status of your GitLab.com subscription: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > Billing. + + +The following information is displayed: + + + + + Field + Description + + + + + Seats in subscription + If this is a paid plan, represents the number of seats you’ve bought for this group. + + + Seats currently in use + Number of seats in use. Select See usage to see a list of the users using these seats. + + + Max seats used + Highest number of seats you’ve used. + + + Seats owed + +Max seats used minus Seats in subscription. + + + Subscription start date + Date your subscription started. If this is for a Free plan, it’s the date you transitioned off your group’s paid plan. + + + Subscription end date + Date your current subscription ends. Does not apply to Free plans. + + + + +How seat usage is determined + + +A GitLab.com subscription uses a concurrent (seat) model. You pay for a +subscription according to the maximum number of users assigned to the top-level group or its children during the billing period. You can +add and remove users during the subscription period without incurring additional charges, as long as the total users +at any given time doesn’t exceed the subscription count. If the total users exceeds your subscription count, you will incur an overage +which must be paid at your next reconciliation. + +A top-level group can be changed like any other group. + +Every user is included in seat usage, with the following exceptions: + + + Users who are pending approval. + Members with the Guest role on an Ultimate subscription. + Members with the minimal access role. + +Banned members. + +Blocked users. + GitLab-created service accounts: + + +Ghost User. + Bots such as: + + +Support Bot. + +Bot users for projects. + +Bot users for groups. + + + + + + +Seat usage is reviewed quarterly or annually. + +If a user goes to a different top-level group (one they have created themselves, for example) +and that group does not have a paid subscription, they would not see any of the paid features. + +It is also possible for users to belong to two different top-level groups with different subscriptions. +In this case, they would see only the features available to that subscription. + +View seat usage + + +To view a list of seats being used: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > Usage Quotas. + On the Seats tab, view usage information. + + +For each user, a list shows groups and projects where the user is a direct member. + + + +Group invite indicates the user is a member of a group shared with a group. + +Project invite indicates the user is a member of a group shared with a project. + + +The data in seat usage listing, Seats in use, and Seats in subscription are updated live. +The counts for Max seats used and Seats owed are updated once per day. + +To view your subscription information and a summary of seat counts: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > Billing. 
+ + + + The usage statistics are updated once per day, which may cause a difference between the information +in the Usage Quotas page and the Billing page. + The Last login field is updated when a user signs in after they have signed out. If there is an active session +when a user re-authenticates (for example, after a 24 hour SAML session timeout), this field is not updated. + + +Search seat usage + + +To search users in the Seat usage page, enter a string in the search field. A minimum of 3 +characters are required. + +The search returns those users whose first name, last name, or username contain the search string. + +For example: + + + + + First name + Search string + Match ? + + + + + Amir + ami + Yes + + + Amir + amr + No + + + + +Export seat usage + + + +History + + + + + +Introduced in GitLab 14.2. + + + + + + +To export seat usage data as a CSV file: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > Billing. + Under Seats currently in use, select See usage. + Select Export list. + + +The generated list contains all seats being used, +and is not affected by the current search. + +Seats owed + + +A GitLab subscription is valid for a specific number of users. + +If the number of billable users exceeds the number included in the subscription, known +as the number of seats owed, you must pay for the excess number of users. + +For example, if you purchase a subscription for 10 users: + + + + + Event + Billable members + Maximum users + + + + + Ten users occupy all 10 seats. + 10 + 10 + + + Two new users join. + 12 + 12 + + + Three users leave and their accounts are removed. + 9 + 12 + + + + +Seats owed = 12 - 10 (Maximum users - users in subscription) + +Free Guest users + + + +Tier: Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +In the Ultimate tier, users who are assigned the Guest role do not consume a seat. +The user must not be assigned any other role, anywhere in the instance or in the namespace for GitLab.com. + + + If your project is private or internal, a user with the Guest role has +a set of permissions. + If your project is public, all users, including those with the Guest role +can access your project. + + +Add seats to your subscription + + +Your subscription cost is based on the maximum number of seats you use during the billing period. +Even if you reach the number of seats in your subscription, you can continue to add users. +GitLab bills you for the overage. + +To add seats to a subscription: + + + Log in to the Customers Portal. + Go to the Manage Purchases page. + Select Add more seats on the relevant subscription card. + Enter the number of additional users. + Review the Purchase summary section. The system lists the total price for all users on the +system and a credit for what you’ve already paid. You are only charged for the net change. + Enter your payment information. + Select Purchase seats. + + +The following is emailed to you: + + + A payment receipt. You can also access this information in the Customers Portal under +View invoices. + + +Remove users from your subscription + + +To remove a billable user from your subscription: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > Billing. + In the Seats currently in use section, select See usage. + In the row for the user you want to remove, on the right side, select the ellipsis and Remove user. + Re-type the username and select Remove user. 
+ + +If you add a member to a group by using the share a group with another group feature, you can’t remove the member by using this method. Instead, you can either: + + + Remove the member from the shared group. You must be a group owner to do this. + From the group’s membership page, remove access from the entire shared group. + + +Seat usage alerts + + + +History + + + + + +Introduced in GitLab 15.2 with a flag named seat_flag_alerts. + +Generally available in GitLab 15.4. Feature flag seat_flag_alerts removed. + + + + + + +If you have the Owner role of the top-level group, an alert notifies you +of your total seat usage. + +The alert displays on group, subgroup, and project +pages, and only for top-level groups linked to subscriptions enrolled +in quarterly subscription reconciliations. +After you dismiss the alert, it doesn’t display until another seat is used. + +The alert displays based on the following seat usage. You cannot configure the +amounts at which the alert displays. + + + + + Seats in subscription + Alert displays when + + + + + 0-15 + One seat remains. + + + 16-25 + Two seats remain. + + + 26-99 + 10% of seats remain. + + + 100-999 + 8% of seats remain. + + + 1000+ + 5% of seats remain. + + + + +Change the linked namespace + + +To change the namespace linked to a subscription: + + + Sign in to the Customers Portal with a +linked GitLab.com account. + Do one of the following: + + If the subscription is not linked to a namespace, select Link subscription to a group. + If the subscription is already linked to a namespace, select Subscription actions ( ) > Change linked group. + + + Select the desired group from the New Namespace dropdown list. For a group to appear here, you must have the Owner role for that group. + + If the total number of users in your group exceeds the number of seats in your subscription, +you are prompted to pay for the additional users. Subscription charges are calculated based on +the total number of users in a group, including its subgroups and nested projects. + + If you purchased your subscription through an authorized reseller, you are unable to pay for additional users. +You can either: + + + Remove additional users, so that no overage is detected. + Contact the partner to purchase additional seats now or at the end of your subscription term. + + + Select Confirm changes. + + +Only one namespace can be linked to a subscription. + + +For a demo, see Linking GitLab Subscription to the Namespace. + +Transfer restrictions + + +Changing the linked namespace is not supported for all subscription types. + +You cannot transfer: + + + An expired or trial subscription. + A subscription with compute minutes which is already linked to a namespace. + A subscription with a Premium or Ultimate plan to a namespace which already has a Premium or Ultimate plan. + A subscription with code suggestions to a namespace which already has a subscriptions with code suggestions. + + +Upgrade your GitLab.com subscription tier + + +To upgrade your GitLab tier: + + + Sign in to the Customers Portal. + Select Upgrade on the relevant subscription card. + Select the desired upgrade. + Confirm the active form of payment, or add a new form of payment. + Check the I accept the Privacy Policy and Terms of Service checkbox. + Select Confirm purchase. + + +When the purchase has been processed, you receive confirmation of your new subscription tier. + +Subscription expiry + + +When your subscription expires, you can continue to use paid features of GitLab for 14 days. 
+On the 15th day, paid features are no longer available. You can +continue to use free features. + +To resume paid feature functionality, purchase a new subscription. + +Renew your GitLab.com subscription + + +To renew your subscription: + + + Prepare for renewal by reviewing your account. + Renew your GitLab.com subscription. + + +Prepare for renewal by reviewing your account + + +Before you renew your subscription: + + + Log in to the Customers Portal. + On the left sidebar, select Billing account settings. + Under Payment methods, verify or update the credit card on file. + Scroll down to the Company information section to verify or update the invoice contact details. + In GitLab, review your list of user accounts and remove inactive or unwanted users. + + +Renew or change a GitLab.com subscription + + +Starting 30 days before a subscription expires, GitLab notifies group owners +of the date of expiry with a banner in the GitLab user interface. +You can only renew your subscription 15 days before it is due to expire. +To renew your subscription: + + + Sign in to the Customers Portal and beneath your existing subscription, select Renew. +The Renew button displays only 15 days before a subscription expires. If there are more than 15 days before +the subscription expires, select Subscription actions ( ), then select Renew subscription to view the date when you can renew. + Review your renewal details and complete the payment process. + Select Confirm purchase. + + +Your updated subscription is applied to your namespace. The renewal period start date +is displayed on the group Billing page under Next subscription term start date. + +An invoice is generated for the renewal and available for viewing or download on the View invoices page. +If you have difficulty during the renewal process, contact the Support team for assistance. + +For details on upgrading your subscription tier, see +Upgrade your GitLab.com subscription tier. + +Automatic subscription renewal + + +When a subscription is set to auto-renew, it renews automatically on the expiration date without a gap in available service. Subscriptions purchased through the Customers Portal or GitLab.com are set to auto-renew by default. + +The number of seats is adjusted to fit the number of billable users in your group at the time of renewal, if that number is higher than the current subscription quantity. + +You can view and download your renewal invoice on the Customers Portal View invoices page. If your account has a saved credit card, the card is charged for the invoice amount. If we are unable to process a payment, or the auto-renewal fails for any other reason, you have 14 days to renew your subscription, after which your access is downgraded. + +Email notifications + + +15 days before a subscription automatically renews, an email is sent with information about the renewal. + + + If your credit card is expired, the email tells you how to update it. + If you have any outstanding overages, the email tells you to contact our Sales team. + If there are no issues, the email specifies the names and quantity of the products being renewed. The email also includes the total amount you owe. If your usage increases or decreases before renewal, this amount can change. 
+ + +Enable or disable automatic subscription renewal + + +To view or change automatic subscription renewal (at the same tier as the +previous period), sign in to the Customers Portal, and: + + + If the subscription card displays Expires on DATE, your subscription is not set to automatically renew. To enable automatic renewal, in Subscription actions ( ), select Turn on auto-renew. + If the subscription card displays Autorenews on DATE, your subscription is set to automatically renew at the end of the subscription period. To cancel automatic renewal, in Subscription actions ( ), select Cancel subscription. + + +If you have difficulty during the renewal process, contact the +Support team for assistance. + +Renew for fewer seats + + +There are several options to renew a subscription for fewer seats, as long as the seat total is equal to or greater than the billable user quantity at the time of renewal: + + + +Turn off auto-renewal to avoid renewing at a higher seat quantity. + +Manually renew within 15 days of subscription renewal date, and specify the desired seat quantity. + Work with the Sales team to renew your subscription. + + +Add or change the contacts for your subscription + + +Contacts can renew a subscription, cancel a subscription, or transfer the subscription to a different namespace. + +For information about how to transfer ownership of the Customers Portal account to another person, see +Change profile owner information. + +To add a secondary contact for your subscription: + + + Ensure an account exists in the +Customers Portal for the user you want to add. + +Create a ticket with the Support team. Include any relevant material in your request. + + +Compute + + +Compute is the resource consumed when running pipelines +on GitLab instance runners. + +Refer to Compute usage +for more information. + +Purchase additional compute minutes + + +You can purchase additional compute minutes +for your personal or group namespace. Compute minutes are a one-time purchase, so they do not renew. + +Add-on subscription for additional Storage and Transfer + + + + note Free namespaces are subject to a 5 GB storage and 10 GB transfer soft limit. Once all storage is available to view in the usage quota workflow, GitLab will automatically enforce the namespace storage limit and the project limit is removed. This change is announced separately. The storage and transfer add-on can be purchased to increase the limits. + + +Projects have a free storage quota of 10 GB. To exceed this quota you must first +purchase one or more storage subscription units. Each unit provides 10 GB of additional +storage per namespace. A storage subscription is renewed annually. For more details, see +Usage Quotas. + +When the amount of purchased storage reaches zero, all projects over the free storage quota are +locked. Projects can only be unlocked by purchasing more storage subscription units. + +Purchase more storage and transfer + + +Prerequisites: + + + You must have the Owner role. + + +You can purchase a storage subscription for your personal or group namespace. + + + note Storage subscriptions renew automatically each year. +You can cancel the subscription to disable the automatic renewal. + + +For your personal namespace + + + + Sign in to GitLab.com. + From either your personal homepage or the group’s page, go to Settings > Usage Quotas. + For each read-only project, total by how much its Usage exceeds the free quota and purchased +storage. You must purchase the storage increment that exceeds this total. 
+ Select Purchase more storage and you are taken to the Customers Portal. + Select Add new subscription. + Scroll to Purchase add-on subscriptions and select Buy storage subscription. + In the Subscription details section select the name of the user or group from the dropdown list. + Enter the desired quantity of storage packs. + In the Billing information section select the payment method from the dropdown list. + Select the Privacy Policy and Terms of Service checkbox. + Select Buy subscription. + Sign out of the Customers Portal. + Switch back to the GitLab.com tab and refresh the page. + + +The Purchased storage available total is incremented by the amount purchased. The read-only +state for all projects is removed, and their excess usage is deducted from the additional storage. + +For your group namespace + + + +History + + + + + +Introduced in GitLab 14.6. + + + + + + +If you’re using GitLab.com, you can purchase additional storage so your +pipelines aren’t blocked after you have used all your storage from your +main quota. You can find pricing for additional storage on the +GitLab Pricing page. + +To purchase additional storage for your group on GitLab.com: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > Usage Quotas. + Select Storage tab. + Select Purchase more storage. + Complete the details. + + +After your payment is processed, the extra storage is available for your group +namespace. + +To confirm the available storage, go to your group, and then select +Settings > Usage Quotas and select the Storage tab. + +The Purchased storage available total is incremented by the amount purchased. All locked +projects are unlocked and their excess usage is deducted from the additional storage. + +Enterprise Agile Planning + + +GitLab Enterprise Agile Planning is an add-on that helps bring non-technical users into the same +DevSecOps platform where engineers build, test, secure, and deploy code. +The add-on enables cross-team collaboration between developers and non-developers without having to +purchase full GitLab licenses for non-engineering team members. +With Enterprise Agile Planning seats, non-engineering team members can participate in planning +workflows, measure software delivery velocity and impact with Value Stream Analytics, and use +executive dashboards to drive organizational visibility. + +Purchase additional Enterprise Agile Planning seats + + +Contact your GitLab sales representative for more information. + +Contact Support + + +Learn more about: + + + The tiers of GitLab Support. + +Submit a request via the Support Portal. + + +We also encourage you to search our project trackers for known issues and +existing feature requests in the GitLab project. + +These issues are the best avenue for getting updates on specific product plans +and for communicating directly with the relevant GitLab team members. + +Troubleshooting + + +Credit card declined + + +If your credit card is declined when purchasing a GitLab subscription, possible reasons include: + + + The credit card details provided are incorrect. The most common cause for this is an incomplete or fake address. + The credit card account has insufficient funds. + You are using a virtual credit card and it has insufficient funds, or has expired. + The transaction exceeds the credit limit. + The transaction exceeds the credit card’s maximum transaction amount. + + +Check with your financial institution to confirm if any of these reasons apply. 
If they don’t apply, contact GitLab Support.

Unable to link subscription to namespace


If you cannot link a subscription to your namespace, ensure that you have the Owner role
for that namespace.

No purchases listed in the Customers Portal account


To view purchases in the Customers Portal, in the Manage Purchases page, you
must be a contact in your organization for the subscription.

To be added as a contact, create a ticket with the GitLab Support team.


"
how do i optimize my pipelines so that they do not cost so much money?,,"1. Pipeline efficiency



Pipeline efficiency



Tier: Free, Premium, Ultimate
Offering: GitLab.com, Self-managed, GitLab Dedicated

CI/CD Pipelines are the fundamental building blocks for GitLab CI/CD.
Making pipelines more efficient helps you save developer time, which:


 Speeds up your DevOps processes
 Reduces costs
 Shortens the development feedback loop


It’s common that new teams or projects start with slow and inefficient pipelines,
and improve their configuration over time through trial and error. A better process is
to use pipeline features that improve efficiency right away, and get a faster software
development lifecycle earlier.

First, ensure you are familiar with GitLab CI/CD fundamentals
and understand the quick start guide.

Identify bottlenecks and common failures


The easiest indicators to check for inefficient pipelines are the runtimes of the jobs,
stages, and the total runtime of the pipeline itself. The total pipeline duration is
heavily influenced by the:


 Size of the repository.
 Total number of stages and jobs.
 Dependencies between jobs.
 The “critical path”, which represents
the minimum and maximum pipeline duration.


Additional points to pay attention to relate to GitLab Runners:


 Availability of the runners and the resources they are provisioned with.
 Build dependencies and their installation time.
 
Container image size.
 Network latency and slow connections.


Pipelines that frequently fail unnecessarily also cause slowdowns in the development
lifecycle. You should look for problematic patterns with failed jobs:


 Flaky unit tests which fail randomly, or produce unreliable test results.
 Test coverage drops and code quality issues correlated with that behavior.
 Failures that can be safely ignored, but that halt the pipeline instead.
 Tests that fail at the end of a long pipeline, but could be in an earlier stage,
causing delayed feedback.


Pipeline analysis


Analyze the performance of your pipeline to find ways to improve efficiency. Analysis
can help identify possible blockers in the CI/CD infrastructure. This includes analyzing:


 Job workloads.
 Bottlenecks in the execution times.
 The overall pipeline architecture.


It’s important to understand and document the pipeline workflows, and discuss possible
actions and changes. Refactoring pipelines may need careful interaction between teams
in the DevSecOps lifecycle.

Pipeline analysis can help identify issues with cost efficiency. For example, runners
hosted with a paid cloud service may be provisioned with:


 More resources than needed for CI/CD pipelines, wasting money.
 Not enough resources, causing slow runtimes and wasting time.


Pipeline Insights


The Pipeline success and duration charts
give information about pipeline runtime and failed job counts.
+ +Tests like unit tests, integration tests, end-to-end tests, +code quality tests, and others +ensure that problems are automatically found by the CI/CD pipeline. There could be many +pipeline stages involved causing long runtimes. + +You can improve runtimes by running jobs that test different things in parallel, in +the same stage, reducing overall runtime. The downside is that you need more runners +running simultaneously to support the parallel jobs. + +The testing levels for GitLab +provide an example of a complex testing strategy with many components involved. + +Directed Acyclic Graphs (DAG) visualization + + +The Directed Acyclic Graph (DAG) visualization can help analyze the critical path in +the pipeline and understand possible blockers. + + + +Pipeline Monitoring + + +Global pipeline health is a key indicator to monitor along with job and pipeline duration. +CI/CD analytics give a visual +representation of pipeline health. + +Instance administrators have access to additional performance metrics and self-monitoring. + +You can fetch specific pipeline health metrics from the API. +External monitoring tools can poll the API and verify pipeline health or collect +metrics for long term SLA analytics. + +For example, the GitLab CI Pipelines Exporter +for Prometheus fetches metrics from the API and pipeline events. It can check branches in projects automatically +and get the pipeline status and duration. In combination with a Grafana dashboard, +this helps build an actionable view for your operations team. Metric graphs can also +be embedded into incidents making problem resolving easier. Additionally, it can also export metrics about jobs and environments. + +If you use the GitLab CI Pipelines Exporter, you should start with the example configuration. + + + +Alternatively, you can use a monitoring tool that can execute scripts, like +check_gitlab for example. + +Runner monitoring + + +You can also monitor CI runners on +their host systems, or in clusters like Kubernetes. This includes checking: + + + Disk and disk IO + CPU usage + Memory + Runner process resources + + +The Prometheus Node Exporter +can monitor runners on Linux hosts, and kube-state-metrics +runs in a Kubernetes cluster. + +You can also test GitLab Runner auto-scaling +with cloud providers, and define offline times to reduce costs. + +Dashboards and incident management + + +Use your existing monitoring tools and dashboards to integrate CI/CD pipeline monitoring, +or build them from scratch. Ensure that the runtime data is actionable and useful +in teams, and operations/SREs are able to identify problems early enough. +Incident management can help here too, +with embedded metric charts and all valuable details to analyze the problem. + +Storage usage + + +Review the storage use of the following to help analyze costs and efficiency: + + + +Job artifacts and their expire_in +configuration. If kept for too long, storage usage grows and could slow pipelines down. + +Container registry usage. + +Package registry usage. + + +Pipeline configuration + + +Make careful choices when configuring pipelines to speed up pipelines and reduce +resource usage. This includes making use of GitLab CI/CD’s built-in features that +make pipelines run faster and more efficiently. + +Reduce how often jobs run + + +Try to find which jobs don’t need to run in all situations, and use pipeline configuration +to stop them from running: + + + Use the interruptible keyword to stop old pipelines +when they are superseded by a newer pipeline. 
+ Use rules to skip tests that aren’t needed. For example, +skip backend tests when only the frontend code is changed. + Run non-essential scheduled pipelines less frequently. + + +Fail fast + + +Ensure that errors are detected early in the CI/CD pipeline. A job that takes a very long +time to complete keeps a pipeline from returning a failed status until the job completes. + +Design pipelines so that jobs that can fail fast +run earlier. For example, add an early stage and move the syntax, style linting, +Git commit message verification, and similar jobs in there. + +Decide if it’s important for long jobs to run early, before fast feedback from +faster jobs. The initial failures may make it clear that the rest of the pipeline +shouldn’t run, saving pipeline resources. + +Directed Acyclic Graphs (DAG) + + +In a basic configuration, jobs always wait for all other jobs in earlier stages to complete +before running. This is the simplest configuration, but it’s also the slowest in most +cases. Directed Acyclic Graphs and +parent/child pipelines are more flexible and can +be more efficient, but can also make pipelines harder to understand and analyze. + +Caching + + +Another optimization method is to cache dependencies. If your +dependencies change rarely, like NodeJS /node_modules, +caching can make pipeline execution much faster. + +You can use cache:when to cache downloaded dependencies +even when a job fails. + +Docker Images + + +Downloading and initializing Docker images can be a large part of the overall runtime +of jobs. + +If a Docker image is slowing down job execution, analyze the base image size and network +connection to the registry. If GitLab is running in the cloud, look for a cloud container +registry offered by the vendor. In addition to that, you can make use of the +GitLab container registry which can be accessed +by the GitLab instance faster than other registries. + +Optimize Docker images + + +Build optimized Docker images because large Docker images use up a lot of space and +take a long time to download with slower connection speeds. If possible, avoid using +one large image for all jobs. Use multiple smaller images, each for a specific task, +that download and run faster. + +Try to use custom Docker images with the software pre-installed. It’s usually much +faster to download a larger pre-configured image than to use a common image and install +software on it each time. The Docker Best practices for writing Dockerfiles article +has more information about building efficient Docker images. + +Methods to reduce Docker image size: + + + Use a small base image, for example debian-slim. + Do not install convenience tools such as vim or curl if they aren’t strictly needed. + Create a dedicated development image. + Disable man pages and documentation installed by packages to save space. + Reduce the RUN layers and combine software installation steps. + Use multi-stage builds +to merge multiple Dockerfiles that use the builder pattern into one Dockerfile, which can reduce image size. + If using apt, add --no-install-recommends to avoid unnecessary packages. + Clean up caches and files that are no longer needed at the end. For example +rm -rf /var/lib/apt/lists/* for Debian and Ubuntu, or yum clean all for RHEL and CentOS. + Use tools like dive or DockerSlim +to analyze and shrink images. + + +To simplify Docker image management, you can create a dedicated group for managing +Docker images and test, build and publish them with CI/CD pipelines. 
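As an illustration, the following .gitlab-ci.yml sketch combines several of the techniques described in this section (rules:changes, interruptible, and caching). The job name, the frontend/ path, and the npm commands are placeholder assumptions rather than a drop-in configuration:

frontend-tests:
  stage: test
  interruptible: true            # let a newer pipeline cancel this job
  rules:
    - changes:
        - frontend/**/*          # run only when frontend files change
  cache:
    key:
      files:
        - package-lock.json      # reuse the cache while the lockfile is unchanged
    paths:
      - .npm/
    when: always                 # keep the cache even if the job fails
  script:
    - npm ci --cache .npm --prefer-offline
    - npm test

Caching the .npm/ directory rather than node_modules/ pairs well with npm ci, which removes node_modules/ at the start of every run.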
+ +Test, document, and learn + + +Improving pipelines is an iterative process. Make small changes, monitor the effect, +then iterate again. Many small improvements can add up to a large increase in pipeline +efficiency. + +It can help to document the pipeline design and architecture. You can do this with +Mermaid charts in Markdown directly in the GitLab +repository. + +Document CI/CD pipeline problems and incidents in issues, including research done +and solutions found. This helps onboarding new team members, and also helps +identify recurring problems with CI pipeline efficiency. + +Related topics + + + + CI Monitoring Webcast Slides + GitLab.com Monitoring Handbook + Buildings dashboards for operational visibility + + + +2. Compute quota + + + +Compute quota + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed + + +History + + + + Renamed from “CI/CD minutes” to “compute quota” or “compute minutes” in GitLab 16.1. + + + + + + + note The term CI/CD minutes is being renamed to compute minutes. During this transition, you might see references in the UI and documentation to CI/CD minutes, CI minutes, pipeline minutes, CI pipeline minutes, pipeline minutes quota, compute credits, compute units, and compute minutes. For more information, see epic 2150. + + +Administrators can limit the amount of time that projects can use to run jobs on +instance runners each month. This limit +is tracked with a compute quota. + +By default, one minute of execution time by a single job uses +one compute minute. The total execution time for a pipeline is +the sum of all its jobs’ durations. +Jobs can run concurrently, so the total usage can be higher than the +end-to-end duration of a pipeline. + +On GitLab.com: + + + Compute quotas are enabled for all projects, but certain +projects consume compute minutes at a slower rate. + The base monthly compute quota for a GitLab.com namespace +is determined by its license tier. + You can purchase additional compute minutes +if you need more than the amount of compute in your monthly quota. + + +On self-managed GitLab instances: + + + Compute quotas are disabled by default. + When enabled, compute quotas apply to private projects only. + Administrators can assign more compute minutes +if a namespace uses all its monthly quota. + + +Trigger jobs do not execute on runners, so they do not +consume compute minutes, even when using strategy:depend +to wait for the downstream pipeline status. +The triggered downstream pipeline consumes compute minutes the same as other pipelines. + +Project runners are not subject to a compute quota. + +Set the compute quota for all namespaces + + + +History + + + + + +Moved to GitLab Premium in 13.9. + + + + + + +By default, GitLab instances do not have a compute quota. +The default value for the quota is 0, which is unlimited. +However, you can change this default value. + +Prerequisites: + + + You must be a GitLab administrator. + + +To change the default quota that applies to all namespaces: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Continuous Integration and Deployment. + In the Compute quota box, enter a limit. + Select Save changes. + + +If a quota is already defined for a specific namespace, this value does not change that quota. + +Set the compute quota for a specific namespace + + + +History + + + + + +Moved to GitLab Premium in 13.9. + + + + + + +You can override the global value and set a compute quota +for a specific namespace. 
+ +Prerequisites: + + + You must be a GitLab administrator. + + +To set a compute quota for a namespace: + + + On the left sidebar, at the bottom, select Admin Area. + Select Overview > Groups. + For the group you want to update, select Edit. + In the Compute quota box, enter the maximum number of compute minutes. + Select Save changes. + + +You can also use the update group API or the +update user API instead. + + + note You can set a compute quota for only top-level groups or user namespaces. +If you set a quota for a subgroup, it is not used. + + +View compute usage + + +Prerequisites: + + + You must have access to the build to view the total usage and quota summary for a namespace associated with a build. + Access to Usage Quotas page is based on your role in the associated namespace or group. + + +View Usage Quota Reports for a group + + + +History + + + + + Displaying instance runners duration per project introduced in GitLab 15.0. + + + + + + +Prerequisites: + + + You must have the Owner role for the group. + + +To view compute usage for your group: + + + On the left sidebar, select Search or go to and +find your group. The group must not be a subgroup. + Select Settings > Usage Quotas. + Select the Pipelines tab. + + +The projects list shows projects with compute usage or instance runners usage +in the current month only. The list includes all projects in the namespace and its +subgroups, sorted in descending order of compute usage. + +View Usage Quota reports for a personal namespace + + + +History + + + + + Displaying instance runners duration introduced in GitLab 15.0. + + + + + + +Prerequisites: + + + The namespace must be your personal namespace. + + +You can view the compute usage for a personal namespace: + + + On the left sidebar, select your avatar. + Select Edit profile. + On the left sidebar, select Usage Quotas. + + +The projects list shows personal projects +with compute usage or instance runners usage in the current month only. The list +is sorted in descending order of compute usage. + +Purchase additional compute minutes + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com + +If you’re using GitLab SaaS, you can purchase additional packs of compute minutes. +These additional compute minutes: + + + Are used only after the monthly quota included in your subscription runs out. + Are carried over to the next month, if any remain at the end of the month. + Are valid for 12 months from date of purchase or until all compute minutes are consumed, whichever comes first. Expiry of compute minutes is not enforced. + + +For example, with a GitLab SaaS Premium license: + + + You have 10,000 monthly compute minutes. + You purchase an additional 5,000 compute minutes. + Your total limit is 15,000 compute minutes. + + +If you use 13,000 compute minutes during the month, the next month your additional compute minutes become +2,000. If you use 9,000 compute minutes during the month, your additional compute minutes remain the same. + +Additional compute minutes bought on a trial subscription are available after the trial ends or upgrading to a paid plan. + +You can find pricing for additional compute minutes on the +GitLab Pricing page. + +Purchase compute minutes for a group + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com + +Prerequisites: + + + You must have the Owner role for the group. + + +You can purchase additional compute minutes for your group. +You cannot transfer purchased compute minutes from one group to another, +so be sure to select the correct group. 
+ + + On the left sidebar, select Search or go to and find your group. + Select Settings > Usage Quotas. + Select Pipelines. + Select Buy additional compute minutes. + Complete the details of the transaction. + + +After your payment is processed, the additional compute minutes are added to your group +namespace. + +Purchase compute minutes for a personal namespace + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com + +Prerequisites: + + + The namespace must be your personal namespace. + + +To purchase additional compute minutes for your personal namespace: + + + On the left sidebar, select your avatar. + Select Edit profile. + On the left sidebar, select Usage Quotas. + Select Buy additional compute minutes. GitLab redirects you to the Customers Portal. + Locate the subscription card that’s linked to your personal namespace on GitLab SaaS, select Buy more compute minutes, +and complete the details of the transaction. + + +After your payment is processed, the additional compute minutes are added to your personal +namespace. + +How compute usage is calculated + + +GitLab uses this formula to calculate the compute usage of a job: + +Job duration * Cost factor + + + + +Job duration: The time, in seconds, that a job took to run on a instance runner, +not including time spent in the created or pending statuses. + +Cost factor: A number based on project visibility. + + +The value is transformed into compute minutes and added to the count of used units +in the job’s top-level namespace. + +For example, if a user alice runs a pipeline: + + + Under the gitlab-org namespace, the compute minutes used by each job in the pipeline are +added to the overall consumption for the gitlab-org namespace, not the alice namespace. + For one of the personal projects in their namespace, the compute minutes are added +to the overall consumption for the alice namespace. + + +The compute used by one pipeline is the total compute minutes used by all the jobs +that ran in the pipeline. Jobs can run concurrently, so the total compute usage +can be higher than the end-to-end duration of a pipeline. + +Cost factor + + +The cost factors for jobs running on instance runners on GitLab.com are: + + + +1 for internal, public, and private projects. + Exceptions for public projects: + + +0.5 for projects in the GitLab for Open Source program. + +0.008 for forks of projects in the GitLab for Open Source program. For every 125 minutes of job execution time, +you use 1 compute minute. + + + Discounted dynamically for community contributions to GitLab projects. + + +The cost factors on self-managed instances are: + + + +0 for public projects, so they do not consume compute minutes. + +1 for internal and private projects. + + +Cost factor for community contributions to GitLab projects + + +Community contributors can use up to 300,000 minutes on instance runners when contributing to open source projects +maintained by GitLab. The maximum of 300,000 minutes would only be possible if contributing exclusively to projects part of the GitLab product. The total number of minutes available on instance runners +is reduced by the compute minutes used by pipelines from other projects. +The 300,000 minutes applies to all SaaS tiers, and the cost factor calculation is: + + + Monthly compute quota / 300,000 job duration minutes = Cost factor + + +For example, with a monthly compute quota of 10,000 in the Premium tier: + + + 10,000 / 300,000 = 0.03333333333 cost factor. 
+ + +For this reduced cost factor: + + + The merge request source project must be a fork of a GitLab-maintained project, +such as gitlab-com/www-gitlab-com, +or gitlab-org/gitlab. + The merge request target project must be the fork’s parent project. + The pipeline must be a merge request, merged results, or merge train pipeline. + + +GitLab administrators can add a namespace to the reduced cost factor +with a flag named ci_minimal_cost_factor_for_gitlab_namespaces. + +Additional costs on GitLab SaaS + + +GitLab SaaS runners have different cost factors, depending on the runner type (Linux, Windows, macOS) and the virtual machine configuration. + + + + + GitLab SaaS runner type + Machine Size + Cost factor + + + + + Linux OS amd64 + small + 1 + + + Linux OS amd64 + medium + 2 + + + Linux OS amd64 + large + 3 + + + Linux OS amd64 + xlarge + 6 + + + Linux OS amd64 + 2xlarge + 12 + + + Linux OS amd64 + GPU-enabled + +medium, GPU standard + 7 + + + macOS M1 + medium + 6 (Status: Beta) + + + Windows Server + - + 1 (Status: Beta) + + + + +Monthly reset of compute usage + + +On the first day of each calendar month, the accumulated compute usage is reset to 0 +for all namespaces that use instance runners. This means your full quota is available, and +calculations start again from 0. + +For example, if you have a monthly quota of 10,000 compute minutes: + + + On April 1, you have 10,000 compute minutes. + During April, you use only 6,000 of the 10,000 compute minutes. + On May 1, the accumulated compute usage resets to 0, and you have 10,000 compute minutes to use again +during May. + + +Usage data for the previous month is kept to show historical view of the consumption over time. + +Monthly rollover of purchased compute minutes + + +If you purchase additional compute minutes and don’t use the full amount, the remaining amount rolls over to +the next month. + +For example: + + + On April 1, you purchase 5,000 additional compute minutes. + During April, you use only 3,000 of the 5,000 additional compute minutes. + On May 1, the unused compute minutes roll over, so you have 2,000 additional compute minutes available for May. + + +Additional compute minutes are a one-time purchase and do not renew or refresh each month. + +What happens when you exceed the quota + + +When the compute quota is used for the current month, GitLab stops +processing new jobs. + + + Any non-running job that should be picked by instance runners is automatically dropped. + Any job being retried is automatically dropped. + Any running job can be dropped at any point if the overall namespace usage goes over-quota +by a grace period. + + +The grace period for running jobs is 1,000 compute minutes. + +Jobs on project runners are not affected by the compute quota. + +GitLab SaaS usage notifications + + +On GitLab SaaS an in-app banner is displayed and an email notification sent to the namespace owners when: + + + The remaining compute minutes is below 30% of the quota. + The remaining compute minutes is below 5% of the quota. + All the compute quota has been used. + + +Special quota limits + + +In some cases, the quota limit is replaced by one of the following labels: + + + +Unlimited: For namespaces with unlimited compute quota. + +Not supported: For namespaces where active instance runners are not enabled. 
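As a worked illustration of the cost factors and machine sizes listed above: a job that runs for 10 minutes on a small Linux amd64 runner (cost factor 1) consumes 10 compute minutes, while the same 10-minute job on a large Linux amd64 runner (cost factor 3) consumes 30 compute minutes.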
+ + +Reduce compute quota usage + + +If your project consumes too much compute quota, there are some strategies you can +use to reduce your usage: + + + If you are using project mirrors, ensure that pipelines for mirror updates +is disabled. + Reduce the frequency of scheduled pipelines. + +Skip pipelines when not needed. + Use interruptible jobs which can be auto-canceled +if a new pipeline starts. + If a job doesn’t have to run in every pipeline, use rules +to make it only run when it’s needed. + +Use private runners for some jobs. + If you are working from a fork and you submit a merge request to the parent project, +you can ask a maintainer to run a pipeline in the parent project. + + +If you manage an open source project, these improvements can also reduce compute quota +consumption for contributor fork projects, enabling more contributions. + +See our pipeline efficiency guide for more details. + +Reset compute usage + + + +Tier: Premium, Ultimate +Offering: Self-managed, GitLab Dedicated + +An administrator can reset the compute usage for a namespace for the current month. + +Reset usage for a personal namespace + + + + Find the user in the Admin Area. + Select Edit. + In Limits, select Reset compute usage. + + +Reset usage for a group namespace + + + + Find the group in the Admin Area. + Select Edit. + In Permissions and group features, select Reset compute usage. + + + +" +can i fetch a job by name using the api?,,"1. Jobs API + + + +Jobs API + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +List project jobs + + + +History + + + + + Support for keyset pagination introduced in GitLab 15.9. + + + + + + +Get a list of jobs in a project. Jobs are sorted in descending order of their IDs. + +By default, this request returns 20 results at a time because the API results are paginated + + + note This endpoint supports both offset-based and keyset-based pagination, but keyset-based +pagination is strongly recommended when requesting consecutive pages of results. + + +GET /projects/:id/jobs + + + + + + Attribute + Type + Required + Description + + + + + id + integer/string + Yes + ID or URL-encoded path of the project. + + + scope + string or array of strings + No + Scope of jobs to show. Either one of or an array of the following: created, pending, running, failed, success, canceled, skipped, waiting_for_resource, or manual. All jobs are returned if scope is not provided. 
+ + + + +curl --globoff --header ""PRIVATE-TOKEN: "" ""https://gitlab.example.com/api/v4/projects/1/jobs?scope[]=pending&scope[]=running"" + + +Example of response + +[ + { + ""commit"": { + ""author_email"": ""admin@example.com"", + ""author_name"": ""Administrator"", + ""created_at"": ""2015-12-24T16:51:14.000+01:00"", + ""id"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""message"": ""Test the CI integration."", + ""short_id"": ""0ff3ae19"", + ""title"": ""Test the CI integration."" + }, + ""coverage"": null, + ""archived"": false, + ""allow_failure"": false, + ""created_at"": ""2015-12-24T15:51:21.802Z"", + ""started_at"": ""2015-12-24T17:54:27.722Z"", + ""finished_at"": ""2015-12-24T17:54:27.895Z"", + ""erased_at"": null, + ""duration"": 0.173, + ""queued_duration"": 0.010, + ""artifacts_file"": { + ""filename"": ""artifacts.zip"", + ""size"": 1000 + }, + ""artifacts"": [ + {""file_type"": ""archive"", ""size"": 1000, ""filename"": ""artifacts.zip"", ""file_format"": ""zip""}, + {""file_type"": ""metadata"", ""size"": 186, ""filename"": ""metadata.gz"", ""file_format"": ""gzip""}, + {""file_type"": ""trace"", ""size"": 1500, ""filename"": ""job.log"", ""file_format"": ""raw""}, + {""file_type"": ""junit"", ""size"": 750, ""filename"": ""junit.xml.gz"", ""file_format"": ""gzip""} + ], + ""artifacts_expire_at"": ""2016-01-23T17:54:27.895Z"", + ""tag_list"": [ + ""docker runner"", ""ubuntu18"" + ], + ""id"": 7, + ""name"": ""teaspoon"", + ""pipeline"": { + ""id"": 6, + ""project_id"": 1, + ""ref"": ""main"", + ""sha"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""status"": ""pending"" + }, + ""ref"": ""main"", + ""runner"": null, + ""stage"": ""test"", + ""status"": ""failed"", + ""failure_reason"": ""script_failure"", + ""tag"": false, + ""web_url"": ""https://example.com/foo/bar/-/jobs/7"", + ""project"": { + ""ci_job_token_scope_enabled"": false + }, + ""user"": { + ""id"": 1, + ""name"": ""Administrator"", + ""username"": ""root"", + ""state"": ""active"", + ""avatar_url"": ""http://www.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=80&d=identicon"", + ""web_url"": ""http://gitlab.dev/root"", + ""created_at"": ""2015-12-21T13:14:24.077Z"", + ""bio"": null, + ""location"": null, + ""public_email"": """", + ""skype"": """", + ""linkedin"": """", + ""twitter"": """", + ""website_url"": """", + ""organization"": """" + } + }, + { + ""commit"": { + ""author_email"": ""admin@example.com"", + ""author_name"": ""Administrator"", + ""created_at"": ""2015-12-24T16:51:14.000+01:00"", + ""id"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""message"": ""Test the CI integration."", + ""short_id"": ""0ff3ae19"", + ""title"": ""Test the CI integration."" + }, + ""coverage"": null, + ""archived"": false, + ""allow_failure"": false, + ""created_at"": ""2015-12-24T15:51:21.727Z"", + ""started_at"": ""2015-12-24T17:54:24.729Z"", + ""finished_at"": ""2015-12-24T17:54:24.921Z"", + ""erased_at"": null, + ""duration"": 0.192, + ""queued_duration"": 0.023, + ""artifacts_expire_at"": ""2016-01-23T17:54:24.921Z"", + ""tag_list"": [ + ""docker runner"", ""win10-2004"" + ], + ""id"": 6, + ""name"": ""rspec:other"", + ""pipeline"": { + ""id"": 6, + ""project_id"": 1, + ""ref"": ""main"", + ""sha"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""status"": ""pending"" + }, + ""ref"": ""main"", + ""artifacts"": [], + ""runner"": null, + ""stage"": ""test"", + ""status"": ""failed"", + ""failure_reason"": ""stuck_or_timeout_failure"", + ""tag"": false, + ""web_url"": 
""https://example.com/foo/bar/-/jobs/6"", + ""project"": { + ""ci_job_token_scope_enabled"": false + }, + ""user"": { + ""id"": 1, + ""name"": ""Administrator"", + ""username"": ""root"", + ""state"": ""active"", + ""avatar_url"": ""http://www.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=80&d=identicon"", + ""web_url"": ""http://gitlab.dev/root"", + ""created_at"": ""2015-12-21T13:14:24.077Z"", + ""bio"": null, + ""location"": null, + ""public_email"": """", + ""skype"": """", + ""linkedin"": """", + ""twitter"": """", + ""website_url"": """", + ""organization"": """" + } + } +] + + +List pipeline jobs + + +Get a list of jobs for a pipeline. + +By default, this request returns 20 results at a time because the API results are paginated + +This endpoint: + + + +Returns data for any pipeline including child pipelines. + Does not return retried jobs in the response by default. + Sorts jobs by ID in descending order (newest first). + + +GET /projects/:id/pipelines/:pipeline_id/jobs + + + + + + Attribute + Type + Required + Description + + + + + id + integer/string + Yes + ID or URL-encoded path of the project. + + + pipeline_id + integer + Yes + ID of a pipeline. Can also be obtained in CI jobs via the predefined CI variable CI_PIPELINE_ID. + + + include_retried + boolean + No + Include retried jobs in the response. Defaults to false. Introduced in GitLab 13.9. + + + scope + string or array of strings + No + Scope of jobs to show. Either one of or an array of the following: created, pending, running, failed, success, canceled, skipped, waiting_for_resource, or manual. All jobs are returned if scope is not provided. + + + + +curl --header ""PRIVATE-TOKEN: "" ""https://gitlab.example.com/api/v4/projects/1/pipelines/6/jobs?scope[]=pending&scope[]=running"" + + +Example of response + +[ + { + ""commit"": { + ""author_email"": ""admin@example.com"", + ""author_name"": ""Administrator"", + ""created_at"": ""2015-12-24T16:51:14.000+01:00"", + ""id"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""message"": ""Test the CI integration."", + ""short_id"": ""0ff3ae19"", + ""title"": ""Test the CI integration."" + }, + ""coverage"": null, + ""archived"": false, + ""allow_failure"": false, + ""created_at"": ""2015-12-24T15:51:21.727Z"", + ""started_at"": ""2015-12-24T17:54:24.729Z"", + ""finished_at"": ""2015-12-24T17:54:24.921Z"", + ""erased_at"": null, + ""duration"": 0.192, + ""queued_duration"": 0.023, + ""artifacts_expire_at"": ""2016-01-23T17:54:24.921Z"", + ""tag_list"": [ + ""docker runner"", ""ubuntu18"" + ], + ""id"": 6, + ""name"": ""rspec:other"", + ""pipeline"": { + ""id"": 6, + ""project_id"": 1, + ""ref"": ""main"", + ""sha"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""status"": ""pending"" + }, + ""ref"": ""main"", + ""artifacts"": [], + ""runner"": null, + ""stage"": ""test"", + ""status"": ""failed"", + ""failure_reason"": ""stuck_or_timeout_failure"", + ""tag"": false, + ""web_url"": ""https://example.com/foo/bar/-/jobs/6"", + ""project"": { + ""ci_job_token_scope_enabled"": false + }, + ""user"": { + ""id"": 1, + ""name"": ""Administrator"", + ""username"": ""root"", + ""state"": ""active"", + ""avatar_url"": ""http://www.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=80&d=identicon"", + ""web_url"": ""http://gitlab.dev/root"", + ""created_at"": ""2015-12-21T13:14:24.077Z"", + ""bio"": null, + ""location"": null, + ""public_email"": """", + ""skype"": """", + ""linkedin"": """", + ""twitter"": """", + ""website_url"": """", + ""organization"": """" + } + }, + 
{ + ""commit"": { + ""author_email"": ""admin@example.com"", + ""author_name"": ""Administrator"", + ""created_at"": ""2015-12-24T16:51:14.000+01:00"", + ""id"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""message"": ""Test the CI integration."", + ""short_id"": ""0ff3ae19"", + ""title"": ""Test the CI integration."" + }, + ""coverage"": null, + ""archived"": false, + ""allow_failure"": false, + ""created_at"": ""2015-12-24T15:51:21.802Z"", + ""started_at"": ""2015-12-24T17:54:27.722Z"", + ""finished_at"": ""2015-12-24T17:54:27.895Z"", + ""erased_at"": null, + ""duration"": 0.173, + ""queued_duration"": 0.023, + ""artifacts_file"": { + ""filename"": ""artifacts.zip"", + ""size"": 1000 + }, + ""artifacts"": [ + {""file_type"": ""archive"", ""size"": 1000, ""filename"": ""artifacts.zip"", ""file_format"": ""zip""}, + {""file_type"": ""metadata"", ""size"": 186, ""filename"": ""metadata.gz"", ""file_format"": ""gzip""}, + {""file_type"": ""trace"", ""size"": 1500, ""filename"": ""job.log"", ""file_format"": ""raw""}, + {""file_type"": ""junit"", ""size"": 750, ""filename"": ""junit.xml.gz"", ""file_format"": ""gzip""} + ], + ""artifacts_expire_at"": ""2016-01-23T17:54:27.895Z"", + ""tag_list"": [ + ""docker runner"", ""ubuntu18"" + ], + ""id"": 7, + ""name"": ""teaspoon"", + ""pipeline"": { + ""id"": 6, + ""project_id"": 1, + ""ref"": ""main"", + ""sha"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""status"": ""pending"" + }, + ""ref"": ""main"", + ""runner"": null, + ""stage"": ""test"", + ""status"": ""failed"", + ""failure_reason"": ""script_failure"", + ""tag"": false, + ""web_url"": ""https://example.com/foo/bar/-/jobs/7"", + ""project"": { + ""ci_job_token_scope_enabled"": false + }, + ""user"": { + ""id"": 1, + ""name"": ""Administrator"", + ""username"": ""root"", + ""state"": ""active"", + ""avatar_url"": ""http://www.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=80&d=identicon"", + ""web_url"": ""http://gitlab.dev/root"", + ""created_at"": ""2015-12-21T13:14:24.077Z"", + ""bio"": null, + ""location"": null, + ""public_email"": """", + ""skype"": """", + ""linkedin"": """", + ""twitter"": """", + ""website_url"": """", + ""organization"": """" + } + } +] + + +List pipeline trigger jobs + + +Get a list of trigger jobs for a pipeline. + +GET /projects/:id/pipelines/:pipeline_id/bridges + + + + + + Attribute + Type + Required + Description + + + + + id + integer/string + Yes + ID or URL-encoded path of the project. + + + pipeline_id + integer + Yes + ID of a pipeline. + + + scope + string or array of strings + No + Scope of jobs to show. Either one of or an array of the following: created, pending, running, failed, success, canceled, skipped, waiting_for_resource, or manual. All jobs are returned if scope is not provided. 
+ + + + +curl --header ""PRIVATE-TOKEN: "" ""https://gitlab.example.com/api/v4/projects/1/pipelines/6/bridges?scope[]=pending&scope[]=running"" + + +Example of response + +[ + { + ""commit"": { + ""author_email"": ""admin@example.com"", + ""author_name"": ""Administrator"", + ""created_at"": ""2015-12-24T16:51:14.000+01:00"", + ""id"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""message"": ""Test the CI integration."", + ""short_id"": ""0ff3ae19"", + ""title"": ""Test the CI integration."" + }, + ""coverage"": null, + ""archived"": false, + ""allow_failure"": false, + ""created_at"": ""2015-12-24T15:51:21.802Z"", + ""started_at"": ""2015-12-24T17:54:27.722Z"", + ""finished_at"": ""2015-12-24T17:58:27.895Z"", + ""erased_at"": null, + ""duration"": 240, + ""queued_duration"": 0.123, + ""id"": 7, + ""name"": ""teaspoon"", + ""pipeline"": { + ""id"": 6, + ""project_id"": 1, + ""ref"": ""main"", + ""sha"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""status"": ""pending"", + ""created_at"": ""2015-12-24T15:50:16.123Z"", + ""updated_at"": ""2015-12-24T18:00:44.432Z"", + ""web_url"": ""https://example.com/foo/bar/pipelines/6"" + }, + ""ref"": ""main"", + ""stage"": ""test"", + ""status"": ""pending"", + ""tag"": false, + ""web_url"": ""https://example.com/foo/bar/-/jobs/7"", + ""project"": { + ""ci_job_token_scope_enabled"": false + }, + ""user"": { + ""id"": 1, + ""name"": ""Administrator"", + ""username"": ""root"", + ""state"": ""active"", + ""avatar_url"": ""http://www.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=80&d=identicon"", + ""web_url"": ""http://gitlab.dev/root"", + ""created_at"": ""2015-12-21T13:14:24.077Z"", + ""bio"": null, + ""location"": null, + ""public_email"": """", + ""skype"": """", + ""linkedin"": """", + ""twitter"": """", + ""website_url"": """", + ""organization"": """" + }, + ""downstream_pipeline"": { + ""id"": 5, + ""sha"": ""f62a4b2fb89754372a346f24659212eb8da13601"", + ""ref"": ""main"", + ""status"": ""pending"", + ""created_at"": ""2015-12-24T17:54:27.722Z"", + ""updated_at"": ""2015-12-24T17:58:27.896Z"", + ""web_url"": ""https://example.com/diaspora/diaspora-client/pipelines/5"" + } + } +] + + +Get job token’s job + + +Retrieve the job that generated a job token. 
+ +GET /job + + +Examples (must run as part of the script section of a CI/CD job): + +curl --header ""Authorization: Bearer $CI_JOB_TOKEN"" ""${CI_API_V4_URL}/job"" +curl --header ""JOB-TOKEN: $CI_JOB_TOKEN"" ""${CI_API_V4_URL}/job"" +curl ""${CI_API_V4_URL}/job?job_token=$CI_JOB_TOKEN"" + + +Example of response + +{ + ""commit"": { + ""author_email"": ""admin@example.com"", + ""author_name"": ""Administrator"", + ""created_at"": ""2015-12-24T16:51:14.000+01:00"", + ""id"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""message"": ""Test the CI integration."", + ""short_id"": ""0ff3ae19"", + ""title"": ""Test the CI integration."" + }, + ""coverage"": null, + ""archived"": false, + ""allow_failure"": false, + ""created_at"": ""2015-12-24T15:51:21.880Z"", + ""started_at"": ""2015-12-24T17:54:30.733Z"", + ""finished_at"": ""2015-12-24T17:54:31.198Z"", + ""erased_at"": null, + ""duration"": 0.465, + ""queued_duration"": 0.123, + ""artifacts_expire_at"": ""2016-01-23T17:54:31.198Z"", + ""id"": 8, + ""name"": ""rubocop"", + ""pipeline"": { + ""id"": 6, + ""project_id"": 1, + ""ref"": ""main"", + ""sha"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""status"": ""pending"" + }, + ""ref"": ""main"", + ""artifacts"": [], + ""runner"": null, + ""stage"": ""test"", + ""status"": ""failed"", + ""failure_reason"": ""script_failure"", + ""tag"": false, + ""web_url"": ""https://example.com/foo/bar/-/jobs/8"", + ""project"": { + ""ci_job_token_scope_enabled"": false + }, + ""user"": { + ""id"": 1, + ""name"": ""Administrator"", + ""username"": ""root"", + ""state"": ""active"", + ""avatar_url"": ""http://www.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=80&d=identicon"", + ""web_url"": ""http://gitlab.dev/root"", + ""created_at"": ""2015-12-21T13:14:24.077Z"", + ""bio"": null, + ""location"": null, + ""public_email"": """", + ""skype"": """", + ""linkedin"": """", + ""twitter"": """", + ""website_url"": """", + ""organization"": """" + } +} + + +Get GitLab agent by CI_JOB_TOKEN + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +Retrieve the job that generated the CI_JOB_TOKEN, along with a list of allowed +agents. + +GET /job/allowed_agents + + +Supported attributes: + + + + + Attribute + Type + Required + Description + + + + + CI_JOB_TOKEN + string + Yes + Token value associated with the GitLab-provided CI_JOB_TOKEN variable. 
+ + + + +Example request: + +curl --header ""JOB-TOKEN: "" ""https://gitlab.example.com/api/v4/job/allowed_agents"" +curl ""https://gitlab.example.com/api/v4/job/allowed_agents?job_token="" + + +Example response: + +{ + ""allowed_agents"": [ + { + ""id"": 1, + ""config_project"": { + ""id"": 1, + ""description"": null, + ""name"": ""project1"", + ""name_with_namespace"": ""John Doe2 / project1"", + ""path"": ""project1"", + ""path_with_namespace"": ""namespace1/project1"", + ""created_at"": ""2022-11-16T14:51:50.579Z"" + } + } + ], + ""job"": { + ""id"": 1 + }, + ""pipeline"": { + ""id"": 2 + }, + ""project"": { + ""id"": 1, + ""groups"": [ + { + ""id"": 1 + }, + { + ""id"": 2 + }, + { + ""id"": 3 + } + ] + }, + ""user"": { + ""id"": 2, + ""name"": ""John Doe3"", + ""username"": ""user2"", + ""state"": ""active"", + ""avatar_url"": ""https://www.gravatar.com/avatar/10fc7f102b"", + ""web_url"": ""http://localhost/user2"" + } +} + + +Get a single job + + +Get a single job of a project + +GET /projects/:id/jobs/:job_id + + + + + + Attribute + Type + Required + Description + + + + + id + integer/string + Yes + ID or URL-encoded path of the project. + + + job_id + integer + Yes + ID of a job. + + + + +curl --header ""PRIVATE-TOKEN: "" ""https://gitlab.example.com/api/v4/projects/1/jobs/8"" + + +Example of response + +{ + ""commit"": { + ""author_email"": ""admin@example.com"", + ""author_name"": ""Administrator"", + ""created_at"": ""2015-12-24T16:51:14.000+01:00"", + ""id"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""message"": ""Test the CI integration."", + ""short_id"": ""0ff3ae19"", + ""title"": ""Test the CI integration."" + }, + ""coverage"": null, + ""archived"": false, + ""allow_failure"": false, + ""created_at"": ""2015-12-24T15:51:21.880Z"", + ""started_at"": ""2015-12-24T17:54:30.733Z"", + ""finished_at"": ""2015-12-24T17:54:31.198Z"", + ""erased_at"": null, + ""duration"": 0.465, + ""queued_duration"": 0.010, + ""artifacts_expire_at"": ""2016-01-23T17:54:31.198Z"", + ""tag_list"": [ + ""docker runner"", ""macos-10.15"" + ], + ""id"": 8, + ""name"": ""rubocop"", + ""pipeline"": { + ""id"": 6, + ""project_id"": 1, + ""ref"": ""main"", + ""sha"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""status"": ""pending"" + }, + ""ref"": ""main"", + ""artifacts"": [], + ""runner"": null, + ""stage"": ""test"", + ""status"": ""failed"", + ""tag"": false, + ""web_url"": ""https://example.com/foo/bar/-/jobs/8"", + ""project"": { + ""ci_job_token_scope_enabled"": false + }, + ""user"": { + ""id"": 1, + ""name"": ""Administrator"", + ""username"": ""root"", + ""state"": ""active"", + ""avatar_url"": ""http://www.gravatar.com/avatar/e64c7d89f26bd1972efa854d13d7dd61?s=80&d=identicon"", + ""web_url"": ""http://gitlab.dev/root"", + ""created_at"": ""2015-12-21T13:14:24.077Z"", + ""bio"": null, + ""location"": null, + ""public_email"": """", + ""skype"": """", + ""linkedin"": """", + ""twitter"": """", + ""website_url"": """", + ""organization"": """" + } +} + + +Get a log file + + +Get a log (trace) of a specific job of a project: + +GET /projects/:id/jobs/:job_id/trace + + + + + + Attribute + Type + Required + Description + + + + + id + integer/string + Yes + ID or URL-encoded path of the project. + + + job_id + integer + Yes + ID of a job. 
+ + + + +curl --location --header ""PRIVATE-TOKEN: "" ""https://gitlab.example.com/api/v4/projects/1/jobs/8/trace"" + + +Possible response status codes: + + + + + Status + Description + + + + + 200 + Serves the log file + + + 404 + Job not found or no log file + + + + +Cancel a job + + +Cancel a single job of a project + +POST /projects/:id/jobs/:job_id/cancel + + + + + + Attribute + Type + Required + Description + + + + + id + integer/string + Yes + ID or URL-encoded path of the project. + + + job_id + integer + Yes + ID of a job. + + + + +curl --request POST --header ""PRIVATE-TOKEN: "" ""https://gitlab.example.com/api/v4/projects/1/jobs/1/cancel"" + + +Example of response + +{ + ""commit"": { + ""author_email"": ""admin@example.com"", + ""author_name"": ""Administrator"", + ""created_at"": ""2015-12-24T16:51:14.000+01:00"", + ""id"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""message"": ""Test the CI integration."", + ""short_id"": ""0ff3ae19"", + ""title"": ""Test the CI integration."" + }, + ""coverage"": null, + ""archived"": false, + ""allow_failure"": false, + ""created_at"": ""2016-01-11T10:13:33.506Z"", + ""started_at"": ""2016-01-11T10:14:09.526Z"", + ""finished_at"": null, + ""erased_at"": null, + ""duration"": 8, + ""queued_duration"": 0.010, + ""id"": 1, + ""name"": ""rubocop"", + ""ref"": ""main"", + ""artifacts"": [], + ""runner"": null, + ""stage"": ""test"", + ""status"": ""canceled"", + ""tag"": false, + ""web_url"": ""https://example.com/foo/bar/-/jobs/1"", + ""project"": { + ""ci_job_token_scope_enabled"": false + }, + ""user"": null +} + + +Retry a job + + +Retry a single job of a project + +POST /projects/:id/jobs/:job_id/retry + + + + + + Attribute + Type + Required + Description + + + + + id + integer/string + Yes + ID or URL-encoded path of the project. + + + job_id + integer + Yes + ID of a job. + + + + +curl --request POST --header ""PRIVATE-TOKEN: "" ""https://gitlab.example.com/api/v4/projects/1/jobs/1/retry"" + + +Example of response + +{ + ""commit"": { + ""author_email"": ""admin@example.com"", + ""author_name"": ""Administrator"", + ""created_at"": ""2015-12-24T16:51:14.000+01:00"", + ""id"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""message"": ""Test the CI integration."", + ""short_id"": ""0ff3ae19"", + ""title"": ""Test the CI integration."" + }, + ""coverage"": null, + ""archived"": false, + ""allow_failure"": false, + ""created_at"": ""2016-01-11T10:13:33.506Z"", + ""started_at"": null, + ""finished_at"": null, + ""erased_at"": null, + ""duration"": null, + ""queued_duration"": 0.010, + ""id"": 1, + ""name"": ""rubocop"", + ""ref"": ""main"", + ""artifacts"": [], + ""runner"": null, + ""stage"": ""test"", + ""status"": ""pending"", + ""tag"": false, + ""web_url"": ""https://example.com/foo/bar/-/jobs/1"", + ""project"": { + ""ci_job_token_scope_enabled"": false + }, + ""user"": null +} + + +Erase a job + + +Erase a single job of a project (remove job artifacts and a job log) + +POST /projects/:id/jobs/:job_id/erase + + +Parameters + + + + + Attribute + Type + Required + Description + + + + + id + integer/string + Yes + ID or URL-encoded path of the project. + + + job_id + integer + Yes + ID of a job. 
+ + + + +Example of request + +curl --request POST --header ""PRIVATE-TOKEN: "" ""https://gitlab.example.com/api/v4/projects/1/jobs/1/erase"" + + +Example of response + +{ + ""commit"": { + ""author_email"": ""admin@example.com"", + ""author_name"": ""Administrator"", + ""created_at"": ""2015-12-24T16:51:14.000+01:00"", + ""id"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""message"": ""Test the CI integration."", + ""short_id"": ""0ff3ae19"", + ""title"": ""Test the CI integration."" + }, + ""coverage"": null, + ""archived"": false, + ""allow_failure"": false, + ""download_url"": null, + ""id"": 1, + ""name"": ""rubocop"", + ""ref"": ""main"", + ""artifacts"": [], + ""runner"": null, + ""stage"": ""test"", + ""created_at"": ""2016-01-11T10:13:33.506Z"", + ""started_at"": ""2016-01-11T10:13:33.506Z"", + ""finished_at"": ""2016-01-11T10:15:10.506Z"", + ""erased_at"": ""2016-01-11T11:30:19.914Z"", + ""duration"": 97.0, + ""queued_duration"": 0.010, + ""status"": ""failed"", + ""tag"": false, + ""web_url"": ""https://example.com/foo/bar/-/jobs/1"", + ""project"": { + ""ci_job_token_scope_enabled"": false + }, + ""user"": null +} + + + + note You can’t delete archived jobs with the API, but you can +delete job artifacts and logs from jobs completed before a specific date + + + +Run a job + + +For a job in manual status, trigger an action to start the job. + +POST /projects/:id/jobs/:job_id/play + + + + + + Attribute + Type + Required + Description + + + + + id + integer/string + Yes + ID or URL-encoded path of the project. + + + job_id + integer + Yes + ID of a job. + + + job_variables_attributes + array of hashes + No + An array containing the custom variables available to the job. Introduced in GitLab 14.9. + + + + +Example request: + +curl --request POST ""https://gitlab.example.com/api/v4/projects/1/jobs/1/play"" \ + --header ""Content-Type: application/json"" \ + --header ""PRIVATE-TOKEN: "" \ + --data @variables.json + + +@variables.json is structured like: + +{ + ""job_variables_attributes"": [ + { + ""key"": ""TEST_VAR_1"", + ""value"": ""test1"" + }, + { + ""key"": ""TEST_VAR_2"", + ""value"": ""test2"" + } + ] +} + + +Example response: + +{ + ""commit"": { + ""author_email"": ""admin@example.com"", + ""author_name"": ""Administrator"", + ""created_at"": ""2015-12-24T16:51:14.000+01:00"", + ""id"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""message"": ""Test the CI integration."", + ""short_id"": ""0ff3ae19"", + ""title"": ""Test the CI integration."" + }, + ""coverage"": null, + ""archived"": false, + ""allow_failure"": false, + ""created_at"": ""2016-01-11T10:13:33.506Z"", + ""started_at"": null, + ""finished_at"": null, + ""erased_at"": null, + ""duration"": null, + ""queued_duration"": 0.010, + ""id"": 1, + ""name"": ""rubocop"", + ""ref"": ""main"", + ""artifacts"": [], + ""runner"": null, + ""stage"": ""test"", + ""status"": ""pending"", + ""tag"": false, + ""web_url"": ""https://example.com/foo/bar/-/jobs/1"", + ""project"": { + ""ci_job_token_scope_enabled"": false + }, + ""user"": null +} + + + +2. Job Artifacts API + + + +Job Artifacts API + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +Use the job artifacts API to download or delete job artifacts. + +Authentication with a CI/CD job token +available in the Premium and Ultimate tier. + +Get job artifacts + + + +History + + + + + The use of CI_JOB_TOKEN in the artifacts download API was introduced in GitLab Premium 9.5. 
+ + + + + + +Get the job’s artifacts zipped archive of a project. + +If you use cURL to download artifacts from GitLab.com, use the --location parameter +as the request might redirect through a CDN. + +GET /projects/:id/jobs/:job_id/artifacts + + + + + + Attribute + Type + Required + Description + + + + + id + integer/string + Yes + ID or URL-encoded path of the project. + + + job_id + integer + Yes + ID of a job. + + + job_token + string + No + To be used with triggers for multi-project pipelines. It should be invoked only in a CI/CD job defined in the .gitlab-ci.yml file. The value is always $CI_JOB_TOKEN. The job associated with the $CI_JOB_TOKEN must be running when this token is used. Premium and Ultimate only. + + + + +Example request using the PRIVATE-TOKEN header: + +curl --location --output artifacts.zip --location --header ""PRIVATE-TOKEN: "" ""https://gitlab.example.com/api/v4/projects/1/jobs/42/artifacts"" + + +In the Premium and Ultimate tier you can authenticate with this endpoint +in a CI/CD job by using a CI/CD job token. + +Use either: + + + + The job_token attribute with the GitLab-provided CI_JOB_TOKEN predefined variable. +For example, the following job downloads the artifacts of the job with ID 42: + + +artifact_download: + stage: test + script: + - 'curl --location --output artifacts.zip ""https://gitlab.example.com/api/v4/projects/1/jobs/42/artifacts?job_token=$CI_JOB_TOKEN""' + + + + The JOB-TOKEN header with the GitLab-provided CI_JOB_TOKEN predefined variable. +For example, the following job downloads the artifacts of the job with ID +42. The command is wrapped in single quotes because it contains a +colon (:): + + +artifact_download: + stage: test + script: + - 'curl --location --output artifacts.zip --header ""JOB-TOKEN: $CI_JOB_TOKEN"" ""https://gitlab.example.com/api/v4/projects/1/jobs/42/artifacts""' + + + + +Possible response status codes: + + + + + Status + Description + + + + + 200 + Serves the artifacts file. + + + 404 + Build not found or no artifacts. + + + + +Download the artifacts archive + + + +History + + + + + The use of CI_JOB_TOKEN in the artifacts download API was introduced in GitLab Premium 9.5. + + + + + + +Download the artifacts zipped archive from the latest successful pipeline for +the given reference name and job, provided the job finished successfully. This +is the same as getting the job’s artifacts, but by +defining the job’s name instead of its ID. + +If you use cURL to download artifacts from GitLab.com, use the --location parameter +as the request might redirect through a CDN. + + + note If a pipeline is parent of other child pipelines, artifacts +are searched in hierarchical order from parent to child. For example, if both parent and +child pipelines have a job with the same name, the artifact from the parent pipeline is returned. + + +GET /projects/:id/jobs/artifacts/:ref_name/download?job=name + + +Parameters + + + + + Attribute + Type + Required + Description + + + + + id + integer/string + Yes + ID or URL-encoded path of the project. + + + job + string + Yes + The name of the job. + + + ref_name + string + Yes + Branch or tag name in repository. HEAD or SHA references are not supported. + + + job_token + string + No + To be used with triggers for multi-project pipelines. It should be invoked only in a CI/CD job defined in the .gitlab-ci.yml file. The value is always $CI_JOB_TOKEN. The job associated with the $CI_JOB_TOKEN must be running when this token is used. Premium and Ultimate only. 
+ + + + +Example request using the PRIVATE-TOKEN header: + +curl --location --header ""PRIVATE-TOKEN: "" ""https://gitlab.example.com/api/v4/projects/1/jobs/artifacts/main/download?job=test"" + + +In the Premium and Ultimate tier you can authenticate with this endpoint +in a CI/CD job by using a CI/CD job token. + +Use either: + + + + The job_token attribute with the GitLab-provided CI_JOB_TOKEN predefined variable. +For example, the following job downloads the artifacts of the test job +of the main branch: + + +artifact_download: + stage: test + script: + - 'curl --location --output artifacts.zip ""https://gitlab.example.com/api/v4/projects/$CI_PROJECT_ID/jobs/artifacts/main/download?job=test&job_token=$CI_JOB_TOKEN""' + + + + The JOB-TOKEN header with the GitLab-provided CI_JOB_TOKEN predefined variable. +For example, the following job downloads the artifacts of the test job +of the main branch. The command is wrapped in single quotes +because it contains a colon (:): + + +artifact_download: + stage: test + script: + - 'curl --location --output artifacts.zip --header ""JOB-TOKEN: $CI_JOB_TOKEN"" ""https://gitlab.example.com/api/v4/projects/$CI_PROJECT_ID/jobs/artifacts/main/download?job=test""' + + + + +Possible response status codes: + + + + + Status + Description + + + + + 200 + Serves the artifacts file. + + + 404 + Build not found or no artifacts. + + + + +Download a single artifact file by job ID + + +Download a single artifact file from a job with a specified ID from inside +the job’s artifacts zipped archive. The file is extracted from the archive and +streamed to the client. + +If you use cURL to download artifacts from GitLab.com, use the --location parameter +as the request might redirect through a CDN. + +GET /projects/:id/jobs/:job_id/artifacts/*artifact_path + + +Parameters + + + + + Attribute + Type + Required + Description + + + + + artifact_path + string + Yes + Path to a file inside the artifacts archive. + + + id + integer/string + Yes + ID or URL-encoded path of the project. + + + job_id + integer + Yes + The unique job identifier. + + + job_token + string + No + To be used with triggers for multi-project pipelines. It should be invoked only in a CI/CD job defined in the .gitlab-ci.yml file. The value is always $CI_JOB_TOKEN. The job associated with the $CI_JOB_TOKEN must be running when this token is used. Premium and Ultimate only. + + + + +Example request: + +curl --location --header ""PRIVATE-TOKEN: "" ""https://gitlab.example.com/api/v4/projects/1/jobs/5/artifacts/some/release/file.pdf"" + + +In the Premium and Ultimate tier you can authenticate with this endpoint +in a CI/CD job by using a CI/CD job token. + +Possible response status codes: + + + + + Status + Description + + + + + 200 + Sends a single artifact file + + + 400 + Invalid path provided + + + 404 + Build not found or no file/artifacts + + + + +Download a single artifact file from specific tag or branch + + +Download a single artifact file for a specific job of the latest successful pipeline +for the given reference name from inside the job’s artifacts archive. +The file is extracted from the archive and streamed to the client, with the plain/text content type. + +The artifact file provides more detail than what is available in the +CSV export. + +Artifacts for parent and child pipelines +are searched in hierarchical order from parent to child. For example, if both parent and child pipelines +have a job with the same name, the artifact from the parent pipeline is returned. 
+ +If you use cURL to download artifacts from GitLab.com, use the --location parameter +as the request might redirect through a CDN. + +GET /projects/:id/jobs/artifacts/:ref_name/raw/*artifact_path?job=name + + +Parameters: + + + + + Attribute + Type + Required + Description + + + + + artifact_path + string + Yes + Path to a file inside the artifacts archive. + + + id + integer/string + Yes + ID or URL-encoded path of the project. + + + job + string + Yes + The name of the job. + + + ref_name + string + Yes + Branch or tag name in repository. HEAD or SHA references are not supported. + + + job_token + string + No + To be used with triggers for multi-project pipelines. It should be invoked only in a CI/CD job defined in the .gitlab-ci.yml file. The value is always $CI_JOB_TOKEN. The job associated with the $CI_JOB_TOKEN must be running when this token is used. Premium and Ultimate only. + + + + +Example request: + +curl --location --header ""PRIVATE-TOKEN: "" ""https://gitlab.example.com/api/v4/projects/1/jobs/artifacts/main/raw/some/release/file.pdf?job=pdf"" + + +In the Premium and Ultimate tier you can authenticate with this endpoint +in a CI/CD job by using a CI/CD job token. + +Possible response status codes: + + + + + Status + Description + + + + + 200 + Sends a single artifact file + + + 400 + Invalid path provided + + + 404 + Build not found or no file/artifacts + + + + +Keep artifacts + + +Prevents artifacts from being deleted when expiration is set. + +POST /projects/:id/jobs/:job_id/artifacts/keep + + +Parameters + + + + + Attribute + Type + Required + Description + + + + + id + integer/string + Yes + ID or URL-encoded path of the project owned by the authenticated user. + + + job_id + integer + Yes + ID of a job. + + + + +Example request: + +curl --request POST --header ""PRIVATE-TOKEN: "" ""https://gitlab.example.com/api/v4/projects/1/jobs/1/artifacts/keep"" + + +Example response: + +{ + ""commit"": { + ""author_email"": ""admin@example.com"", + ""author_name"": ""Administrator"", + ""created_at"": ""2015-12-24T16:51:14.000+01:00"", + ""id"": ""0ff3ae198f8601a285adcf5c0fff204ee6fba5fd"", + ""message"": ""Test the CI integration."", + ""short_id"": ""0ff3ae19"", + ""title"": ""Test the CI integration."" + }, + ""coverage"": null, + ""allow_failure"": false, + ""download_url"": null, + ""id"": 42, + ""name"": ""rubocop"", + ""ref"": ""main"", + ""artifacts"": [], + ""runner"": null, + ""stage"": ""test"", + ""created_at"": ""2016-01-11T10:13:33.506Z"", + ""started_at"": ""2016-01-11T10:13:33.506Z"", + ""finished_at"": ""2016-01-11T10:15:10.506Z"", + ""duration"": 97.0, + ""status"": ""failed"", + ""failure_reason"": ""script_failure"", + ""tag"": false, + ""web_url"": ""https://example.com/foo/bar/-/jobs/42"", + ""user"": null +} + + +Delete job artifacts + + +Delete artifacts of a job. + +Prerequisites: + + + Must have at least the maintainer role in the project. + + +DELETE /projects/:id/jobs/:job_id/artifacts + + + + + + Attribute + Type + Required + Description + + + + + id + integer/string + Yes + ID or URL-encoded path of the project. + + + job_id + integer + Yes + ID of a job. + + + + +Example request: + +curl --request DELETE --header ""PRIVATE-TOKEN: "" ""https://gitlab.example.com/api/v4/projects/1/jobs/1/artifacts"" + + + + note At least Maintainer role is required to delete artifacts. + + +If the artifacts were deleted successfully, a response with status 204 No Content is returned. 
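The delete endpoint above can also be called from a pipeline itself. The following is a minimal sketch, not taken from the documentation, of a manual cleanup job that removes the artifacts of one specific job. API_TOKEN and JOB_ID_TO_CLEAN are placeholder CI/CD variables you would define yourself (the token must belong to a user with at least the Maintainer role), while CI_API_V4_URL and CI_PROJECT_ID are predefined variables:

cleanup-job-artifacts:
  stage: .post
  rules:
    - when: manual
  script:
    # Placeholder variables: API_TOKEN (access token with at least Maintainer rights)
    # and JOB_ID_TO_CLEAN (the numeric ID of the job whose artifacts to delete).
    - 'curl --fail --request DELETE --header "PRIVATE-TOKEN: $API_TOKEN" "$CI_API_V4_URL/projects/$CI_PROJECT_ID/jobs/$JOB_ID_TO_CLEAN/artifacts"'

The --fail flag makes curl return a non-zero exit code, and therefore fail the job, if the API responds with an error status instead of 204 No Content.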
+ +Delete project artifacts + + + +History + + + + + +Introduced in GitLab 14.7 with a flag named bulk_expire_project_artifacts. Enabled by default on GitLab self-managed. Enabled on GitLab.com. + +Feature flag removed in GitLab 14.10. + + + + + + +Delete artifacts eligible for deletion in a project. By default, artifacts from +the most recent successful pipeline of each ref. +are not deleted. + +Requests to this endpoint set the expiry of all artifacts that +can be deleted to the current time. The files are then deleted from the system as part +of the regular cleanup of expired job artifacts. Job logs are never deleted. + +The regular cleanup occurs asynchronously on a schedule, so there might be a short delay +before artifacts are deleted. + +Prerequisites: + + + You must have at least the Maintainer role for the project. + + +DELETE /projects/:id/artifacts + + + + + + Attribute + Type + Required + Description + + + + + id + integer/string + Yes + ID or URL-encoded path of the project. + + + + +Example request: + +curl --request DELETE --header ""PRIVATE-TOKEN: "" ""https://gitlab.example.com/api/v4/projects/1/artifacts"" + + +A response with status 202 Accepted is returned. + + +" +how do i run unit tests for my next js application in a gitlab pipeline?,,"1. Tutorial: Create and run your first GitLab CI/CD pipeline + + + +Tutorial: Create and run your first GitLab CI/CD pipeline + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +This tutorial shows you how to configure and run your first CI/CD pipeline in GitLab. + +If you are already familiar with basic CI/CD concepts, you can learn about +common keywords in Tutorial: Create a complex pipeline. + +Prerequisites + + +Before you start, make sure you have: + + + A project in GitLab that you would like to use CI/CD for. + The Maintainer or Owner role for the project. + + +If you don’t have a project, you can create a public project for free on https://gitlab.com. + +Steps + + +To create and run your first pipeline: + + + + Ensure you have runners available to run your jobs. + + If you’re using GitLab.com, you can skip this step. GitLab.com provides instance runners for you. + + + Create a .gitlab-ci.yml file +at the root of your repository. This file is where you define the CI/CD jobs. + + + +When you commit the file to your repository, the runner runs your jobs. +The job results are displayed in a pipeline. + +Ensure you have runners available + + +In GitLab, runners are agents that run your CI/CD jobs. + +To view available runners: + + + Go to Settings > CI/CD and expand Runners. + + +As long as you have at least one runner that’s active, with a green circle next to it, +you have a runner available to process your jobs. + +If you don’t have a runner + + +If you don’t have a runner: + + + +Install GitLab Runner on your local machine. + +Register the runner for your project. +Choose the shell executor. + + +When your CI/CD jobs run, in a later step, they will run on your local machine. + +Create a .gitlab-ci.yml file + + +Now create a .gitlab-ci.yml file. It is a YAML file where +you specify instructions for GitLab CI/CD. + +In this file, you define: + + + The structure and order of jobs that the runner should execute. + The decisions the runner should make when specific conditions are encountered. + + +To create a .gitlab-ci.yml file: + + + On the left sidebar, select Code > Repository. + + Above the file list, select the branch you want to commit to. +If you’re not sure, leave master or main. 
+Then select the plus icon ( ) and New file: + + + + + For the Filename, type .gitlab-ci.yml and in the larger window, +paste this sample code: + + +build-job: + stage: build + script: + - echo ""Hello, $GITLAB_USER_LOGIN!"" + +test-job1: + stage: test + script: + - echo ""This job tests something"" + +test-job2: + stage: test + script: + - echo ""This job tests something, but takes more time than test-job1."" + - echo ""After the echo commands complete, it runs the sleep command for 20 seconds"" + - echo ""which simulates a test that runs 20 seconds longer than test-job1"" + - sleep 20 + +deploy-prod: + stage: deploy + script: + - echo ""This job deploys something from the $CI_COMMIT_BRANCH branch."" + environment: production + + + This example shows four jobs: build-job, test-job1, test-job2, and deploy-prod. +The comments listed in the echo commands are displayed in the UI when you view the jobs. +The values for the predefined variables +$GITLAB_USER_LOGIN and $CI_COMMIT_BRANCH are populated when the jobs run. + + Select Commit changes. + + +The pipeline starts and runs the jobs you defined in the .gitlab-ci.yml file. + +View the status of your pipeline and jobs + + +Now take a look at your pipeline and the jobs within. + + + + Go to Build > Pipelines. A pipeline with three stages should be displayed: + + + + + View a visual representation of your pipeline by selecting the pipeline ID: + + + + + View details of a job by selecting the job name. For example, deploy-prod: + + + + + +You have successfully created your first CI/CD pipeline in GitLab. Congratulations! + +Now you can get started customizing your .gitlab-ci.yml and defining more advanced jobs. + + +.gitlab-ci.yml tips + + +Here are some tips to get started working with the .gitlab-ci.yml file. + +For the complete .gitlab-ci.yml syntax, see the full CI/CD YAML syntax reference. + + + Use the pipeline editor to edit your .gitlab-ci.yml file. + Each job contains a script section and belongs to a stage: + + +stage describes the sequential execution of jobs. +If there are runners available, jobs in a single stage run in parallel. + Use the needs keyword to run jobs out of stage order. +This creates a Directed Acyclic Graph (DAG). + + + You can set additional configuration to customize how your jobs and stages perform: + + Use the rules keyword to specify when to run or skip jobs. +The only and except legacy keywords are still supported, but can’t be used +with rules in the same job. + Keep information across jobs and stages persistent in a pipeline with cache +and artifacts. These keywords are ways to store +dependencies and job output, even when using ephemeral runners for each job. + Use the default keyword to specify additional +configurations that are applied to all jobs. This keyword is often used to define +before_script and after_script +sections that should run on every job. + + + + +Related topics + + + + Migrate from CircleCI + Migrate from Jenkins + + Watch First time GitLab & CI/CD. This includes a quick introduction to GitLab, the first steps with CI/CD, building a Go project, running tests, using the CI/CD pipeline editor, detecting secrets and security vulnerabilities and offers more exercises for asynchronous practice. + + Watch Intro to GitLab CI. This workshop uses the Web IDE to quickly get going with building source code using CI/CD, and run unit tests. + + + +2. 
Unit test reports + + + +Unit test reports + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +It is very common that a CI/CD pipeline contains a +test job that verifies your code. +If the tests fail, the pipeline fails and users get notified. The person that +works on the merge request has to check the job logs and see where the +tests failed so that they can fix them. + +You can configure your job to use Unit test reports, and GitLab displays a +report on the merge request so that it’s easier and faster to identify the +failure without having to check the entire log. Unit test reports currently +only support test reports in the JUnit report format. + +If you don’t use merge requests but still want to see the unit test report +output without searching through job logs, the full +Unit test reports are available +in the pipeline detail view. + +Consider the following workflow: + + + Your default branch is rock solid, your project is using GitLab CI/CD and +your pipelines indicate that there isn’t anything broken. + Someone from your team submits a merge request, a test fails and the pipeline +gets the known red icon. To investigate more, you have to go through the job +logs to figure out the cause of the failed test, which usually contain +thousands of lines. + You configure the Unit test reports and immediately GitLab collects and +exposes them in the merge request. No more searching in the job logs. + Your development and debugging workflow becomes easier, faster and efficient. + + +How it works + + +First, GitLab Runner uploads all JUnit report format XML files +as artifacts to GitLab. Then, when you visit a merge request, GitLab starts +comparing the head and base branch’s JUnit report format XML files, where: + + + The base branch is the target branch (usually the default branch). + The head branch is the source branch (the latest pipeline in each merge request). + + +The Test summary panel shows how many tests failed, how many had errors, +and how many were fixed. If no comparison can be done because data for the base branch +is not available, the panel shows only the list of failed tests for the source branch. + +The types of results are: + + + +Newly failed tests: Test cases which passed on the base branch and failed on the head branch. + +Newly encountered errors: Test cases which passed on the base branch and failed due to a +test error on the head branch. + +Existing failures: Test cases which failed on the base branch and failed on the head branch. + +Resolved failures: Test cases which failed on the base branch and passed on the head branch. + + +View failed tests + + +Each entry in the Test summary panel shows the test name and result type. +Select the test name to open a modal window with details of its execution time and +the error output. + + + +Copy failed test names + + + +History + + + + + +Introduced in GitLab 15.2. + + + + + + +You can copy the name and path of failed tests when there are failed tests listed +in the Test summary panel. Use name and path to find and rerun the +test locally for verification. + +To copy the name of all failed tests, at the top of the Test summary panel, +select Copy failed tests. The failed tests are listed as a string with the tests +separated by spaces. This option is only available if the JUnit report populates +the attributes for failed tests. + +To copy the name of a single failed test: + + + Expand the Test summary panel by selecting Show test summary details ( ). 
+ Select the test you want to review. + Select Copy test name to rerun locally ( ). + + +Number of recent failures + + + +History + + + + + +Introduced in merge requests in GitLab 13.7. + +Feature flag removed in GitLab 13.8. + +Introduced in Test Reports in GitLab 13.9. + + + + + + +If a test failed in the project’s default branch in the last 14 days, a message like +Failed {n} time(s) in {default_branch} in the last 14 days is displayed for that test. + +The calculation includes failed tests in completed pipelines, but not blocked pipelines. +Issue 431265 proposes to +also include blocked pipelines in the calculation. + +How to set it up + + +To enable the Unit test reports in merge requests, you must add +artifacts:reports:junit +in .gitlab-ci.yml, and specify the paths of the generated test reports. +The reports must be .xml files, otherwise GitLab returns an Error 500. + +In the following example for Ruby, the job in the test stage runs and GitLab +collects the unit test report from the job. After the job is executed, the +XML report is stored in GitLab as an artifact, and the results are shown in the +merge request widget. + +## Use https://github.com/sj26/rspec_junit_formatter to generate a JUnit report format XML file with rspec +ruby: + stage: test + script: + - bundle install + - bundle exec rspec --format progress --format RspecJunitFormatter --out rspec.xml + artifacts: + when: always + paths: + - rspec.xml + reports: + junit: rspec.xml + + +To make the Unit test report output files browsable, include them with the +artifacts:paths keyword as well, as shown in the example. +To upload the report even if the job fails (for example if the tests do not pass), +use the artifacts:when:always keyword. + +You cannot have multiple tests with the same name and class in your JUnit report format XML file. + +In GitLab 15.0 and earlier, test reports from parallel:matrix +jobs are aggregated together, which can cause some report information to not be displayed. +In GitLab 15.1 and later, this bug is fixed, +and all report information is displayed. + +View Unit test reports on GitLab + + + +History + + + + + +Introduced in GitLab 12.5 behind a feature flag (junit_pipeline_view), disabled by default. + +Feature flag removed in GitLab 13.3. + + + + + + +If JUnit report format XML files are generated and uploaded as part of a pipeline, these reports +can be viewed inside the pipelines details page. The Tests tab on this page +displays a list of test suites and cases reported from the XML file. + + + +You can view all the known test suites and select each of these to see further +details, including the cases that make up the suite. + +You can also retrieve the reports via the GitLab API. + +Unit test reports parsing errors + + + +History + + + + + +Introduced in GitLab 13.10. + + + + + + +If parsing JUnit report XML results in an error, an indicator is shown next to the job name. Hovering over the icon shows the parser error in a tooltip. If multiple parsing errors come from grouped jobs, GitLab shows only the first error from the group. + + + +For test case parsing limits, see Max test cases per unit test report. + +GitLab does not parse very large nodes of JUnit reports. There is an issue open to make this optional. + +View JUnit screenshots on GitLab + + + +History + + + + + +Introduced in GitLab 13.0 behind the :junit_pipeline_screenshots_view feature flag, disabled by default. + +Feature flag removed in GitLab 13.12. + + + + + + +You can upload your screenshots as artifacts to GitLab. 
+If JUnit report format XML files contain an attachment tag, GitLab parses the attachment. +When uploading screenshot artifacts: + + + + The attachment tag must contain the relative path to $CI_PROJECT_DIR of the screenshots you uploaded. For +example: + + + + [[ATTACHMENT|/path/to/some/file]] + + + + + You should set the job that uploads the screenshot to +artifacts:when: always so that it still uploads a screenshot +when a test fails. + + + +After the attachment is uploaded, the pipeline test report +contains a link to the screenshot, for example: + + + +Troubleshooting + + +Test report appears empty + + +A unit test report can appear to be empty when viewed in a merge request +if the artifact that contained the report expires. +If the artifact frequently expires too early, set a longer expire_in value for +the report artifact. + +Alternatively, you can run a new pipeline to generate a new report. + + +" +how can i migrate from jenkins to gitlab ci/cd?,,"1. Migrating from Jenkins + + + +Migrating from Jenkins + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +If you’re migrating from Jenkins to GitLab CI/CD, you are able to create CI/CD +pipelines that replicate and enhance your Jenkins workflows. + +Key similarities and differences + + +GitLab CI/CD and Jenkins are CI/CD tools with some similarities. Both GitLab +and Jenkins: + + + Use stages for collections of jobs. + Support container-based builds. + + +Additionally, there are some important differences between the two: + + + GitLab CI/CD pipelines are all configured in a YAML format configuration file. +Jenkins uses either a Groovy format configuration file (declarative pipelines) +or Jenkins DSL (scripted pipelines). + GitLab can run either on SaaS (cloud) or self-managed deployments. Jenkins deployments must be self-managed. + GitLab provides source code management (SCM) out of the box. Jenkins requires a separate +SCM solution to store code. + GitLab provides a built-in container registry. Jenkins requires a separate solution +for storing container images. + GitLab provides built-in templates for scanning code. Jenkins requires 3rd party plugins +for scanning code. + + +Comparison of features and concepts + + +Many Jenkins features and concepts have equivalents in GitLab that offer the same +functionality. + +Configuration file + + +Jenkins can be configured with a Jenkinsfile in the Groovy format. GitLab CI/CD uses a .gitlab-ci.yml YAML file by default. + +Example of a Jenkinsfile: + +pipeline { + agent any + + stages { + stage('hello') { + steps { + echo ""Hello World"" + } + } + } +} + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +stages: + - hello + +hello-job: + stage: hello + script: + - echo ""Hello World"" + + +Jenkins pipeline syntax + + +A Jenkins configuration is composed of a pipeline block with “sections” and “directives”. +GitLab CI/CD has similar functionality, configured with YAML keywords. + +Sections + + + + + + Jenkins + GitLab + Explanation + + + + + agent + image + Jenkins pipelines execute on agents, and the agent section defines how the pipeline executes, and the Docker container to use. GitLab jobs execute on runners, and the image keyword defines the container to use. You can configure your own runners in Kubernetes or on any host. + + + post + +after_script or stage + + The Jenkins post section defines actions that should be performed at the end of a stage or pipeline. 
In GitLab, use after_script for commands to run at the end of a job, and before_script for actions to run before the other commands in a job. Use stage to select the exact stage a job should run in. GitLab supports both .pre and .post stages that always run before or after all other defined stages. + + + stages + stages + Jenkins stages are groups of jobs. GitLab CI/CD also uses stages, but it is more flexible. You can have multiple stages each with multiple independent jobs. Use stages at the top level to the stages and their execution order, and use stage at the job level to define the stage for that job. + + + steps + script + Jenkins steps define what to execute. GitLab CI/CD uses a script section which is similar. The script section is a YAML array with separate entries for each command to run in sequence. + + + + +Directives + + + + + + Jenkins + GitLab + Explanation + + + + + environment + variables + Jenkins uses environment for environment variables. GitLab CI/CD uses the variables keyword to define CI/CD variables that can be used during job execution, but also for more dynamic pipeline configuration. These can also be set in the GitLab UI, under CI/CD settings. + + + options + Not applicable + Jenkins uses options for additional configuration, including timeouts and retry values. GitLab does not need a separate section for options, all configuration is added as CI/CD keywords at the job or pipeline level, for example timeout or retry. + + + parameters + Not applicable + In Jenkins, parameters can be required when triggering a pipeline. Parameters are handled in GitLab with CI/CD variables, which can be defined in many places, including the pipeline configuration, project settings, at runtime manually through the UI, or API. + + + triggers + rules + In Jenkins, triggers defines when a pipeline should run again, for example through cron notation. GitLab CI/CD can run pipelines automatically for many reasons, including Git changes and merge request updates. Use the rules keyword to control which events to run jobs for. Scheduled pipelines are defined in the project settings. + + + tools + Not applicable + In Jenkins, tools defines additional tools to install in the environment. GitLab does not have a similar keyword, as the recommendation is to use container images prebuilt with the exact tools required for your jobs. These images can be cached and can be built to already contain the tools you need for your pipelines. If a job needs additional tools, they can be installed as part of a before_script section. + + + input + Not applicable + In Jenkins, input adds a prompt for user input. Similar to parameters, inputs are handled in GitLab through CI/CD variables. + + + when + rules + In Jenkins, when defines when a stage should be executed. GitLab also has a when keyword, which defines whether a job should start running based on the status of earlier jobs, for example if jobs passed or failed. To control when to add jobs to specific pipelines, use rules. + + + + +Common configurations + + +This section goes over commonly used CI/CD configurations, showing how they can be converted +from Jenkins to GitLab CI/CD. + +Jenkins pipelines generate automated CI/CD jobs +that are triggered when certain event take place, such as a new commit being pushed. +A Jenkins pipeline is defined in a Jenkinsfile. The GitLab equivalent is the .gitlab-ci.yml configuration file. 
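To make the directive mapping above concrete, the following is a hedged sketch of a single GitLab CI/CD job that combines several of the equivalents from the table: variables in place of environment, rules in place of when and triggers, and timeout and retry in place of options. The job name and all values are illustrative only:

# Hedged sketch only: job name and values are illustrative.
deploy-review:
  stage: deploy
  variables:                    # Jenkins: environment
    DEPLOY_TARGET: "review"
  rules:                        # Jenkins: when / triggers
    - if: $CI_COMMIT_BRANCH == "main"
  timeout: 30 minutes           # Jenkins: options { timeout(...) }
  retry: 2                      # Jenkins: options { retry(...) }
  script:
    - echo "Deploying to $DEPLOY_TARGET"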
+ +Jenkins does not provide a place to store source code, so the Jenkinsfile must be stored +in a separate source control repository. + +Jobs + + +Jobs are a set of commands that run in a set sequence to achieve a particular result. + +For example, build a container then deploy it to production, in a Jenkinsfile: + +pipeline { + agent any + stages { + stage('build') { + agent { docker 'golang:alpine' } + steps { + apk update + go build -o bin/hello + } + post { + always { + archiveArtifacts artifacts: 'bin/hello' + onlyIfSuccessful: true + } + } + } + stage('deploy') { + agent { docker 'golang:alpine' } + when { + branch 'staging' + } + steps { + echo ""Deploying to staging"" + scp bin/hello remoteuser@remotehost:/remote/directory + } + } + } +} + + +This example: + + + Uses the golang:alpine container image. + Runs a job for building code. + + Stores the built executable as an artifact. + + + Adds a second job to deploy to staging, which: + + Only exists if the commit targets the staging branch. + Starts after the build stage succeeds. + Uses the built executable artifact from the earlier job. + + + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +default: + image: golang:alpine + +stages: + - build + - deploy + +build-job: + stage: build + script: + - apk update + - go build -o bin/hello + artifacts: + paths: + - bin/hello + expire_in: 1 week + +deploy-job: + stage: deploy + script: + - echo ""Deploying to Staging"" + - scp bin/hello remoteuser@remotehost:/remote/directory + rules: + - if: $CI_COMMIT_BRANCH == 'staging' + artifacts: + paths: + - bin/hello + + +Parallel + + +In Jenkins, jobs that are not dependent on previous jobs can run in parallel when +added to a parallel section. + +For example, in a Jenkinsfile: + +pipeline { + agent any + stages { + stage('Parallel') { + parallel { + stage('Python') { + agent { docker 'python:latest' } + steps { + sh ""python --version"" + } + } + stage('Java') { + agent { docker 'openjdk:latest' } + when { + branch 'staging' + } + steps { + sh ""java -version"" + } + } + } + } + } +} + + +This example runs a Python and a Java job in parallel, using different container images. +The Java job only runs when the staging branch is changed. + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +python-version: + image: python:latest + script: + - python --version + +java-version: + image: openjdk:latest + rules: + - if: $CI_COMMIT_BRANCH == 'staging' + script: + - java -version + + +In this case, no extra configuration is needed to make the jobs run in parallel. +Jobs run in parallel by default, each on a different runner assuming there are enough runners +for all the jobs. The Java job is set to only run when the staging branch is changed. + +Matrix + + +In GitLab you can use a matrix to run a job multiple times in parallel in a single pipeline, +but with different variable values for each instance of the job. Jenkins runs the matrix sequentially. 
+ +For example, in a Jenkinsfile: + +matrix { + axes { + axis { + name 'PLATFORM' + values 'linux', 'mac', 'windows' + } + axis { + name 'ARCH' + values 'x64', 'x86' + } + } + stages { + stage('build') { + echo ""Building $PLATFORM for $ARCH"" + } + stage('test') { + echo ""Building $PLATFORM for $ARCH"" + } + stage('deploy') { + echo ""Building $PLATFORM for $ARCH"" + } + } +} + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +stages: + - build + - test + - deploy + +.parallel-hidden-job: + parallel: + matrix: + - PLATFORM: [linux, mac, windows] + ARCH: [x64, x86] + +build-job: + extends: .parallel-hidden-job + stage: build + script: + - echo ""Building $PLATFORM for $ARCH"" + +test-job: + extends: .parallel-hidden-job + stage: test + script: + - echo ""Testing $PLATFORM for $ARCH"" + +deploy-job: + extends: .parallel-hidden-job + stage: deploy + script: + - echo ""Testing $PLATFORM for $ARCH"" + + +Container Images + + +In GitLab you can run your CI/CD jobs in separate, isolated Docker containers +using the image keyword. + +For example, in a Jenkinsfile: + +stage('Version') { + agent { docker 'python:latest' } + steps { + echo 'Hello Python' + sh 'python --version' + } +} + + +This example shows commands running in a python:latest container. + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +version-job: + image: python:latest + script: + - echo ""Hello Python"" + - python --version + + +Variables + + +In GitLab, use the variables keyword to define CI/CD variables. +Use variables to reuse configuration data, have more dynamic configuration, or store important values. +Variables can be defined either globally or per job. + +For example, in a Jenkinsfile: + +pipeline { + agent any + environment { + NAME = 'Fern' + } + stages { + stage('English') { + environment { + GREETING = 'Hello' + } + steps { + sh 'echo ""$GREETING $NAME""' + } + } + stage('Spanish') { + environment { + GREETING = 'Hola' + } + steps { + sh 'echo ""$GREETING $NAME""' + } + } + } +} + + +This example shows how variables can be used to pass values to commands in jobs. + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +default: + image: alpine:latest + +stages: + - greet + +variables: + NAME: ""Fern"" + +english: + stage: greet + variables: + GREETING: ""Hello"" + script: + - echo ""$GREETING $NAME"" + +spanish: + stage: greet + variables: + GREETING: ""Hola"" + script: + - echo ""$GREETING $NAME"" + + +Variables can also be set in the GitLab UI, in the CI/CD settings. +In some cases, you can use protected +and masked variables for secret values. +These variables can be accessed in pipeline jobs the same as variables defined in the +configuration file. + +For example, in a Jenkinsfile: + +pipeline { + agent any + stages { + stage('Example Username/Password') { + environment { + AWS_ACCESS_KEY = credentials('aws-access-key') + } + steps { + sh 'my-login-script.sh $AWS_ACCESS_KEY' + } + } + } +} + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +login-job: + script: + - my-login-script.sh $AWS_ACCESS_KEY + + +Additionally, GitLab CI/CD makes predefined variables +available to every pipeline and job which contain values relevant to the pipeline and repository. + +Expressions and conditionals + + +When a new pipeline starts, GitLab checks which jobs should run in that pipeline. +You can configure jobs to run depending on factors like the status of variables, +or the pipeline type. 
+ +For example, in a Jenkinsfile: + +stage('deploy_staging') { + agent { docker 'alpine:latest' } + when { + branch 'staging' + } + steps { + echo ""Deploying to staging"" + } +} + + +In this example, the job only runs when the branch we are committing to is named staging. + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +deploy_staging: + stage: deploy + script: + - echo ""Deploy to staging server"" + rules: + - if: '$CI_COMMIT_BRANCH == staging' + + +Runners + + +Like Jenkins agents, GitLab runners are the hosts that run jobs. If you are using GitLab.com, +you can use the instance runner fleet to run jobs without provisioning +your own runners. + +To convert a Jenkins agent for use with GitLab CI/CD, uninstall the agent and then +install and register a runner. Runners do not require much overhead, +so you might be able to use similar provisioning as the Jenkins agents you were using. + +Some key details about runners: + + + Runners can be configured to be shared across an instance, +a group, or dedicated to a single project. + You can use the tags keyword +for finer control, and associate runners with specific jobs. For example, you can use a tag for jobs that +require dedicated, more powerful, or specific hardware. + GitLab has autoscaling for runners. +Use autoscaling to provision runners only when needed and scale down when not needed. + + +For example, in a Jenkinsfile: + +pipeline { + agent none + stages { + stage('Linux') { + agent { + label 'linux' + } + steps { + echo ""Hello, $USER"" + } + } + stage('Windows') { + agent { + label 'windows' + } + steps { + echo ""Hello, %USERNAME%"" + } + } + } +} + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +linux_job: + stage: build + tags: + - linux + script: + - echo ""Hello, $USER"" + +windows_job: + stage: build + tags: + - windows + script: + - echo ""Hello, %USERNAME%"" + + +Artifacts + + +In GitLab, any job can use the artifacts keyword to define a set of artifacts to +be stored when a job completes. Artifacts are files that can be used in later jobs, +for example for testing or deployment. + +For example, in a Jenkinsfile: + +stages { + stage('Generate Cat') { + steps { + sh 'touch cat.txt' + sh 'echo ""meow"" > cat.txt' + } + post { + always { + archiveArtifacts artifacts: 'cat.txt' + onlyIfSuccessful: true + } + } + } + stage('Use Cat') { + steps { + sh 'cat cat.txt' + } + } + } + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +stages: + - generate + - use + +generate_cat: + stage: generate + script: + - touch cat.txt + - echo ""meow"" > cat.txt + artifacts: + paths: + - cat.txt + expire_in: 1 week + +use_cat: + stage: use + script: + - cat cat.txt + artifacts: + paths: + - cat.txt + + +Caching + + +A cache is created when a job downloads one or more files and +saves them for faster access in the future. Subsequent jobs that use the same cache don’t have to download the files again, +so they execute more quickly. The cache is stored on the runner and uploaded to S3 if +distributed cache is enabled. +Jenkins core does not provide caching. + +For example, in a .gitlab-ci.yml file: + +cache-job: + script: + - echo ""This job uses a cache."" + cache: + key: binaries-cache-$CI_COMMIT_REF_SLUG + paths: + - binaries/ + + +Jenkins plugins + + +Some functionality in Jenkins that is enabled through plugins is supported natively +in GitLab with keywords and features that offer similar functionality. 
For example: + + + + + Jenkins plugin + GitLab feature + + + + + Build Timeout + timeout keyword + + + Cobertura + +Coverage report artifacts and Code coverage + + + + Code coverage API + +Code coverage and Test coverage visualization + + + + Embeddable Build Status + Pipeline status badges + + + JUnit + +JUnit test report artifacts and Unit test reports + + + + Mailer + Notification emails + + + Parameterized Trigger Plugin + +trigger keyword and downstream pipelines + + + + Role-based Authorization Strategy + GitLab permissions and roles + + + + Timestamper + +Job logs are time stamped by default + + + + +Security Scanning features + + +You might have used plugins for things like code quality, security, or static application scanning in Jenkins. +GitLab provides security scanners out-of-the-box to detect +vulnerabilities in all parts of the SDLC. You can add these plugins in GitLab using templates, for example to add +SAST scanning to your pipeline, add the following to your .gitlab-ci.yml: + +include: + - template: Jobs/SAST.gitlab-ci.yml + + +You can customize the behavior of security scanners by using CI/CD variables, for example +with the SAST scanners. + +Secrets Management + + +Privileged information, often referred to as “secrets”, is sensitive information +or credentials you need in your CI/CD workflow. You might use secrets to unlock protected resources +or sensitive information in tools, applications, containers, and cloud-native environments. + +Secrets management in Jenkins is usually handled with the Secret type field or the +Credentials Plugin. Credentials stored in the Jenkins settings can be exposed to +jobs as environment variables by using the Credentials Binding plugin. + +For secrets management in GitLab, you can use one of the supported integrations +for an external service. These services securely store secrets outside of your GitLab project, +though you must have a subscription for the service: + + + HashiCorp Vault + +Azure Key Vault. + + +GitLab also supports OIDC authentication +for other third party services that support OIDC. + +Additionally, you can make credentials available to jobs by storing them in CI/CD variables, though secrets +stored in plain text are susceptible to accidental exposure, the same as in Jenkins. +You should always store sensitive information in masked +and protected variables, which mitigates +some of the risk. + +Also, never store secrets as variables in your .gitlab-ci.yml file, which is public to all +users with access to the project. Storing sensitive information in variables should +only be done in the project, group, or instance settings. + +Review the security guidelines to improve +the safety of your CI/CD variables. + +Planning and Performing a Migration + + +The following list of recommended steps was created after observing organizations +that were able to quickly complete this migration. + +Create a Migration Plan + + +Before starting a migration you should create a migration plan to make preparations for the migration. For a migration from Jenkins, ask yourself the following questions in preparation: + + + What plugins are used by jobs in Jenkins today? + + Do you know what these plugins do exactly? + Do any plugins wrap a common build tool? For example, Maven, Gradle, or NPM? + + + What is installed on the Jenkins agents? + Are there any shared libraries in use? + How are you authenticating from Jenkins? Are you using SSH keys, API tokens, or other secrets? 
+ Are there other projects that you need to access from your pipeline? + Are there credentials in Jenkins to access outside services? For example Ansible Tower, +Artifactory, or other Cloud Providers or deployment targets? + + +Prerequisites + + +Before doing any migration work, you should first: + + + Get familiar with GitLab. + + Read about the key GitLab CI/CD features. + Follow tutorials to create your first GitLab pipeline and more complex pipelines that build, test, and deploys a static site. + Review the CI/CD YAML syntax reference. + + + Set up and configure GitLab. + Test your GitLab instance. + + Ensure runners are available, either by using shared GitLab.com runners or installing new runners. + + + + +Migration Steps + + + + Migrate projects from your SCM solution to GitLab. + + (Recommended) You can use the available importers +to automate mass imports from external SCM providers. + You can import repositories by URL. + + + Create a .gitlab-ci.yml file in each project. + Migrate Jenkins configuration to GitLab CI/CD jobs and configure them to show results directly in merge requests. + Migrate deployment jobs by using cloud deployment templates, +environments, and the GitLab agent for Kubernetes. + Check if any CI/CD configuration can be reused across different projects, then create +and share CI/CD templates. + Check the pipeline efficiency documentation +to learn how to make your GitLab CI/CD pipelines faster and more efficient. + + +Additional Resources + + + + + You can use the JenkinsFile Wrapper +to run a complete Jenkins instance inside of a GitLab CI/CD job, including plugins. Use this tool to help ease the transition to GitLab CI/CD, by delaying the migration of less urgent pipelines. + + + note The JenkinsFile Wrapper is not packaged with GitLab and falls outside of the scope of support. +For more information, see the Statement of Support. + + + + +If you have questions that are not answered here, the GitLab community forum can be a great resource. + + +2. Jenkins + + + +Jenkins + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + Moved to GitLab Free in 13.7. + + + + + +Jenkins is an open source automation server that supports +building, deploying and automating projects. + +You should use a Jenkins integration with GitLab when: + + + You plan to migrate your CI from Jenkins to GitLab CI/CD +in the future, but need an interim solution. + You’re invested in Jenkins plugins and choose +to keep using Jenkins to build your apps. + + +This integration can trigger a Jenkins build when a change is pushed to GitLab. + +You cannot use this integration to trigger GitLab CI/CD pipelines from Jenkins. Instead, +use the pipeline triggers API endpoint in a Jenkins job, +authenticated with a pipeline trigger token. + +After you have configured a Jenkins integration, you trigger a build in Jenkins +when you push code to your repository or create a merge request in GitLab. The +Jenkins pipeline status displays on merge request widgets and the GitLab +project’s home page. + + +For an overview of the Jenkins integration for GitLab, see +GitLab workflow with Jira issues and Jenkins pipelines. + +To configure a Jenkins integration with GitLab: + + + Grant Jenkins access to the GitLab project. + Configure the Jenkins server. + Configure the Jenkins project. + Configure the GitLab project. + + +Grant Jenkins access to the GitLab project + + + + + Create a personal, project, or group access token. 
+ + + +Create a personal access token +to use the token for all Jenkins integrations of that user. + +Create a project access token +to use the token at the project level only. For instance, you can revoke +the token in a project without affecting Jenkins integrations in other projects. + +Create a group access token +to use the token for all Jenkins integrations in all projects of that group. + + + Set the access token scope to API. + Copy the access token value to configure the Jenkins server. + + +Configure the Jenkins server + + +Install and configure the Jenkins plugin to authorize the connection to GitLab. + + + On the Jenkins server, select Manage Jenkins > Manage Plugins. + Select the Available tab. Search for gitlab-plugin and select it to install. +See the Jenkins GitLab documentation +for other ways to install the plugin. + Select Manage Jenkins > Configure System. + In the GitLab section, select Enable authentication for ‘/project’ end-point. + Select Add, then choose Jenkins Credential Provider. + Select GitLab API token as the token type. + In API Token, paste the access token value you copied from GitLab +and select Add. + Enter the GitLab server’s URL in GitLab host URL. + + To test the connection, select Test Connection. + + + + + +For more information, see +Jenkins-to-GitLab authentication. + +Configure the Jenkins project + + +Set up the Jenkins project you intend to run your build on. + + + On your Jenkins instance, select New Item. + Enter the project’s name. + Select Freestyle or Pipeline and select OK. +You should select a freestyle project, because the Jenkins plugin updates the build status on +GitLab. In a pipeline project, you must configure a script to update the status on GitLab. + Choose your GitLab connection from the dropdown list. + Select Build when a change is pushed to GitLab. + Select the following checkboxes: + + Accepted Merge Request Events + Closed Merge Request Events + + + Specify how the build status is reported to GitLab: + + If you created a freestyle project, in the Post-build Actions section, +choose Publish build status to GitLab. + + If you created a pipeline project, you must use a Jenkins Pipeline script to +update the status on GitLab. + + Example Jenkins Pipeline script: + + + pipeline { + agent any + + stages { + stage('gitlab') { + steps { + echo 'Notify GitLab' + updateGitlabCommitStatus name: 'build', state: 'pending' + updateGitlabCommitStatus name: 'build', state: 'success' + } + } + } + } + + + For more Jenkins Pipeline script examples, see the + Jenkins GitLab plugin repository on GitHub. + + + + + +Configure the GitLab project + + +Configure the GitLab integration with Jenkins in one of the following ways. + +With a Jenkins server URL + + +You should use this approach for Jenkins integrations if you can provide GitLab +with your Jenkins server URL and authentication information. + + + On the left sidebar, select Search or go to and find your project. + Select Settings > Integrations. + Select Jenkins. + Select the Active checkbox. + Select the events you want GitLab to trigger a Jenkins build for: + + Push + Merge request + Tag push + + + Enter the Jenkins server URL. + Optional. Clear the Enable SSL verification checkbox to disable SSL verification. + Enter the Project name. +The project name should be URL-friendly, where spaces are replaced with underscores. To ensure +the project name is valid, copy it from your browser’s address bar while viewing the Jenkins +project. 
+ If your Jenkins server requires authentication, enter the Username and Password. + Optional. Select Test settings. + Select Save changes. + + +With a webhook + + +If you cannot provide GitLab with your Jenkins server URL and authentication information, you can configure a webhook to integrate GitLab and Jenkins. + + + In the configuration of your Jenkins job, in the GitLab configuration section, select Advanced. + Under Secret Token, select Generate. + Copy the token, and save the job configuration. + In GitLab: + + +Create a webhook for your project. + Enter the trigger URL (such as https://JENKINS_URL/project/YOUR_JOB). + Paste the token in Secret Token. + + + To test the webhook, select Test. + + +Related topics + + + + GitLab Jenkins Integration + How to set up Jenkins on your local machine + How to migrate from Jenkins to GitLab CI/CD + Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment + + +Troubleshooting + + +Error during GitLab configuration - “Connection failed. Please check your settings” + + +While configuring GitLab, you might get an error that states “Connection failed. Please check your settings”. + +This issue has multiple possible causes and solutions: + + + + + Cause + Workaround + + + + + GitLab is unable to reach your Jenkins instance at the address. + If your GitLab instance is self-managed, ping the Jenkins instance at the domain provided on the GitLab instance. + + + The Jenkins instance is at a local address and is not included in the GitLab installation’s allowlist. + Add the instance to the GitLab installation’s allowlist. + + + The credentials for the Jenkins instance do not have sufficient access or are invalid. + Grant the credentials sufficient access or create valid credentials. + + + The Enable authentication for /project end-point checkbox is not selected in your Jenkins plugin configuration + + Select the checkbox. + + + + +Error in merge requests - “Could not connect to the CI server” + + +You might get an error that states Could not connect to the CI server in a merge +request if GitLab did not receive a build status update from Jenkins through the +Commit Status API. + +This issue occurs when Jenkins is not properly configured or there is an error +reporting the status through the API. + +To fix this issue: + + + +Configure the Jenkins server for GitLab API access. + +Configure the Jenkins project, and make sure +that, if you create a freestyle project, you choose the “Publish build status to GitLab” +post-build action. + + +Merge request event does not trigger a Jenkins pipeline + + +This issue can occur when the request exceeds the +webhook timeout, +which is set to 10 seconds by default. + +For this issue, check: + + + The integration webhook logs for request failures. + + /var/log/gitlab/gitlab-rails/production.log for messages like: + + +WebHook Error => Net::ReadTimeout + + + or + + +WebHook Error => execution expired + + + + +On self-managed GitLab instances, you can fix this issue by increasing the webhook timeout value. + +Enable job logs in Jenkins + + +To troubleshoot an integration issue, you can enable job logs in Jenkins to get +more details about your builds. + +To enable job logs in Jenkins: + + + Go to Dashboard > Manage Jenkins > System Log. + Select Add new log recorder. + Enter a name for the log recorder. + On the next screen, select Add and enter com.dabsquared.gitlabjenkins. + Make sure that the Log Level is All and select Save. + + +To view your logs: + + + Run a build. 
+ Go to Dashboard > Manage Jenkins > System Log. + Select your logger and check the logs. + + + +" +what is the recommended installation for gitlab?,,"1. Install GitLab + + +Install GitLab + + +You can install GitLab on most GNU/Linux distributions, on several +cloud providers, and in Kubernetes clusters. +To get the best experience, you should balance performance, reliability, +ease of administration (backups, upgrades, and troubleshooting) with the cost of hosting. + + + + + + +   +   +   + + + + + +Installation system requirements Prerequisites for installation. + +Installation methods Linux, Helm, Docker, Operator, source, or scripts. + +Install GitLab on a cloud provider AWS, Google Cloud Platform, Azure. + + + +Offline GitLab Isolated installation. + +Reference architectures Recommended deployments at scale. + +Upgrade GitLab Latest version instructions. + + + +Install GitLab Runner Software for CI/CD jobs. + +Configure GitLab Runner Config.toml, certificates, autoscaling, proxy setup. +   + + + + + + + +2. Installation system requirements + + + +Installation system requirements + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed + +This page includes information about the minimum requirements you need to install and use GitLab. + +Hardware requirements + + +Storage + + +The necessary hard drive space largely depends on the size of the repositories you want to store in GitLab but as a guideline you should have at least as much free space as all your repositories combined take up. + +The Linux package requires about 2.5 GB of storage space for installation. + +If you want to be flexible about growing your hard drive space in the future consider mounting it using logical volume management (LVM) so you can add more hard drives when you need them. + +Apart from a local hard drive you can also mount a volume that supports the network file system (NFS) protocol. This volume might be located on a file server, a network attached storage (NAS) device, a storage area network (SAN) or on an Amazon Web Services (AWS) Elastic Block Store (EBS) volume. + +If you have enough RAM and a recent CPU the speed of GitLab is mainly limited by hard drive seek times. Having a fast drive (7200 RPM and up) or a solid state drive (SSD) improves the responsiveness of GitLab. + + + note Because file system performance may affect the overall performance of GitLab, +we don’t recommend using cloud-based file systems for storage. + + + + note +NFS for Git repository storage is deprecated. See our official Statement of Support for further information. + + +CPU + + +CPU requirements are dependent on the number of users and expected workload. Your exact needs may be more, depending on your workload. Your workload is influenced by factors such as - but not limited to - how active your users are, how much automation you use, mirroring, and repository/change size. + +The following is the recommended minimum CPU hardware guidance for a handful of example GitLab user base sizes. + + + +4 cores is the recommended minimum number of cores and supports up to 500 users + 8 cores supports up to 1000 users + More users? Consult the reference architectures page + + + +Memory + + +Memory requirements are dependent on the number of users and expected workload. Your exact needs may be more, depending on your workload. Your workload is influenced by factors such as - but not limited to - how active your users are, how much automation you use, mirroring, and repository/change size. 
+ +The following is the recommended minimum Memory hardware guidance for a handful of example GitLab user base sizes. + + + +4 GB RAM is the required minimum memory size and supports up to 500 users + 8 GB RAM supports up to 1000 users + More users? Consult the reference architectures page + + + +For smaller installations, you should: + + + Have at least 2 GB of swap on your server, even if you have enough available RAM. Having swap helps to reduce the chance of +errors occurring if your available memory changes. + Configure the kernel’s swappiness setting to a low value like 10 to make the most of your RAM while still having the swap available when needed. + + +For larger installations that follow our reference architectures, you shouldn’t configure swap. + + + note Although excessive swapping is undesired and degrades performance, it is an +extremely important last resort against out-of-memory conditions. During +unexpected system load, such as OS updates or other services on the same host, +peak memory load spikes could be much higher than average. Having plenty of swap +helps avoid the Linux OOM killer unsafely terminating a potentially critical +process, such as PostgreSQL, which can have disastrous consequences. + + +Database + + +PostgreSQL is the only supported database, which is bundled with the Linux package. +You can also use an external PostgreSQL database. + +PostgreSQL Requirements + + +The server running PostgreSQL should have at least 5-10 GB of storage +available, though the exact requirements depend on the number of users. For Ultimate customers the server should have at least 12 GB of storage available, as 1 GB of vulnerability data needs to be imported. + +We highly recommend using at least the minimum PostgreSQL versions (as specified in +the following table) as these were used for development and testing: + + + + + GitLab version + Minimum PostgreSQL version1 + + Maximum PostgreSQL version2 + + + + + + 13.0 + 11 + 2 + + + 14.0 + 12.7 + 2 + + + 15.0 + 12.10 + 13.x (14.x3) + + + 16.0 + 13.6 + 15.x4 + + + + 17.0 (planned) + 14.9 + 15.x4 + + + + + + + PostgreSQL minor release upgrades (for example 14.8 to 14.9) include only bug and security fixes. +Patch levels in this table are not prescriptive. Always deploy the most recent patch level +to avoid known bugs in PostgreSQL that might be triggered by GitLab. + If you want to run a later major release of PostgreSQL than the specified minimum +check if a more recent version shipped with Linux package (Omnibus) GitLab. +postgresql-new is a later version that’s definitely supported. + PostgreSQL 14.x tested against GitLab 15.11 only. + +Tested against GitLab 16.1 and later. + + +You must also ensure the following extensions are loaded into every +GitLab database. Read more about this requirement, and troubleshooting. + + + + + Extension + Minimum GitLab version + + + + + pg_trgm + 8.6 + + + btree_gist + 13.1 + + + plpgsql + 11.7 + + + + +The following managed PostgreSQL services are known to be incompatible and should not be used: + + + + + GitLab version + Managed service + + + + + 14.4+ + Amazon Aurora (see 14.4.0) + + + + + + note Support for PostgreSQL 9.6 and 10 was removed in GitLab 13.0 so that GitLab can benefit from PostgreSQL 11 improvements, such as partitioning. + + +Additional requirements for GitLab Geo + + +If you’re using GitLab Geo, we strongly recommend running instances installed by using the Linux package or using +validated cloud-managed instances, +as we actively develop and test based on those. 
+We cannot guarantee compatibility with other external databases. + +It is recommended to review the full requirements for running Geo. + +Operating system locale compatibility and silent index corruption + + +Changes to locale data in glibc means that PostgreSQL database files are not fully compatible +between different OS releases. + +To avoid index corruption, check for locale compatibility +when: + + + Moving binary PostgreSQL data between servers. + Upgrading your Linux distribution. + Updating or changing third party container images. + + +Gitaly Cluster database requirements + + +Read more in the Gitaly Cluster documentation. + +Exclusive use of GitLab databases + + +Databases created or used for GitLab, Geo, Gitaly Cluster, or other components should be for the +exclusive use of GitLab. Do not make direct changes to the database, schemas, users, or other +properties except when following procedures in the GitLab documentation or following the directions +of GitLab Support or other GitLab engineers. + + + + The main GitLab application uses three schemas: + + + The default public schema + +gitlab_partitions_static (automatically created) + +gitlab_partitions_dynamic (automatically created) + + + No other schemas should be manually created. + + + GitLab may create new schemas as part of Rails database migrations. This happens when performing +a GitLab upgrade. The GitLab database account requires access to do this. + + + GitLab creates and modifies tables during the upgrade process, and also as part of standard +operations to manage partitioned tables. + + + You should not modify the GitLab schema (for example, adding triggers or modifying tables). +Database migrations are tested against the schema definition in the GitLab codebase. GitLab +version upgrades may fail if the schema is modified. + + + +Puma settings + + +The recommended settings for Puma are determined by the infrastructure on which it’s running. +The Linux package defaults to the recommended Puma settings. Regardless of installation method, you can +tune the Puma settings: + + + If you’re using the Linux package, see Puma settings +for instructions on changing the Puma settings. + If you’re using the GitLab Helm chart, see the +webservice chart. + + +Puma workers + + +The recommended number of workers is calculated as the highest of the following: + + + 2 + A combination of CPU and memory resource availability (see how this is configured automatically for the Linux package). + + +Take for example the following scenarios: + + + + A node with 2 cores / 8 GB memory should be configured with 2 Puma workers. + + Calculated as: + + +The highest number from +2 +And +[ +the lowest number from + - number of cores: 2 + - memory limit: (8 - 1.5) = 6.5 +] + + + So, the highest from 2 and 2 is 2. + + + A node with 4 cores / 4 GB memory should be configured with 2 Puma workers. + + +The highest number from +2 +And +[ +the lowest number from + - number of cores: 4 + - memory limit: (4 - 1.5) = 2.5 +] + + + So, the highest from 2 and 2 is 2. + + + A node with 4 cores / 8 GB memory should be configured with 4 Puma workers. + + +The highest number from +2 +And +[ +the lowest number from + - number of cores: 4 + - memory limit: (8 - 1.5) = 6.5 +] + + + So, the highest from 2 and 4 is 4. + + + +You can increase the number of Puma workers, provided enough CPU and memory capacity is available. +A higher number of Puma workers usually helps to reduce the response time of the application +and increase the ability to handle parallel requests. 
You must perform testing to verify the +optimal settings for your infrastructure. + +Puma threads + + +The recommended number of threads is dependent on several factors, including total memory. + + + If the operating system has a maximum 2 GB of memory, the recommended number of threads is 1. +A higher value results in excess swapping, and decrease performance. + In all other cases, the recommended number of threads is 4. We don’t recommend setting this +higher, due to how Ruby MRI multi-threading +works. + + +Puma per worker maximum memory + + +By default, each Puma worker is limited to 1.2 GB of memory. +You can adjust this memory setting and should do so +if you must increase the number of Puma workers. + +Redis + + +Redis stores all user sessions and the background task queue. + +The requirements for Redis are as follows: + + + Redis 6.x or 7.x is required in GitLab 16.0 and later. However, you should upgrade to +Redis 6.2 or later as Redis 6.0 is no longer supported. + Redis Cluster mode is not supported. Redis Standalone must be used, with or without HA. + Storage requirements for Redis are minimal, about 25 kB per user on average. + +Redis eviction mode set appropriately. + + +Sidekiq + + +Sidekiq processes the background jobs with a multi-threaded process. +This process starts with the entire Rails stack (200 MB+) but it can grow over time due to memory leaks. +On a very active server (10,000 billable users) the Sidekiq process can use 1 GB+ of memory. + +Prometheus and its exporters + + +Prometheus and its related exporters are enabled by +default to enable in depth monitoring of GitLab. With default settings, these +processes consume approximately 200 MB of memory. + +If you would like to disable Prometheus and it’s exporters or read more information +about it, check the Prometheus documentation. + +GitLab Runner + + +We strongly advise against installing GitLab Runner on the same machine you plan +to install GitLab on. Depending on how you decide to configure GitLab Runner and +what tools you use to exercise your application in the CI environment, GitLab +Runner can consume significant amount of available memory. + +Memory consumption calculations, that are available above, are not valid if +you decide to run GitLab Runner and the GitLab Rails application on the same +machine. + +It’s also not safe to install everything on a single machine, because of the +security reasons, especially when you plan to use shell executor with GitLab +Runner. + +We recommend using a separate machine for each GitLab Runner, if you plan to +use the CI features. +The GitLab Runner server requirements depend on: + + + The type of executor you configured on GitLab Runner. + Resources required to run build jobs. + Job concurrency settings. + + +Because the nature of the jobs varies for each use case, you must experiment by adjusting the job concurrency to get the optimum setting. + +For reference, the SaaS runners on Linux +are configured so that a single job runs in a single instance with: + + + 1 vCPU. + 3.75 GB of RAM. + + +Supported web browsers + + + + caution With GitLab 13.0 (May 2020) we have removed official support for Internet Explorer 11. + + +GitLab supports the following web browsers: + + + Mozilla Firefox + Google Chrome + Chromium + Apple Safari + Microsoft Edge + + +For the listed web browsers, GitLab supports: + + + The current and previous major versions of browsers. + The current minor version of a supported major version. 
+ + + + note We don’t support running GitLab with JavaScript disabled in the browser and have no plans of supporting that +in the future because we have features such as issue boards which require JavaScript extensively. + + +Security + + +After installation, be sure to read and follow guidance on maintaining a secure GitLab installation. + + + + +" +can GitLab Duo Chat help me setup a gitlab pages site?,,"1. GitLab Duo Chat + + + +GitLab Duo Chat + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated +Status: Beta + + +History + + + + + +Introduced as an Experiment for SaaS in GitLab 16.0. + Changed to Beta for SaaS in GitLab 16.6. + +Introduced as a Beta for self-managed in GitLab 16.8. + Changed from Ultimate to Premium tier in GitLab 16.9. + + + + + + +GitLab Duo Chat is your personal AI-powered assistant for boosting productivity. +It can assist various tasks of your daily work with the AI-generated content. +Here are the examples of use cases: + + + + + Feature + Use case example + Supported interfaces + Supported deployments + + + + + Ask about GitLab + I want to know how to create an issue in GitLab. + GitLab, VS Code, and Web IDE 1 + + GitLab.com + + + Ask about a specific issue + I want to summarize this issue. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Ask about a specific epic + I want to summarize this epic. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Ask about code + I want to understand how this code works. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Ask about CI/CD + I want to create a new CI/CD pipeline configuration. + GitLab, VS Code, and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Explain code in the IDE + I want to understand how this code works. + VS Code and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Refactor code in the IDE + I want to refactor this code. + VS Code and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + Write tests in the IDE + I want to write a test for this code. + VS Code and Web IDE 1 + + GitLab.com, self-managed, and GitLab Dedicated + + + + + +Footnotes: + + GitLab Duo Chat is not available in Web IDE on self-managed + + + + + + note This is a Beta feature. We’re continuously extending the capabilities and reliability of the responses. + + +Watch a demo + + + + View how to setup and use GitLab Duo Chat. + + + + + +What GitLab Duo Chat can help with + + +GitLab Duo Chat can help in a variety of areas. + +Ask about GitLab + + + +History + + + + + +Introduced for SaaS in GitLab 16.0. + + + + + + +You can ask questions about how GitLab works. Things like: + + + Explain the concept of a 'fork' in a concise manner. + Provide step-by-step instructions on how to reset a user's password. + + + + note This feature is not currently supported in self-managed instances. +See this epic for more information. + + +Ask about a specific issue + + + +History + + + + + +Introduced for SaaS in GitLab 16.0. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can ask about a specific GitLab issue. For example: + + + Generate a summary for the issue identified via this link: + When you are viewing an issue in GitLab, you can ask Generate a concise summary of the current issue. + + How can I improve the description of so that readers understand the value and problems to be solved? 
+ + +Ask about a specific epic + + + +History + + + + + +Introduced for SaaS in GitLab 16.3. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can ask about a specific GitLab epic. For example: + + + Generate a summary for the epic identified via this link: + When you are viewing an epic in GitLab, you can ask Generate a concise summary of the opened epic. + + What are the unique use cases raised by commenters in ? + + +Ask about code + + + +History + + + + + +Introduced for SaaS in GitLab 16.1. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can also ask GitLab Duo Chat to generate code: + + + Write a Ruby function that prints 'Hello, World!' when called. + Develop a JavaScript program that simulates a two-player Tic-Tac-Toe game. Provide both game logic and user interface, if applicable. + Create a regular expression for parsing IPv4 and IPv6 addresses in Python. + Generate code for parsing a syslog log file in Java. Use regular expressions when possible, and store the results in a hash map. + Create a product-consumer example with threads and shared memory in C++. Use atomic locks when possible. + Generate Rust code for high performance gRPC calls. Provide a source code example for a server and client. + + +And you can ask GitLab Duo Chat to explain code: + + + Provide a clear explanation of the given Ruby code: def sum(a, b) a + b end. Describe what this code does and how it works. + + +Alternatively, you can use the /explain command to explain the selected code in your editor. + +For more practical examples, see the GitLab Duo examples. + +Ask about errors + + +Programming languages that require compiling the source code may throw cryptic error messages. Similarly, a script or a web application could throw a stack trace. You can ask GitLab Duo Chat by prefixing the copied error message with, for example, Please explain this error message:. Add the specific context, like the programming language. + + + Explain this error message in Java: Int and system cannot be resolved to a type + Explain when this C function would cause a segmentation fault: sqlite3_prepare_v2() + Explain what would cause this error in Python: ValueError: invalid literal for int() + Why is ""this"" undefined in VueJS? Provide common error cases, and explain how to avoid them. + How to debug a Ruby on Rails stacktrace? Share common strategies and an example exception. + + +For more practical examples, see the GitLab Duo examples. + +Ask about CI/CD + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + +You can ask GitLab Duo Chat to create a CI/CD configuration: + + + Create a .gitlab-ci.yml configuration file for testing and building a Ruby on Rails application in a GitLab CI/CD pipeline. + Create a CI/CD configuration for building and linting a Python application. + Create a CI/CD configuration to build and test Rust code. + Create a CI/CD configuration for C++. Use gcc as compiler, and cmake as build tool. + Create a CI/CD configuration for VueJS. Use npm, and add SAST security scanning. + Generate a security scanning pipeline configuration, optimized for Java. + + +You can also ask to explain specific job errors by copy-pasting the error message, prefixed with Please explain this CI/CD job error message, in the context of :: + + + Please explain this CI/CD job error message in the context of a Go project: build.sh: line 14: go command not found + + +Alternatively, you can use root cause analysis in CI/CD. 
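As an illustration only, asking for a configuration that builds and lints a Python application might produce something along the lines of the sketch below. The exact output varies, and the image tags, job names, and lint tooling shown here are assumptions rather than a guaranteed response:

# Illustrative sketch of a possible generated configuration, not a guaranteed Chat response.
stages:
  - lint
  - build

lint-job:
  stage: lint
  image: python:3.11      # assumed image tag
  script:
    - pip install flake8
    - flake8 .

build-job:
  stage: build
  image: python:3.11
  script:
    - pip install build
    - python -m build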
+ +For more practical examples, see the GitLab Duo examples. + +Explain code in the IDE + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + + + note This feature is available in VS Code and the Web IDE only. + + +/explain is a special command to explain the selected code in your editor. +You can also add additional instructions to be considered, for example: /explain the performance +See Use GitLab Duo Chat in VS Code for more information. + + + /explain focus on the algorithm + /explain the performance gains or losses using this code + +/explain the object inheritance (classes, object-oriented) + +/explain why a static variable is used here (C++) + +/explain how this function would cause a segmentation fault (C) + +/explain how concurrency works in this context (Go) + +/explain how the request reaches the client (REST API, database) + + +For more practical examples, see the GitLab Duo examples. + +Refactor code in the IDE + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + + + note This feature is available in VS Code and the Web IDE only. + + +/refactor is a special command to generate a refactoring suggestion for the selected code in your editor. +You can include additional instructions to be considered. For example: + + + Use a specific coding pattern, for example /refactor with ActiveRecord or /refactor into a class providing static functions. + Use a specific library, for example /refactor using mysql. + Use a specific function/algorithm, for example /refactor into a stringstream with multiple lines in C++. + Refactor to a different programming language, for example /refactor to TypeScript. + Focus on performance, for example /refactor improving performance. + Focus on potential vulnerabilities, for example /refactor avoiding memory leaks and exploits. + + +See Use GitLab Duo Chat in the VS Code for more information. + +For more practical examples, see the GitLab Duo examples. + +Write tests in the IDE + + + +History + + + + + +Introduced for SaaS in GitLab 16.7. + +Introduced for self-managed in GitLab 16.8. + + + + + + + + note This feature is available in VS Code and the Web IDE only. + + +/tests is a special command to generate a testing suggestion for the selected code in your editor. +You can also add additional instructions to be considered, for example: /tests using the Boost.Test framework +See Use GitLab Duo Chat in the VS Code for more information. + + + Use a specific test framework, for example /tests using the Boost.test framework (C++) or /tests using Jest (JavaScript). + Focus on extreme test cases, for example /tests focus on extreme cases, force regression testing. + Focus on performance, for example /tests focus on performance. + Focus on regressions and potential exploits, for example /tests focus on regressions and potential exploits. + + +For more practical examples, see the GitLab Duo examples. + +Ask follow up questions + + +You can ask follow-up questions to delve deeper into the topic or task at hand. +This helps you get more detailed and precise responses tailored to your specific needs, +whether it’s for further clarification, elaboration, or additional assistance. + +A follow-up to the question Write a Ruby function that prints 'Hello, World!' when called could be: + + + Can you also explain how I can call and execute this Ruby function in a typical Ruby environment, such as the command line? 
+ + +A follow-up to the question How to start a C# project? could be: + + + Can you also please explain how to add a .gitignore and .gitlab-ci.yml file for C#? + + +For more practical examples, see the GitLab Duo examples. + +Enable GitLab Duo Chat + + +For SaaS users + + +To use this feature, at least one group you’re a member of must +have the experiment and beta features setting enabled. + +You can ask questions about resources that belong only to groups where this setting is enabled. + +Troubleshoot Chat access + + +If you have access to chat responses you did not expect, you might be part of +a group that has the Use Experiment and Beta features setting enabled. +Review the list of your groups and verify which ones you have access to. + +GitLab.com administrators can verify your access by running this snippet in the Rails console: + +u = User.find_by_username($USERNAME) +u.member_namespaces.namespace_settings_with_ai_features_enabled.with_ai_supported_plan(:ai_chat) + + +You can ask specific questions about group resources (like “summarize this issue”) when this feature is enabled. + +For self-managed users + + + + note Usage of GitLab Duo Chat is governed by the GitLab Testing Agreement. +Learn about data usage when using GitLab Duo Chat. + + +Prerequisites: + + + You have GitLab version 16.8 or later. + The Premium or Ultimate license is activated in your GitLab instance by using cloud licensing. + Your firewalls and HTTP proxy servers allow outbound connections +to cloud.gitlab.com. To use an HTTP proxy, both +gitLab _workhorse and gitLab_rails have the necessary +web proxy environment variables set. + All of the users in your instance have the latest version of their IDE extension. + You are an administrator. + + +To enable GitLab Duo Chat for your self-managed GitLab instance: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > General. + Expand AI-powered features and select Enable Experiment and Beta AI-powered features. + Select Save changes. + To make sure GitLab Duo Chat works immediately, you must +manually synchronize your subscription. + + +Manually synchronize your subscription + + +You must manually synchronize your subscription if either: + + + You have just purchased a subscription for the Premium or Ultimate tier and have upgraded to GitLab 16.8. + You already have a subscription for the Premium or Ultimate tier and have upgraded to GitLab 16.8. + + +Without the manual synchronization, it might take up to 24 hours to activate GitLab Duo Chat on your instance. + +Use GitLab Duo Chat in the GitLab UI + + + + In the lower-left corner, select the Help icon. +The new left sidebar must be enabled. + Select GitLab Duo Chat. A drawer opens on the right side of your screen. + Enter your question in the chat input box and press Enter or select Send. It may take a few seconds for the interactive AI chat to produce an answer. + You can ask a follow-up question. + If you want to ask a new question unrelated to the previous conversation, you may receive better answers if you clear the context by typing /reset into the input box and selecting Send. + + + + note Only the last 50 messages are retained in the chat history. The chat history expires 3 days after last use. + + +Delete all conversations + + +To delete all previous conversations: + + + In the text box, type /clean and select Send. 
+ + +Use GitLab Duo Chat in the Web IDE + + + +Tier: Premium, Ultimate +Status: Experiment + + +History + + + + + Introduced in GitLab 16.6 as an Experiment + + + + + + + +To use GitLab Duo Chat in the Web IDE on GitLab: + + + Open the Web IDE: + + On the left sidebar, select Search or go to and find your project. + Select a file. Then in the upper right, select Edit > Open in Web IDE. + + + Then open Chat by using one of the following methods: + + On the left sidebar, select GitLab Duo Chat. + In the file that you have open in the editor, select some code. + + Right-click and select GitLab Duo Chat. + Select Explain selected code or Generate Tests. + + + Use the keyboard shortcut: ALT+d (on Windows and Linux) or Option+d (on Mac) + + + In the message box, enter your question and press Enter or select Send. + + +If you have selected code in the editor, this selection is sent along with your question to the AI. This way you can ask questions about this code selection. For instance, Could you simplify this?. + + + note GitLab Duo Chat is not available in the Web IDE on self-managed. + + +Use GitLab Duo Chat in VS Code + + + +Tier: Premium, Ultimate +Status: Experiment + + +History + + + + + Introduced in GitLab 16.6 as an Experiment. + + + + + + +To use GitLab Duo Chat in GitLab Workflow extension for VS Code: + + + Install and set up the Workflow extension for VS Code: + + In VS Code, download and Install the GitLab Workflow extension for VS Code. + Configure the GitLab Workflow extension. + + + In VS Code, open a file. The file does not need to be a file in a Git repository. + Open Chat by using one of the following methods: + + On the left sidebar, select GitLab Duo Chat. + In the file that you have open in the editor, select some code. + + Right-click and select GitLab Duo Chat. + Select Explain selected code or Generate Tests. + + + Use the keyboard shortcut: ALT+d (on Windows and Linux) or Option+d (on Mac) + + + In the message box, enter your question and press Enter or select Send. + + +If you have selected code in the editor, this selection is sent along with your question to the AI. This way you can ask questions about this code selection. For instance, Could you simplify this?. + +Perform standard task in the IDE from the context menu or by using slash commands + + +Get code explained, code refactored or get tests generated for code. To do so: + + + Select code in your editor in VS Code or in the Web IDE. + Type one the following slash commands into the chat field: /explain, /refactor or /tests. Alternatively, use the context menu to perform these tasks. + + +When you use one of the slash commands you can also add additional instructions to be considered, for example: /tests using the Boost.Test framework + +Disable Chat in VS Code + + +To disable GitLab Duo Chat in VS Code: + + + Go to Settings > Extensions > GitLab Workflow (GitLab VS Code Extension). + Clear the Enable GitLab Duo Chat assistant checkbox. + + +Give feedback + + +Your feedback is important to us as we continually enhance your GitLab Duo Chat experience. +Leaving feedback helps us customize the Chat for your needs and improve its performance for everyone. + +To give feedback about a specific response, use the feedback buttons in the response message. +Or, you can add a comment in the feedback issue. + + +2. GitLab Duo examples + + + +GitLab Duo examples + + +The following use cases describe practical examples with GitLab Duo. +Learn how to start with software development and refactor existing source code. 
+Dive into debugging problems with root cause analysis, solve security vulnerabilities, +and use all stages of the DevSecOps lifecycle. + +Use GitLab Duo to solve development challenges + + +Start with a C# application + + +In this example, open your C# application and start to explore how to use +GitLab Duo AI-powered features for more efficiency. + + +Watch these steps in action in GitLab Duo Coffee Chat: Get started with C# + + +The challenge is to create a CLI tool for querying the GitLab REST API. + + + + Ask GitLab Duo Chat how to start a new C# project and learn how to use the dotNET CLI: + + +How can I get started creating an empty C# console application in VSCode? + + + + Use Code Suggestions to generate a REST API client with a new code comment: + + +// Connect to a REST API and print the response + + + + The generated source code might need an explanation: Use the code task /explain +to get an insight how the REST API calls work. + + + +After successfully generating the source code from a Code Suggestions comment, +CI/CD configuration is needed. + + + + Chat can help with best practices for a .gitignore file for C#: + + +Please show a .gitignore and .gitlab-ci.yml configuration for a C# project. + + + + If your CI/CD job fails, Root Cause Analysis +can help understand the problem. Alternatively, you can copy the error message into +GitLab Duo Chat, and ask for help: + + +Please explain the CI/CD error: The current .NET SDK does not support targeting +.NET 8.0 + + + + To create tests later, ask GitLab Duo to use the code task /refactor to refactor +the selected code into a function. + + + Chat can also explain programming language specific keywords and functions, or C# +compiler errors. + + +Can you explain async and await in C# with practical examples? + +explain error CS0122: 'Program' is inaccessible due to its protection level + + + + Generate tests by using the /tests code task. + + + +The next question is where to put the generated tests in C# solutions. +As a beginner, you might not know that the application and test projects need to exist on the same solutions level to avoid import problems. + + + + GitLab Duo Chat can help by asking and refining the prompt questions: + + +In C# and VS Code, how can I add a reference to a project from a test project? + +Please provide the XML configuration which I can add to a C# .csproj file to add a +reference to another project in the existing solution? + + + + Sometimes, you must refine the prompt to get better results. The prompt +/refactor into the public class creates a proposal for code that can be accessed +from the test project later. + + +/refactor into the public class + + + + You can also use the /refactor code task to ask Chat how to execute tests in the +.gitlab-ci.yml file. + + +/refactor add a job to run tests (the test project) + + + + +Resources: + + + Project with source code + + +Refactor a C++ application with SQLite + + +In this example, existing source code with a single main function exists. It repeats code, and cannot be tested. + + +Watch these steps in action in GitLab Duo Coffee Chat: C++, SQLite and CMake + + +Refactoring the source code into reusable and testable functions is a great first step. + + + Open VS Code or the Web IDE with GitLab Duo enabled. + + Select the source code, and ask GitLab Duo Chat to refactor it into functions, using a refined prompt: + + +/refactor into functions + + + This refactoring step might not work for the entire selected source code. 
+ + + Split the refactoring strategy into functional blocks. +For example, iterate on all insert, update, and delete operations in the database. + + + The next step is to generate tests for the newly created functions. Select the source code again. +You can use the code task /tests with specific prompt instructions for the test framework: + + +/tests using the CTest test framework + + + + If your application uses the Boost.Test framework instead, refine the prompt: + + +/tests using the Boost.Test framework + + + + +Resources: + + + Project with source code + + +Refactor C++ functions into object-oriented code + + +In this example, existing source code has been wrapped into functions. +To support more database types in the future, the code needs to be refactored into classes and object inheritance. + + +Watch these steps in action in GitLab Duo Coffee Chat: Refactor C++ functions into OOP classes + + +Start working on the class + + + + + Ask GitLab Duo Chat how to implement an object-oriented pattern for a base database class and inherit it in a SQLite class: + + +Explain a generic database implementation using a base class, and SQLite specific class using C++. Provide source examples and steps to follow. + + + + The learning curve includes asking GitLab Duo Chat about pure virtual functions and virtual function overrides in the implementation class. + + +What is a pure virtual function, and what is required for the developer inheriting from that class? + + + + Code tasks can help refactoring the code. Select the functions in the C++ header file, and use a refined prompt: + + +/refactor into class with public functions, and private path/db attributes. Inherit from the base class DB + +/refactor into a base class with pure virtual functions called DB. Remote the SQLite specific parts. + + + + GitLab Duo Chat also guides with constructor overloading, object initialization, and optimized memory management with shared pointers. + + +How to add a function implementation to a class in a cpp file? + +How to pass values to class attributes through the class constructor call? + + + + +Find better answers + + + + + The following question did not provide enough context. + + +Should I use virtual override instead of just override? + + + + Instead, try to add more context to get better answers. + + +When implementing a pure virtual function in an inherited class, should I use virtual function override, or just function override? Context is C++. + + + + A relatively complex question involves how to instantiate an object from the newly created class, and call specific functions. + + +How to instantiate an object from a class in C++, call the constructor with the SQLite DB path and call the functions. Prefer pointers. + + + + The result can be helpful, but needed refinements for shared pointers and required source code headers. + + +How to instantiate an object from a class in C++, call the constructor with the SQLite DB path and call the functions. Prefer shared pointers. Explain which header includes are necessary. + + + + Code Suggestions help generate the correct syntax for std::shared_ptr pointer arithmetic and help improve the code quality. + + +// Define the SQLite path in a variable, default value database.db + +// Create a shared pointer for the SQLite class + +// Open a database connection using OpenConnection + + + + +Refactor your code + + + + + After refactoring the source code, compiler errors may occur. Ask Chat to explain them. 
+ + +Explain the error: `db` is a private member of `SQLiteDB` + + + + A specific SQL query string should be refactored into a multi-line string for more efficient editing. + + +std::string sql = ""CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT NOT NULL, email TEXT NOT NULL)""; + + + + Select the source code, and use the /refactor code task: + + +/refactor into a stringstream with multiple lines + + + + You can also refactor utility functions into a class with static functions in C++ and then ask Chat how to call them. + + +/refactor into a class providing static functions + +How to call the static functions in the class? + + + + +After refactoring the source code, the foundation for more database types is built, and overall code quality improved. + +Resources: + + + Project with source code + + +Explain and resolve vulnerabilities + + +In this example, detected security vulnerabilities in C should be fixed with the help from GitLab Duo. + + +Watch the GitLab Duo Coffee Chat: Vulnerability Resolution Challenge #1 + + +This source code snippet +introduces a security vulnerability with a buffer overflow: + + strcpy(region, ""Hello GitLab Duo Vulnerability Resolution challenge""); + + printf(""Contents of region: %s\n"", region); + + +SAST security scanners can detect and report the problem. Use Vulnerability Explanation to understand the problem. +Vulnerability resolution helps to generate an MR. +If the suggested changes do not fit requirements, or would otherwise lead to problems, you can use Code Suggestions and Chat to refine. For example: + + + + Open VS Code or the Web IDE with GitLab Duo enabled, and add a comment with instructions: + + + // Avoid potential buffer overflows + + // Possible AI-generated code below + strncpy(region, ""Hello GitLab Duo Vulnerability Resolution challenge"", pagesize); + region[pagesize-1] = '\0'; + printf(""Contents of region: %s\n"", region); + + + + Delete the suggested code, and use a different comment to use an alternative method. + + + // Avoid potential buffer overflows using snprintf() + + // Possible AI-generated code below + snprintf(region, pagesize, ""Hello GitLab Duo Vulnerability Resolution challenge""); + + printf(""Contents of region: %s\n"", region); + + + + In addition, use GitLab Duo Chat to ask questions. The /refactor code task can generate different suggestions. +If you prefer a specific algorithm or function, refine the prompt: + + +/refactor using snprintf + + + + +Resources: + + + Project with source code: GitLab Duo Coffee Chat 2024-01-30 - Vulnerability Resolution Challenge + + + +Answer questions about GitLab + + +In this example, the challenge is exploring the GitLab Duo Chat Beta to solve problems. + + +Watch the recording here: GitLab Duo Coffee Chat: Solve problems with GitLab Duo Chat Beta Challenge + + + + + You can use GitLab Duo Chat to explain CI/CD errors. + + + Explain this CI/CD error: build.sh: line 14: go command not found + + + + What happens when you are impatient, and input just one or two words? + + +labels + +issue labels + + + GitLab Duo Chat asks for more context. + + + Refine your question into a full sentence, describing the problem and asking for a solution. + + +Explain labels in GitLab. Provide an example for efficient usage. + + + + +Resources: + + + Project with source code + + +Use GitLab Duo to contribute to GitLab + + +GitLab Duo usage focuses on contributing to the GitLab codebase, and how customers can contribute more efficiently. 
+ +The GitLab codebase is large, and requires to understand sometimes complex algorithms or application specific implementations. +Review the architecture components to learn more. + +Contribute to frontend: Profile Settings + + +In this example, the challenge was to update the GitLab profile page and improve the social networks settings. + + +Watch the recording here: GitLab Duo Coffee Chat: Contribute to GitLab using Code Suggestions and Chat + + +GitLab Duo Chat can be helpful to explain and refactor code, and generate tests. +Code Suggestions help complete existing code, and can generate new functions and algorithms in Ruby, Go, or VueJS. + + + Use the /explain code task to explain selected code sections, and learn how the HAML templates work. + You can refine the code task prompts, and instead ask /explain how HAML rendering works + + + +Alternatively, you can write in the chat prompt directly, for example: + +how to populate a select in haml + + +The refactoring examples involve the following: + + + /refactor into a HAML dropdown + After inspecting the existing UI form code, refine the prompt to /refactor into a HAML dropdown with a form select + + + +GitLab Duo Chat helped with error debugging, prefixing the error message: + +please explain this error: undefined method `icon` for + + +Code generation prompts + + +The following examples provide helpful code generation +prompts for the supported languages in GitLab Duo. +Code generation prompts can be refined using multi-line comments. + +The examples are stored in the GitLab Duo Prompts project, maintained by the Developer Relations team. + +C++ code generation prompts + + +Create an application to manage distributed file nodes. + +// Create an application to manage distributed file nodes +// Provide an overview the health state of nodes +// Use OOP patterns to define the base file node +// Add specific filesystems inherited from the base file + + +Create an eBPF program which attaches to XDP kernel events to measure network traffic. +Only works on Linux kernels. + +// Create an eBPF program which attaches to XDP kernel events +// Count all packets by IP address +// Print a summary +// Include necessary headers + + + +C# code generation prompts + + +Create a medical analyzer app from different sensors, and store the data in MSSQL. + +// Create a medical analyzer app +// Collect data from different sensors +// Store data in MSSQL +// Provide methods to access the sensor data + + +Go code generation prompts + + +Create an observability application for Kubernetes which reads and prints the state of containers, pods, and services in the cluster. + +// Create a client for Kubernetes observability +// Create a function that +// Read the kubernetes configuration file from the KUBECONFIG env var +// Create kubernetes context, namespace default +// Inspect container, pod, service status and print an overview +// Import necessary packages +// Create main package + + +Java code generation prompts + + +Create a data analytics application, with different data sources for metrics. +Provide an API for data queries and aggregation. + +// Create a data analytics app +// Parse different input sources and their values +// Store the metrics in a columnar format +// Provide an API to query and aggregate data + + +JavaScript code generation prompts + + +Create a paid-time-off application for employees in ReactJS, with a date-time picker. 
+ +// Create a Paid Time Off app for users +// Create a date-time picker in ReactJS +// Provide start and end options +// Show public holidays based on the selected country +// Send the request to a server API + + +PHP code generation prompts + + +Create an RSS feed fetcher for GitLab releases, allow filtering by title. + +// Create a web form to show GitLab releases +// Fetch the RSS feed from https://about.gitlab.com/atom.xml +// Provide filter options for the title + + +Python code generation prompts + + +Create a webserver using Flask to manage users using the REST API, store them in SQLite. + +# Create a Flask webserver +# Add REST API entrypoints to manage users by ID +# Implement create, update, delete functions +# User data needs to be stored in SQlite, create table if not exists +# Run the server on port 8080, support TLS +# Print required packages for requirements.txt in a comment. +# Use Python 3.10 as default + + +Ruby code generation prompts + + +Create a log parser application which stores log data in Elasticsearch. + +# Create a Ruby app as log parser +# Provide hooks to replace sensitive strings in log lines +# Format the logs and store them in Elasticsearch + + +Rust code generation prompts + + +Create an RSS feed reader app, example from the blog post Learn advanced Rust programming with a little help from AI. + + // Create a function that iterates over the source array + // and fetches the data using HTTP from the RSS feed items. + // Store the results in a new hash map. + // Print the hash map to the terminal. + + +Resources + + +Many of the use cases are available as hands-on recordings in the GitLab Duo Coffee Chat YouTube playlist. +The GitLab Duo Coffee Chat is a learning series maintained by the Developer Relations team. + +Blog resources + + + + Learning Rust with a little help from AI + Learn advanced Rust programming with a little help from AI + Learning Python with a little help from AI + Write Terraform plans faster with GitLab Duo Code Suggestions + Explore the Dragon Realm: Build a C++ adventure game with a little help from AI + GitLab uses Anthropic for smart, safe AI-assisted code generation + + + +" +what are the tradeoffs between gitlab saas vs gitlab self-managed?,,"1. Choose a GitLab subscription + + + +Choose a GitLab subscription + + +To choose the right GitLab subscription, select an offering and a tier. + +Choose a subscription + + +Choose which GitLab subscription suits your needs: + + + +GitLab SaaS: The GitLab software-as-a-service offering. +You don’t need to install anything to use GitLab SaaS, you only need to +sign up and start using GitLab straight away. + +GitLab Dedicated: A single-tenant SaaS service for highly regulated and large enterprises. + +GitLab self-managed: Install, administer, and maintain +your own GitLab instance. + + +On a GitLab self-managed instance, a GitLab subscription provides the same set of +features for all users. On GitLab SaaS, you can apply a subscription to a group +namespace. You cannot apply a subscription to a personal namespace. + + + note Subscriptions cannot be transferred between GitLab SaaS and GitLab self-managed. +A new subscription must be purchased and applied as needed. + + +Choose a GitLab tier + + +Pricing is tier-based, allowing you to choose +the features that fit your budget. + +For more details, see +a comparison of self-managed features available in each tier. + +Find your subscription + + +The following chart should help you determine your subscription model. 
Select the list item to go to the respective help page.

[Flowchart: Is your user account on GitLab.com? Yes: View your subscription on GitLab.com. No: View your self-hosted subscription.]

Contact Support


 See the tiers of GitLab Support.

 Submit a request through the Support Portal.


We also encourage all users to search our project trackers for known issues and existing feature requests in the GitLab project.

These issues are the best avenue for getting updates on specific product plans and for communicating directly with the relevant GitLab team members.


2. GitLab Runner



GitLab Runner



Tier: Free, Premium, Ultimate
Offering: GitLab.com, Self-managed

GitLab Runner is an application that works with GitLab CI/CD to run jobs in a pipeline.

Use GitLab.com SaaS runners


If you use GitLab.com, you can run your CI/CD jobs on SaaS runners
hosted by GitLab. 
+These runners are managed by GitLab and fully integrated with GitLab.com. +By default these runners are enabled for all projects. +You can disable the runners if you have +the Owner role for the project. + +Use self-managed runners + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed + +Alternatively, you can install GitLab Runner and register your own runners on GitLab.com or +on your own instance. To use self-managed runners, you install GitLab Runner on infrastructure +that you own or manage. + +Scale a fleet of runners + + +When your organization scales to having a fleet of runners, you +should plan for how you will monitor and adjust performance for these runners. + +GitLab Runner versions + + +For compatibility reasons, the GitLab Runner major.minor version +should stay in sync with the GitLab major and minor version. Older runners may still work +with newer GitLab versions, and vice versa. However, features may not be available or work properly +if a version difference exists. + +Backward compatibility is guaranteed between minor version updates. However, sometimes minor +version updates of GitLab can introduce new features that require GitLab Runner to be on the same minor +version. + + + note GitLab Runner 15.0 introduced a change to the +registration API request format. It prevents the GitLab Runner from communicating with GitLab versions lower than 14.8. +You must use a Runner version that is appropriate for the GitLab version, or upgrade the GitLab application. + + +If you host your own runners but host your repositories on GitLab.com, +keep GitLab Runner updated to the latest version, as GitLab.com is +updated continuously. + +Runner registration + + +After you install the application, you register +individual runners. Runners are the agents that run the CI/CD jobs that come from GitLab. + +When you register a runner, you are setting up communication between your +GitLab instance and the machine where GitLab Runner is installed. + +Runners usually process jobs on the same machine where you installed GitLab Runner. +However, you can also have a runner process jobs in a container, +in a Kubernetes cluster, or in auto-scaled instances in the cloud. + +Executors + + +When you register a runner, you must choose an executor. + +An executor determines the environment each job runs in. + +For example: + + + If you want your CI/CD job to run PowerShell commands, you might install GitLab +Runner on a Windows server and then register a runner that uses the shell executor. + If you want your CI/CD job to run commands in a custom Docker container, +you might install GitLab Runner on a Linux server and register a runner that uses +the Docker executor. + + +These are only a few of the possible configurations. You can install GitLab Runner +on a virtual machine and have it use another virtual machine as an executor. + +When you install GitLab Runner in a Docker container and choose the +Docker executor +to run your jobs, it’s sometimes referred to as a “Docker-in-Docker” configuration. + +Who has access to runners in the GitLab UI + + +Before you register a runner, you should determine if everyone in GitLab +should have access to it, or if you want to limit it to a specific GitLab group or project. 
+ +There are three types of runners, based on who you want to have access: + + + +Shared runners are for use by all projects + +Group runners are for all projects and subgroups in a group + +Project runners are for individual projects + + +The scope of a runner is defined during the registration. +This is how the runner knows which projects it’s available for. + +Tags + + +When you register a runner, you can add tags to it. + +When a CI/CD job runs, it knows which runner to use by looking at the assigned tags. +Tags are the only way to filter the list of available runners for a job. + +For example, if a runner has the ruby tag, you would add this code to +your project’s .gitlab-ci.yml file: + +job: + tags: + - ruby + + +When the job runs, it uses the runner with the ruby tag. + +Configuring runners + + +You can configure +the runner by editing the config.toml file. This is a file that is installed during the runner installation process. + +In this file you can edit settings for a specific runner, or for all runners. + +You can specify settings like logging and cache. You can set concurrency, +memory, CPU limits, and more. + +Monitoring runners + + +You can use Prometheus to monitor your runners. +You can view things like the number of currently-running jobs and how +much CPU your runners are using. + +Use a runner to run jobs + + +After a runner is configured and available for your project, your +CI/CD jobs can use the runner. + +Features + + +GitLab Runner has the following features. + + + Run multiple jobs concurrently. + Use multiple tokens with multiple servers (even per-project). + Limit the number of concurrent jobs per-token. + Jobs can be run: + + Locally. + Using Docker containers. + Using Docker containers and executing job over SSH. + Using Docker containers with autoscaling on different clouds and virtualization hypervisors. + Connecting to a remote SSH server. + + + Is written in Go and distributed as single binary without any other requirements. + Supports Bash, PowerShell Core, and Windows PowerShell. + Works on GNU/Linux, macOS, and Windows (pretty much anywhere you can run Docker). + Allows customization of the job running environment. + Automatic configuration reload without restart. + Easy to use setup with support for Docker, Docker-SSH, Parallels, or SSH running environments. + Enables caching of Docker containers. + Easy installation as a service for GNU/Linux, macOS, and Windows. + Embedded Prometheus metrics HTTP server. + Referee workers to monitor and pass Prometheus metrics and other job-specific data to GitLab. + + +Runner execution flow + + +This diagram shows how runners are registered and how jobs are requested and handled. It also shows which actions use registration, authentication, and job tokens. 
[Sequence diagram: GitLab, the runner, and the executor. Registration (optional): the runner sends POST /api/v4/runners with the registration_token, and GitLab registers it and returns a runner_token. Job requesting and handling (loop): the runner sends POST /api/v4/jobs/request with the runner_token, GitLab responds with the job payload containing a job_token, the runner passes the job payload to the executor, the executor clones sources and downloads artifacts using the job_token and returns the job output and status to the runner, and the runner updates the job output and status in GitLab using the job_token.]

Glossary


This glossary provides definitions for terms related to GitLab Runner.


GitLab Runner: The application that you install that executes GitLab CI jobs on a target computing platform.

runner configuration: A single [[runner]] entry in the config.toml that displays as a runner in the UI.

runner manager: The process that reads the config.toml and runs all the runner configurations concurrently.

runner: The process that executes the job on a selected machine. 
+Depending on the type of executor, this machine could be local to the runner manager (shell or docker executor) or a remote machine created by an autoscaler (docker-autoscaler or kubernetes). + +machine: A virtual machine (VM) or pod that the runner operates in. +GitLab Runner automatically generates a unique, persistent machine ID so that when multiple machines are given the same runner configuration, jobs can be routed separately but the runner configurations are grouped in the UI. + + +See also the official GitLab Word List and the GitLab Architecture entry for GitLab Runner. + +Troubleshooting + + +Learn how to troubleshoot common issues. + +Contributing + + +Contributions are welcome. See CONTRIBUTING.md +and the development documentation for details. + +If you’re a reviewer of GitLab Runner project, take a moment to read the +Reviewing GitLab Runner document. + +You can also review the release process for the GitLab Runner project. + +Changelog + + +See the CHANGELOG to view recent changes. + +License + + +This code is distributed under the MIT license. View the LICENSE file. + + +" +how do i set up saml for my gitlab instance,,"1. SAML SSO for self-managed GitLab instances + + + +SAML SSO for self-managed GitLab instances + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed + +This page describes how to set up instance-wide SAML single sign on (SSO) for +self-managed GitLab instances. + +You can configure GitLab to act as a SAML service provider (SP). This allows +GitLab to consume assertions from a SAML identity provider (IdP), such as +Okta, to authenticate users. + +To set up SAML on GitLab.com, see SAML SSO for GitLab.com groups. + +For more information on: + + + OmniAuth provider settings, see the OmniAuth documentation. + Commonly-used terms, see the glossary. + + +Configure SAML support in GitLab + + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + Make sure GitLab is configured with HTTPS. + Configure the common settings +to add saml as a single sign-on provider. This enables Just-In-Time +account provisioning for users who do not have an existing GitLab account. + + To allow your users to use SAML to sign up without having to manually create +an account first, edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['omniauth_allow_single_sign_on'] = ['saml'] +gitlab_rails['omniauth_block_auto_created_users'] = false + + + + Optional. You should automatically link a first-time SAML sign-in with existing GitLab users if their +email addresses match. To do this, add the following setting in /etc/gitlab/gitlab.rb: + + +gitlab_rails['omniauth_auto_link_saml_user'] = true + + + Only the GitLab account’s primary email address is matched against the email in the SAML response. + + Alternatively, a user can manually link their SAML identity to an existing GitLab +account by enabling OmniAuth for an existing user. + + + Configure the following attributes so your SAML users cannot change them: + + + +NameID. + +Email when used with omniauth_auto_link_saml_user. + + + If users can change these attributes, they can sign in as other authorized users. +See your SAML IdP documentation for information on how to make these attributes +unchangeable. 
+ + + Edit /etc/gitlab/gitlab.rb and add the provider configuration: + + +gitlab_rails['omniauth_providers'] = [ + { + name: ""saml"", + label: ""Provider name"", # optional label for login button, defaults to ""Saml"" + args: { + assertion_consumer_service_url: ""https://gitlab.example.com/users/auth/saml/callback"", + idp_cert_fingerprint: ""43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8"", + idp_sso_target_url: ""https://login.example.com/idp"", + issuer: ""https://gitlab.example.com"", + name_identifier_format: ""urn:oasis:names:tc:SAML:2.0:nameid-format:persistent"" + } + } +] + + + Where: + + + +assertion_consumer_service_url: The GitLab HTTPS endpoint +(append /users/auth/saml/callback to the HTTPS URL of your GitLab installation). + +idp_cert_fingerprint: Your IdP value. It must be a SHA1 fingerprint. +For more information on these values, see the +OmniAuth SAML documentation. +For more information on other configuration settings, see +configuring SAML on your IdP. + +idp_sso_target_url: Your IdP value. + +issuer: Change to a unique name, which identifies the application to the IdP. + +name_identifier_format: Your IdP value. + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + Make sure GitLab is configured with HTTPS. + Configure the common settings +to add saml as a single sign-on provider. This enables Just-In-Time +account provisioning for users who do not have an existing GitLab account. + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + To allow your users to use SAML to sign up without having to manually create +an account first, edit gitlab_values.yaml: + + +global: + appConfig: + omniauth: + enabled: true + allowSingleSignOn: ['saml'] + blockAutoCreatedUsers: false + + + + Optional. You can automatically link SAML users with existing GitLab users if their +email addresses match by adding the following setting in gitlab_values.yaml: + + +global: + appConfig: + omniauth: + autoLinkSamlUser: true + + + Alternatively, a user can manually link their SAML identity to an existing GitLab +account by enabling OmniAuth for an existing user. + + + Configure the following attributes so your SAML users cannot change them: + + + +NameID. + +Email when used with omniauth_auto_link_saml_user. + + + If users can change these attributes, they can sign in as other authorized users. +See your SAML IdP documentation for information on how to make these attributes +unchangeable. + + + Put the following content in a file named saml.yaml to be used as a +Kubernetes Secret: + + +name: 'saml' +label: 'Provider name' # optional label for login button, defaults to ""Saml"" +args: + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback' + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8' + idp_sso_target_url: 'https://login.example.com/idp' + issuer: 'https://gitlab.example.com' + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + + + Where: + + + +assertion_consumer_service_url: The GitLab HTTPS endpoint +(append /users/auth/saml/callback to the HTTPS URL of your GitLab installation). + +idp_cert_fingerprint: Your IdP value. It must be a SHA1 fingerprint. +For more information on these values, see the +OmniAuth SAML documentation. +For more information on other configuration settings, see +configuring SAML on your IdP. + +idp_sso_target_url: Your IdP value. + +issuer: Change to a unique name, which identifies the application to the IdP. 
+ +name_identifier_format: Your IdP value. + + + + Create the Kubernetes Secret: + + +kubectl create secret generic -n gitlab-saml --from-file=provider=saml.yaml + + + + Edit gitlab_values.yaml and add the provider configuration: + + +global: + appConfig: + omniauth: + providers: + - secret: gitlab-saml + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + Make sure GitLab is configured with HTTPS. + Configure the common settings +to add saml as a single sign-on provider. This enables Just-In-Time +account provisioning for users who do not have an existing GitLab account. + + To allow your users to use SAML to sign up without having to manually create +an account first, edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_allow_single_sign_on'] = ['saml'] + gitlab_rails['omniauth_block_auto_created_users'] = false + + + + Optional. You can automatically link SAML users with existing GitLab users if their +email addresses match by adding the following setting in docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_auto_link_saml_user'] = true + + + Alternatively, a user can manually link their SAML identity to an existing GitLab +account by enabling OmniAuth for an existing user. + + + Configure the following attributes so your SAML users cannot change them: + + + +NameID. + +Email when used with omniauth_auto_link_saml_user. + + + If users can change these attributes, they can sign in as other authorized users. +See your SAML IdP documentation for information on how to make these attributes +unchangeable. + + + Edit docker-compose.yml and add the provider configuration: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_providers'] = [ + { + name: ""saml"", + label: ""Provider name"", # optional label for login button, defaults to ""Saml"" + args: { + assertion_consumer_service_url: ""https://gitlab.example.com/users/auth/saml/callback"", + idp_cert_fingerprint: ""43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8"", + idp_sso_target_url: ""https://login.example.com/idp"", + issuer: ""https://gitlab.example.com"", + name_identifier_format: ""urn:oasis:names:tc:SAML:2.0:nameid-format:persistent"" + } + } + ] + + + Where: + + + +assertion_consumer_service_url: The GitLab HTTPS endpoint +(append /users/auth/saml/callback to the HTTPS URL of your GitLab installation). + +idp_cert_fingerprint: Your IdP value. It must be a SHA1 fingerprint. +For more information on these values, see the +OmniAuth SAML documentation. +For more information on other configuration settings, see +configuring SAML on your IdP. + +idp_sso_target_url: Your IdP value. + +issuer: Change to a unique name, which identifies the application to the IdP. + +name_identifier_format: Your IdP value. + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + Make sure GitLab is configured with HTTPS. + Configure the common settings +to add saml as a single sign-on provider. This enables Just-In-Time +account provisioning for users who do not have an existing GitLab account. 
+ + To allow your users to use SAML to sign up without having to manually create +an account first, edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + omniauth: + enabled: true + allow_single_sign_on: [""saml""] + block_auto_created_users: false + + + + Optional. You can automatically link SAML users with existing GitLab users if their +email addresses match by adding the following setting in /home/git/gitlab/config/gitlab.yml: + + +production: &base + omniauth: + auto_link_saml_user: true + + + Alternatively, a user can manually link their SAML identity to an existing GitLab +account by enabling OmniAuth for an existing user. + + + Configure the following attributes so your SAML users cannot change them: + + + +NameID. + +Email when used with omniauth_auto_link_saml_user. + + + If users can change these attributes, they can sign in as other authorized users. +See your SAML IdP documentation for information on how to make these attributes +unchangeable. + + + Edit /home/git/gitlab/config/gitlab.yml and add the provider configuration: + + +omniauth: + providers: + - { + name: 'saml', + label: 'Provider name', # optional label for login button, defaults to ""Saml"" + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + } + + + Where: + + + +assertion_consumer_service_url: The GitLab HTTPS endpoint +(append /users/auth/saml/callback to the HTTPS URL of your GitLab installation). + +idp_cert_fingerprint: Your IdP value. It must be a SHA1 fingerprint. +For more information on these values, see the +OmniAuth SAML documentation. +For more information on other configuration settings, see +configuring SAML on your IdP. + +idp_sso_target_url: Your IdP value. + +issuer: Change to a unique name, which identifies the application to the IdP. + +name_identifier_format: Your IdP value. + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +Register GitLab in your SAML IdP + + + + + Register the GitLab SP in your SAML IdP, using the application name specified in issuer. + + + To provide configuration information to the IdP, build a metadata URL for the +application. To build the metadata URL for GitLab, append users/auth/saml/metadata +to the HTTPS URL of your GitLab installation. For example: + + +https://gitlab.example.com/users/auth/saml/metadata + + + At a minimum the IdP must provide a claim containing the user’s email address +using email or mail. For more information on other available claims, see +configuring assertions. + + + On the sign in page there should now be a SAML icon below the regular sign in form. +Select the icon to begin the authentication process. If authentication is successful, +you are returned to GitLab and signed in. + + + +Configure SAML on your IdP + + +To configure a SAML application on your IdP, you need at least the following information: + + + Assertion consumer service URL. + Issuer. + +NameID. + +Email address claim. + + +For an example configuration, see set up identity providers. + +Your IdP may need additional configuration. For more information, see +additional configuration for SAML apps on your IdP. 
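If your IdP sends the email address under a non-default attribute name, one piece of additional configuration you may need is the attribute_statements option from the underlying OmniAuth SAML gem (referenced later in this page). The following is only a minimal sketch for the Linux package; the attribute name EmailAddress is an assumed example and must be replaced with whatever attribute name your IdP actually sends:

gitlab_rails['omniauth_providers'] = [
  {
    name: "saml",
    args: {
      # ...all other provider settings shown above...
      # Map the OmniAuth email field to the attribute name your IdP uses.
      # "EmailAddress" is only an assumed example name.
      attribute_statements: { email: ['EmailAddress'] }
    }
  }
]
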
+ +Configure GitLab to use multiple SAML IdPs + + + +History + + + + + +Introduced in GitLab 14.6. + + + + + + +You can configure GitLab to use multiple SAML IdPs if: + + + Each provider has a unique name set that matches a name set in args. At least +one provider must have the name saml to mitigate a +known issue in GitLab +14.6 and later. + The providers’ names are used: + + In OmniAuth configuration for properties based on the provider name. For example, +allowBypassTwoFactor, allowSingleSignOn, and syncProfileFromProvider. + For association to each existing user as an additional identity. + + + The assertion_consumer_service_url matches the provider name. + The strategy_class is explicitly set because it cannot be inferred from provider +name. + + +To set up multiple SAML IdPs: + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['omniauth_providers'] = [ + { + name: 'saml', # This must match the following name configuration parameter + label: 'Provider 1' # Differentiate the two buttons and providers in the UI + args: { + name: 'saml', # This is mandatory and must match the provider name + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', # URL must match the name of the provider + strategy_class: 'OmniAuth::Strategies::SAML', + ... # Put here all the required arguments similar to a single provider + }, + }, + { + name: 'saml_2', # This must match the following name configuration parameter + label: 'Provider 2' # Differentiate the two buttons and providers in the UI + args: { + name: 'saml_2', # This is mandatory and must match the provider name + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml_2/callback', # URL must match the name of the provider + strategy_class: 'OmniAuth::Strategies::SAML', + ... # Put here all the required arguments similar to a single provider + }, + } +] + + + To allow your users to use SAML to sign up without having to manually create an +account from either of the providers, add the following values to your configuration: + + +gitlab_rails['omniauth_allow_single_sign_on'] = ['saml', 'saml_2'] + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Put the following content in a file named saml.yaml to be used as a +Kubernetes Secret +for the first SAML provider: + + +name: 'saml' # At least one provider must be named 'saml' +label: 'Provider 1' # Differentiate the two buttons and providers in the UI +args: + name: 'saml' # This is mandatory and must match the provider name + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback' # URL must match the name of the provider + strategy_class: 'OmniAuth::Strategies::SAML' # Mandatory + ... # Put here all the required arguments similar to a single provider + + + + Put the following content in a file named saml_2.yaml to be used as a +Kubernetes Secret +for the second SAML provider: + + +name: 'saml_2' +label: 'Provider 2' # Differentiate the two buttons and providers in the UI +args: + name: 'saml_2' # This is mandatory and must match the provider name + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml_2/callback' # URL must match the name of the provider + strategy_class: 'OmniAuth::Strategies::SAML' # Mandatory + ... # Put here all the required arguments similar to a single provider + + + Optional. Set additional SAML providers by following the same steps. 
+ + Create the Kubernetes Secrets: + + +kubectl create secret generic -n gitlab-saml \ + --from-file=saml=saml.yaml \ + --from-file=saml_2=saml_2.yaml + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + omniauth: + providers: + - secret: gitlab-saml + key: saml + - secret: gitlab-saml + key: saml_2 + + + To allow your users to use SAML to sign up without having to manually create an +account from either of the providers, add the following values to your configuration: + + +global: + appConfig: + omniauth: + allowSingleSignOn: ['saml', 'saml_2'] + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_allow_single_sign_on'] = ['saml', 'saml1'] + gitlab_rails['omniauth_providers'] = [ + { + name: 'saml', # This must match the following name configuration parameter + label: 'Provider 1' # Differentiate the two buttons and providers in the UI + args: { + name: 'saml', # This is mandatory and must match the provider name + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', # URL must match the name of the provider + strategy_class: 'OmniAuth::Strategies::SAML', + ... # Put here all the required arguments similar to a single provider + }, + }, + { + name: 'saml_2', # This must match the following name configuration parameter + label: 'Provider 2' # Differentiate the two buttons and providers in the UI + args: { + name: 'saml_2', # This is mandatory and must match the provider name + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml_2/callback', # URL must match the name of the provider + strategy_class: 'OmniAuth::Strategies::SAML', + ... # Put here all the required arguments similar to a single provider + }, + } + ] + + + To allow your users to use SAML to sign up without having to manually create an +account from either of the providers, add the following values to your configuration: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_allow_single_sign_on'] = ['saml', 'saml_2'] + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + omniauth: + providers: + - { + name: 'saml', # This must match the following name configuration parameter + label: 'Provider 1' # Differentiate the two buttons and providers in the UI + args: { + name: 'saml', # This is mandatory and must match the provider name + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', # URL must match the name of the provider + strategy_class: 'OmniAuth::Strategies::SAML', + ... # Put here all the required arguments similar to a single provider + }, + } + - { + name: 'saml_2', # This must match the following name configuration parameter + label: 'Provider 2' # Differentiate the two buttons and providers in the UI + args: { + name: 'saml_2', # This is mandatory and must match the provider name + strategy_class: 'OmniAuth::Strategies::SAML', + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml_2/callback', # URL must match the name of the provider + ... 
# Put here all the required arguments similar to a single provider
 },
 }


 To allow your users to use SAML to sign up without having to manually create an
account from either of the providers, add the following values to your configuration:


production: &base
  omniauth:
    allow_single_sign_on: ["saml", "saml_2"]



 Save the file and restart GitLab:


# For systems running systemd
sudo systemctl restart gitlab.target

# For systems running SysV init
sudo service gitlab restart




Set up identity providers


GitLab support of SAML means you can sign in to GitLab through a wide range
of IdPs.

GitLab provides the following content on setting up the Okta and Google Workspace
IdPs for guidance only. If you have any questions on configuring either of these
IdPs, contact your provider’s support.

Set up Okta


 In the Okta administrator section choose Applications.
 On the app screen, select Create App Integration and then select
SAML 2.0 on the next screen.
 Optional. Choose and add a logo from GitLab Press.
You must crop and resize the logo.
 Complete the SAML general configuration. Enter:

 "Single sign-on URL": Use the assertion consumer service URL.
 "Audience URI": Use the issuer.
 NameID.
 Assertions.

 In the feedback section, enter that you’re a customer and creating an
app for internal use.
 At the top of your new app’s profile, select SAML 2.0 configuration instructions.
 Note the Identity Provider Single Sign-On URL. Use this URL for the
idp_sso_target_url on your GitLab configuration file.
 Before you sign out of Okta, make sure you add your user and groups if any.


Set up Google Workspace


Prerequisites:


 Make sure you have access to a
Google Workspace Super Admin account.


To set up a Google Workspace:


 Use the following information, and follow the instructions in
Set up your own custom SAML application in Google Workspace.

 Name of SAML App: GitLab (other names are OK).
 ACS URL: https://<GITLAB_DOMAIN>/users/auth/saml/callback (the Assertion Consumer Service URL).
 GITLAB_DOMAIN: gitlab.example.com (your GitLab instance domain).
 Entity ID: https://gitlab.example.com (a value unique to your SAML application; set it to the issuer in your GitLab configuration).
 Name ID format: EMAIL (required value, also known as name_identifier_format).
 Name ID: Primary email address (make sure someone receives content sent to that address).
 First name: first_name (required value to communicate with GitLab).
 Last name: last_name (required value to communicate with GitLab).

 Set up the following SAML attribute mappings (Google Directory attribute to app attribute):

 Basic information > Email maps to email.
 Basic information > First name maps to first_name.
 Basic information > Last name maps to last_name.

 You might use some of this information when you
configure SAML support in GitLab.


When configuring the Google Workspace SAML application, record the following information:

 SSO URL: found in the Google Identity Provider details. Set it as the GitLab idp_sso_target_url setting.
 Certificate: downloadable. Run openssl x509 -in <certificate_file> -noout -fingerprint -sha1 to generate the SHA1 fingerprint that can be used in the idp_cert_fingerprint setting. 
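Pulled together, the recorded Google Workspace values map onto the GitLab SAML provider settings described earlier. This is only a minimal sketch for the Linux package with placeholder values throughout: the fingerprint and SSO URL are illustrative, and the emailAddress name_identifier_format URN is an assumption that matches the EMAIL Name ID format selected above.

gitlab_rails['omniauth_providers'] = [
  {
    name: "saml",
    label: "Google Workspace",
    args: {
      # ACS URL (https://<GITLAB_DOMAIN>/users/auth/saml/callback)
      assertion_consumer_service_url: "https://gitlab.example.com/users/auth/saml/callback",
      # SHA1 fingerprint generated from the downloaded certificate (illustrative value)
      idp_cert_fingerprint: "43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8",
      # SSO URL recorded from the Google Identity Provider details (illustrative value)
      idp_sso_target_url: "https://accounts.google.com/o/saml2/idp?idpid=EXAMPLE",
      # Entity ID
      issuer: "https://gitlab.example.com",
      # Assumed URN for the EMAIL Name ID format
      name_identifier_format: "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress"
    }
  }
]

The same values translate directly into the Helm chart, Docker, and self-compiled formats shown earlier on this page.
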
Google Workspace Administrator also provides the IdP metadata, Entity ID, and SHA-256
fingerprint. However, GitLab does not need this information to connect to the
Google Workspace SAML application.

Set up other IdPs


Some IdPs have documentation on how to use them as the IdP in SAML configurations.
For example:


 Active Directory Federation Services (ADFS)
 Auth0


If you have any questions on configuring your IdP in a SAML configuration, contact
your provider’s support.

Configure assertions


History

 Microsoft Azure/Entra ID attribute support introduced in GitLab 16.7.


 note The attributes are case-sensitive.

Each field maps to the following supported default keys:

 Email (required): email, mail, http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress, http://schemas.microsoft.com/ws/2008/06/identity/claims/emailaddress
 Full Name: name, http://schemas.xmlsoap.org/ws/2005/05/identity/claims/name, http://schemas.microsoft.com/ws/2008/06/identity/claims/name
 First Name: first_name, firstname, firstName, http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname, http://schemas.microsoft.com/ws/2008/06/identity/claims/givenname
 Last Name: last_name, lastname, lastName, http://schemas.xmlsoap.org/ws/2005/05/identity/claims/surname, http://schemas.microsoft.com/ws/2008/06/identity/claims/surname

See attribute_statements for:

 Custom assertion configuration examples.
 How to configure custom username attributes.

For a full list of supported assertions, see the OmniAuth SAML gem

Configure users based on SAML group membership


You can:

 Require users to be members of a certain group.
 Assign users external, administrator or auditor roles based on group membership.

GitLab checks these groups on each SAML sign in and updates user attributes as necessary.
This feature does not allow you to automatically add users to GitLab
Groups.

Support for these groups depends on:

 Your subscription.
 Whether you’ve installed GitLab Enterprise Edition (EE).
 The name of the SAML provider. Group
memberships are only supported by a single SAML provider named
saml.

 Required groups: Free, Premium, Ultimate tiers; GitLab Enterprise Edition (EE) only.
 External groups: Free, Premium, Ultimate tiers; not limited to GitLab Enterprise Edition (EE).
 Admin groups: Free, Premium, Ultimate tiers; GitLab Enterprise Edition (EE) only.
 Auditor groups: Premium, Ultimate tiers; GitLab Enterprise Edition (EE) only.

Prerequisites:


 You must tell GitLab where to look for group information. To do this, make sure
that your IdP server sends a specific AttributeStatement along with the regular
SAML response. For example:

<saml:AttributeStatement>
  <saml:Attribute Name="Groups">
    <saml:AttributeValue>Developers</saml:AttributeValue>
    <saml:AttributeValue>Freelancers</saml:AttributeValue>
    <saml:AttributeValue>Admins</saml:AttributeValue>
    <saml:AttributeValue>Auditors</saml:AttributeValue>
  </saml:Attribute>
</saml:AttributeStatement>

 The name of the attribute must contain the groups that a user belongs to.
To tell GitLab where to find these groups, add a groups_attribute:
element to your SAML settings.


Required groups


Your IdP passes group information to GitLab in the SAML response. To use this
response, configure GitLab to identify:

 Where to look for the groups in the SAML response, using the groups_attribute setting.
 Information about a group or user, using a group setting.

Use the required_groups setting to configure GitLab to identify which group
membership is required to sign in.

If you do not set required_groups or leave the setting empty, anyone with proper
authentication can use the service. 
+ +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + required_groups: ['Developers', 'Freelancers', 'Admins', 'Auditors'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + } +] + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Put the following content in a file named saml.yaml to be used as a +Kubernetes Secret: + + +name: 'saml' +label: 'Our SAML Provider' +groups_attribute: 'Groups' +required_groups: ['Developers', 'Freelancers', 'Admins', 'Auditors'] +args: + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback' + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8' + idp_sso_target_url: 'https://login.example.com/idp' + issuer: 'https://gitlab.example.com' + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + + + + Create the Kubernetes Secret: + + +kubectl create secret generic -n gitlab-saml --from-file=provider=saml.yaml + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + omniauth: + providers: + - secret: gitlab-saml + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + required_groups: ['Developers', 'Freelancers', 'Admins', 'Auditors'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + } + ] + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + omniauth: + providers: + - { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + required_groups: ['Developers', 'Freelancers', 'Admins', 'Auditors'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + } + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +External groups + + +Your IdP passes group information to GitLab in the SAML response. To use this +response, configure GitLab to identify: + + + Where to look for the groups in the SAML response, using the groups_attribute setting. 
+ Information about a group or user, using a group setting. + + +SAML can automatically identify a user as an +external user, based on the external_groups +setting. + +Example configuration: + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['omniauth_providers'] = [ + + { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + external_groups: ['Freelancers'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + } +] + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Put the following content in a file named saml.yaml to be used as a +Kubernetes Secret: + + +name: 'saml' +label: 'Our SAML Provider' +groups_attribute: 'Groups' +external_groups: ['Freelancers'] +args: + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback' + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8' + idp_sso_target_url: 'https://login.example.com/idp' + issuer: 'https://gitlab.example.com' + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + + + + Create the Kubernetes Secret: + + +kubectl create secret generic -n gitlab-saml --from-file=provider=saml.yaml + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + omniauth: + providers: + - secret: gitlab-saml + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + external_groups: ['Freelancers'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + } + ] + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + omniauth: + providers: + - { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + external_groups: ['Freelancers'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + } + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +Administrator groups + + +Your IdP passes group information to GitLab in the SAML response. 
To use this +response, configure GitLab to identify: + + + Where to look for the groups in the SAML response, using the groups_attribute setting. + Information about a group or user, using a group setting. + + +Use the admin_groups setting to configure GitLab to identify which groups grant +the user administrator access. + +Example configuration: + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + admin_groups: ['Admins'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + } +] + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Put the following content in a file named saml.yaml to be used as a +Kubernetes Secret: + + +name: 'saml' +label: 'Our SAML Provider' +groups_attribute: 'Groups' +admin_groups: ['Admins'] +args: + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback' + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8' + idp_sso_target_url: 'https://login.example.com/idp' + issuer: 'https://gitlab.example.com' + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + + + + Create the Kubernetes Secret: + + +kubectl create secret generic -n gitlab-saml --from-file=provider=saml.yaml + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + omniauth: + providers: + - secret: gitlab-saml + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + admin_groups: ['Admins'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + } + ] + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + omniauth: + providers: + - { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + admin_groups: ['Admins'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + } + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +Auditor groups + + + +Tier: Premium, Ultimate +Offering: Self-managed, GitLab Dedicated + + 
+History + + + + Introduced in GitLab 11.4. + + + + + +Your IdP passes group information to GitLab in the SAML response. To use this +response, configure GitLab to identify: + + + Where to look for the groups in the SAML response, using the groups_attribute setting. + Information about a group or user, using a group setting. + + +Use the auditor_groups setting to configure GitLab to identify which groups include +users with auditor access. + +Example configuration: + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + auditor_groups: ['Auditors'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + } +] + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Put the following content in a file named saml.yaml to be used as a +Kubernetes Secret: + + +name: 'saml' +label: 'Our SAML Provider' +groups_attribute: 'Groups' +auditor_groups: ['Auditors'] +args: + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback' + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8' + idp_sso_target_url: 'https://login.example.com/idp' + issuer: 'https://gitlab.example.com' + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + + + + Create the Kubernetes Secret: + + +kubectl create secret generic -n gitlab-saml --from-file=provider=saml.yaml + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + omniauth: + providers: + - secret: gitlab-saml + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + auditor_groups: ['Auditors'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + } + ] + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + omniauth: + providers: + - { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + auditor_groups: ['Auditors'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + } + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV 
init +sudo service gitlab restart + + + + +Automatically manage SAML Group Sync + + +For information on automatically managing GitLab group membership, see SAML Group Sync. + +Bypass two-factor authentication + + +To configure a SAML authentication method to count as two-factor authentication +(2FA) on a per session basis, register that method in the upstream_two_factor_authn_contexts +list. + + + + Make sure that your IdP is returning the AuthnContext. For example: + + + + + urn:oasis:names:tc:SAML:2.0:ac:classes:MediumStrongCertificateProtectedTransport + + + + + + Edit your installation configuration to register the SAML authentication method +in the upstream_two_factor_authn_contexts list. + + Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + upstream_two_factor_authn_contexts: + %w( + urn:oasis:names:tc:SAML:2.0:ac:classes:CertificateProtectedTransport + urn:oasis:names:tc:SAML:2.0:ac:classes:SecondFactorOTPSMS + urn:oasis:names:tc:SAML:2.0:ac:classes:SecondFactorIGTOKEN + ), + } + } +] + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Put the following content in a file named saml.yaml to be used as a +Kubernetes Secret: + + +name: 'saml' +label: 'Our SAML Provider' +args: + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback' + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8' + idp_sso_target_url: 'https://login.example.com/idp' + issuer: 'https://gitlab.example.com' + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + upstream_two_factor_authn_contexts: + - 'urn:oasis:names:tc:SAML:2.0:ac:classes:CertificateProtectedTransport' + - 'urn:oasis:names:tc:SAML:2.0:ac:classes:SecondFactorOTPSMS' + - 'urn:oasis:names:tc:SAML:2.0:ac:classes:SecondFactorIGTOKEN' + + + + Create the Kubernetes Secret: + + +kubectl create secret generic -n gitlab-saml --from-file=provider=saml.yaml + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + omniauth: + providers: + - secret: gitlab-saml + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + upstream_two_factor_authn_contexts: + %w( + urn:oasis:names:tc:SAML:2.0:ac:classes:CertificateProtectedTransport + urn:oasis:names:tc:SAML:2.0:ac:classes:SecondFactorOTPSMS + urn:oasis:names:tc:SAML:2.0:ac:classes:SecondFactorIGTOKEN + ) + } + } + ] + + + + 
Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + omniauth: + providers: + - { name: 'saml', + label: 'Our SAML Provider', + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + upstream_two_factor_authn_contexts: + [ + 'urn:oasis:names:tc:SAML:2.0:ac:classes:CertificateProtectedTransport', + 'urn:oasis:names:tc:SAML:2.0:ac:classes:SecondFactorOTPSMS', + 'urn:oasis:names:tc:SAML:2.0:ac:classes:SecondFactorIGTOKEN' + ] + } + } + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + + + +Validate response signatures + + +IdPs must sign SAML responses to ensure that the assertions are not tampered with. + +This prevents user impersonation and privilege escalation when specific group +membership is required. + +Using idp_cert_fingerprint + + +You configure the response signature validation using idp_cert_fingerprint. +An example configuration: + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + } +] + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Put the following content in a file named saml.yaml to be used as a +Kubernetes Secret: + + +name: 'saml' +label: 'Our SAML Provider' +args: + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback' + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8' + idp_sso_target_url: 'https://login.example.com/idp' + issuer: 'https://gitlab.example.com' + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + + + + Create the Kubernetes Secret: + + +kubectl create secret generic -n gitlab-saml --from-file=provider=saml.yaml + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + omniauth: + providers: + - secret: gitlab-saml + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + } + ] + + + + Save the file and restart GitLab: + + +docker compose up -d + + 
+ + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + omniauth: + providers: + - { name: 'saml', + label: 'Our SAML Provider', + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + } + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +Using idp_cert + + +If your IdP does not support configuring this using idp_cert_fingerprint, you +can instead configure GitLab directly using idp_cert. +An example configuration: + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert: '-----BEGIN CERTIFICATE----- + + -----END CERTIFICATE-----', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + } +] + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Put the following content in a file named saml.yaml to be used as a +Kubernetes Secret: + + +name: 'saml' +label: 'Our SAML Provider' +args: + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback' + idp_cert: | + -----BEGIN CERTIFICATE----- + + -----END CERTIFICATE----- + idp_sso_target_url: 'https://login.example.com/idp' + issuer: 'https://gitlab.example.com' + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + + + + Create the Kubernetes Secret: + + +kubectl create secret generic -n gitlab-saml --from-file=provider=saml.yaml + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + omniauth: + providers: + - secret: gitlab-saml + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert: '-----BEGIN CERTIFICATE----- + + -----END CERTIFICATE-----', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + } + ] + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + omniauth: + providers: + - { name: 'saml', + label: 'Our SAML Provider', + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert: '-----BEGIN CERTIFICATE----- + + -----END CERTIFICATE-----', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + } + 
} + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +If you have configured the response signature validation incorrectly, you might see +error messages such as: + + + A key validation error. + Digest mismatch. + Fingerprint mismatch. + + +For more information on solving these errors, see the troubleshooting SAML guide. + +Customize SAML settings + + +Redirect users to SAML server for authentication + + +You can add the auto_sign_in_with_provider setting to your GitLab configuration +to automatically redirect you to your SAML server for authentication. This removes +the requirement to select an element before actually signing in. + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['omniauth_auto_sign_in_with_provider'] = 'saml' + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + omniauth: + autoSignInWithProvider: 'saml' + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_auto_sign_in_with_provider'] = 'saml' + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + omniauth: + auto_sign_in_with_provider: 'saml' + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +Every sign in attempt redirects to the SAML server, so you cannot sign in using +local credentials. Make sure at least one of the SAML users has administrator access. + + + note To bypass the auto sign-in setting, append ?auto_sign_in=false in the sign in +URL, for example: https://gitlab.example.com/users/sign_in?auto_sign_in=false. + + +Map SAML response attribute names + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed, GitLab Dedicated + +You can use attribute_statements to map attribute names in a SAML response to entries +in the OmniAuth info hash. + + + note Only use this setting to map attributes that are part of the OmniAuth info hash schema. + + +For example, if your SAMLResponse contains an Attribute called EmailAddress, +specify { email: ['EmailAddress'] } to map the Attribute to the +corresponding key in the info hash. URI-named Attributes are also supported, for example, +{ email: ['http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress'] }. + +Use this setting to tell GitLab where to look for certain attributes required +to create an account. 
For example, if your IdP sends the user’s email address as EmailAddress +instead of email, let GitLab know by setting it on your configuration: + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent', + attribute_statements: { email: ['EmailAddress'] } + } + } +] + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Put the following content in a file named saml.yaml to be used as a +Kubernetes Secret: + + +name: 'saml' +label: 'Our SAML Provider' +args: + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback' + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8' + idp_sso_target_url: 'https://login.example.com/idp' + issuer: 'https://gitlab.example.com' + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + attribute_statements: + email: ['EmailAddress'] + + + + Create the Kubernetes Secret: + + +kubectl create secret generic -n gitlab-saml --from-file=provider=saml.yaml + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + omniauth: + providers: + - secret: gitlab-saml + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent', + attribute_statements: { email: ['EmailAddress'] } + } + } + ] + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + omniauth: + providers: + - { name: 'saml', + label: 'Our SAML Provider', + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent', + attribute_statements: { email: ['EmailAddress'] } + } + } + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +Set a username + + +By default, the local part of the email address in the SAML response is used to +generate the user’s GitLab username. 
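For example, with no mapping configured, an address of jane.doe@example.com produces the username jane.doe. A minimal Ruby illustration of that default (the address is hypothetical and this is not GitLab's actual code):

# Default behaviour: the username comes from the local part of the email
# in the SAML response when no nickname or username mapping is configured.
email = 'jane.doe@example.com'   # hypothetical address from the SAML assertion
default_username = email.split('@').first
# => "jane.doe"
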
+ +Configure username or nickname in attribute_statements to specify one or more attributes that contain a user’s desired username: + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent', + attribute_statements: { nickname: ['username'] } + } + } +] + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Put the following content in a file named saml.yaml to be used as a +Kubernetes Secret: + + +name: 'saml' +label: 'Our SAML Provider' +args: + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback' + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8' + idp_sso_target_url: 'https://login.example.com/idp' + issuer: 'https://gitlab.example.com' + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + attribute_statements: + nickname: ['username'] + + + + Create the Kubernetes Secret: + + +kubectl create secret generic -n gitlab-saml --from-file=provider=saml.yaml + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + omniauth: + providers: + - secret: gitlab-saml + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + required_groups: ['Developers', 'Freelancers', 'Admins', 'Auditors'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent', + attribute_statements: { nickname: ['username'] } + } + } + ] + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + omniauth: + providers: + - { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + required_groups: ['Developers', 'Freelancers', 'Admins', 'Auditors'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent', + attribute_statements: { nickname: ['username'] } + } + } + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +This also sets the username attribute in your SAML Response to the username in GitLab. 
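As a rough illustration of how such a mapping resolves, the following conceptual Ruby sketch shows the idea (it is not the omniauth-saml gem's implementation; the attribute names and values are hypothetical):

# Conceptual resolution of attribute_statements: the first listed attribute
# present in the SAML response supplies the nickname used as the username.
attribute_statements = { nickname: ['username', 'sAMAccountName'] }
saml_attributes      = { 'username' => 'jdoe', 'EmailAddress' => 'jdoe@example.com' }

nickname = attribute_statements[:nickname]
             .map { |key| saml_attributes[key] }
             .compact
             .first
# => "jdoe"

If none of the listed attributes are present in the response, the default derivation from the email local part described above would typically apply.
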
+ +Allow for clock drift + + +The clock of the IdP may drift slightly ahead of your system clocks. +To allow for a small amount of clock drift, use allowed_clock_drift in +your settings. You must enter the parameter’s value in a number and fraction of seconds. +The value given is added to the current time at which the response is validated. + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + required_groups: ['Developers', 'Freelancers', 'Admins', 'Auditors'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent', + allowed_clock_drift: 1 # for one second clock drift + } + } +] + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Put the following content in a file named saml.yaml to be used as a +Kubernetes Secret: + + +name: 'saml' +label: 'Our SAML Provider' +groups_attribute: 'Groups' +required_groups: ['Developers', 'Freelancers', 'Admins', 'Auditors'] +args: + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback' + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8' + idp_sso_target_url: 'https://login.example.com/idp' + issuer: 'https://gitlab.example.com' + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + allowed_clock_drift: 1 # for one second clock drift + + + + Create the Kubernetes Secret: + + +kubectl create secret generic -n gitlab-saml --from-file=provider=saml.yaml + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + omniauth: + providers: + - secret: gitlab-saml + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + required_groups: ['Developers', 'Freelancers', 'Admins', 'Auditors'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent', + allowed_clock_drift: 1 # for one second clock drift + } + } + ] + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + omniauth: + providers: + - { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + required_groups: ['Developers', 'Freelancers', 'Admins', 'Auditors'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 
'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent', + allowed_clock_drift: 1 # for one second clock drift + } + } + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +Designate a unique attribute for the uid (optional) + + +By default, the users uid is set as the NameID attribute in the SAML response. To designate +a different attribute for the uid, you can set the uid_attribute. + +Before setting the uid to a unique attribute, make sure that you have configured +the following attributes so your SAML users cannot change them: + + + +NameID. + +Email when used with omniauth_auto_link_saml_user. + + +If users can change these attributes, they can sign in as other authorized users. +See your SAML IdP documentation for information on how to make these attributes +unchangeable. +In the following example, the value of uid attribute in the SAML response is set as the uid_attribute. + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent', + uid_attribute: 'uid' + } + } +] + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Put the following content in a file named saml.yaml to be used as a +Kubernetes Secret: + + +name: 'saml' +label: 'Our SAML Provider' +groups_attribute: 'Groups' +required_groups: ['Developers', 'Freelancers', 'Admins', 'Auditors'] +args: + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback' + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8' + idp_sso_target_url: 'https://login.example.com/idp' + issuer: 'https://gitlab.example.com' + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + uid_attribute: 'uid' + + + + Create the Kubernetes Secret: + + +kubectl create secret generic -n gitlab-saml --from-file=provider=saml.yaml + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + omniauth: + providers: + - secret: gitlab-saml + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + required_groups: ['Developers', 'Freelancers', 'Admins', 'Auditors'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent', + uid_attribute: 'uid' + } + } + ] + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: 
&base + omniauth: + providers: + - { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + required_groups: ['Developers', 'Freelancers', 'Admins', 'Auditors'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent', + uid_attribute: 'uid' + } + } + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +Assertion encryption (optional) + + +GitLab requires the use of TLS encryption with SAML 2.0. Sometimes, GitLab needs +additional assertion encryption. For example, if you: + + + Terminate TLS encryption early at a load balancer. + Include sensitive details in assertions that you do not want appearing in logs. + + +Most organizations should not need additional encryption at this layer. + +Your IdP encrypts the assertion with the public certificate of GitLab. +GitLab decrypts the EncryptedAssertion with its private key. + + + note This integration uses the certificate and private_key settings for both +assertion encryption and request signing. + + +The SAML integration supports EncryptedAssertion. To encrypt your assertions, +define the private key and the public certificate of your GitLab instance in the +SAML settings. + +When you define the key and certificate, replace all line feeds in the key file with \n. +This makes the key file one long string with no line feeds. + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + required_groups: ['Developers', 'Freelancers', 'Admins', 'Auditors'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent', + certificate: '-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----', + private_key: '-----BEGIN PRIVATE KEY-----\n\n-----END PRIVATE KEY-----' + } + } +] + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Put the following content in a file named saml.yaml to be used as a +Kubernetes Secret: + + +name: 'saml' +label: 'Our SAML Provider' +groups_attribute: 'Groups' +required_groups: ['Developers', 'Freelancers', 'Admins', 'Auditors'] +args: + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback' + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8' + idp_sso_target_url: 'https://login.example.com/idp' + issuer: 'https://gitlab.example.com' + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + certificate: '-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----' + private_key: '-----BEGIN PRIVATE KEY-----\n\n-----END PRIVATE KEY-----' + + + + Create the Kubernetes Secret: + + +kubectl create secret generic -n gitlab-saml --from-file=provider=saml.yaml + + + + Export the Helm values: + + +helm get values gitlab > 
gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + omniauth: + providers: + - secret: gitlab-saml + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + required_groups: ['Developers', 'Freelancers', 'Admins', 'Auditors'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent', + certificate: '-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----', + private_key: '-----BEGIN PRIVATE KEY-----\n\n-----END PRIVATE KEY-----' + } + } + ] + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + omniauth: + providers: + - { name: 'saml', + label: 'Our SAML Provider', + groups_attribute: 'Groups', + required_groups: ['Developers', 'Freelancers', 'Admins', 'Auditors'], + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent', + certificate: '-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----', + private_key: '-----BEGIN PRIVATE KEY-----\n\n-----END PRIVATE KEY-----' + } + } + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +Sign SAML authentication requests (optional) + + +You can configure GitLab to sign SAML authentication requests. This configuration +is optional because GitLab SAML requests use the SAML redirect binding. + +To implement signing: + + + Create a private key and public certificate pair for your GitLab instance to +use for SAML. + + Configure the signing settings in the security section of the configuration. 
+For example: + + Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent', + certificate: '-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----', + private_key: '-----BEGIN PRIVATE KEY-----\n\n-----END PRIVATE KEY-----', + security: { + authn_requests_signed: true, # enable signature on AuthNRequest + want_assertions_signed: true, # enable the requirement of signed assertion + metadata_signed: false, # enable signature on Metadata + signature_method: 'http://www.w3.org/2001/04/xmldsig-more#rsa-sha256', + digest_method: 'http://www.w3.org/2001/04/xmlenc#sha256', + } + } + } +] + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Put the following content in a file named saml.yaml to be used as a +Kubernetes Secret: + + +name: 'saml' +label: 'Our SAML Provider' +args: + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback' + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8' + idp_sso_target_url: 'https://login.example.com/idp' + issuer: 'https://gitlab.example.com' + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent' + certificate: '-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----' + private_key: '-----BEGIN PRIVATE KEY-----\n\n-----END PRIVATE KEY-----' + security: + authn_requests_signed: true # enable signature on AuthNRequest + want_assertions_signed: true # enable the requirement of signed assertion + metadata_signed: false # enable signature on Metadata + signature_method: 'http://www.w3.org/2001/04/xmldsig-more#rsa-sha256' + digest_method: 'http://www.w3.org/2001/04/xmlenc#sha256' + + + + Create the Kubernetes Secret: + + +kubectl create secret generic -n gitlab-saml --from-file=provider=saml.yaml + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + omniauth: + providers: + - secret: gitlab-saml + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_providers'] = [ + { name: 'saml', + label: 'Our SAML Provider', + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent', + certificate: '-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----', + private_key: '-----BEGIN PRIVATE KEY-----\n\n-----END PRIVATE KEY-----', + security: { + authn_requests_signed: true, # enable signature on AuthNRequest + want_assertions_signed: true, # enable the requirement of signed assertion + metadata_signed: false, # enable signature on Metadata + signature_method: 'http://www.w3.org/2001/04/xmldsig-more#rsa-sha256', + 
digest_method: 'http://www.w3.org/2001/04/xmlenc#sha256', + } + } + } + ] + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + omniauth: + providers: + - { name: 'saml', + label: 'Our SAML Provider', + args: { + assertion_consumer_service_url: 'https://gitlab.example.com/users/auth/saml/callback', + idp_cert_fingerprint: '43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8', + idp_sso_target_url: 'https://login.example.com/idp', + issuer: 'https://gitlab.example.com', + name_identifier_format: 'urn:oasis:names:tc:SAML:2.0:nameid-format:persistent', + certificate: '-----BEGIN CERTIFICATE-----\n\n-----END CERTIFICATE-----', + private_key: '-----BEGIN PRIVATE KEY-----\n\n-----END PRIVATE KEY-----', + security: { + authn_requests_signed: true, # enable signature on AuthNRequest + want_assertions_signed: true, # enable the requirement of signed assertion + metadata_signed: false, # enable signature on Metadata + signature_method: 'http://www.w3.org/2001/04/xmldsig-more#rsa-sha256', + digest_method: 'http://www.w3.org/2001/04/xmlenc#sha256', + } + } + } + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + + + +GitLab then: + + + Signs the request with the provided private key. + Includes the configured public x500 certificate in the metadata for your IdP +to validate the signature of the received request with. + + +For more information on this option, see the +Ruby SAML gem documentation. + +The Ruby SAML gem is used by the +OmniAuth SAML gem to implement the +client side of the SAML authentication. + + + note The SAML redirect binding is different to the SAML POST binding. In the POST binding, +signing is required to prevent intermediaries from tampering with the requests. + + +Password generation for users created through SAML + + +GitLab generates and sets passwords for users created through SAML. + +Users authenticated with SSO or SAML must not use a password for Git operations +over HTTPS. These users can instead: + + + Set up a personal, project, or group access token. + Use an OAuth credential helper. + + +Link SAML identity for an existing user + + +An administrator can configure GitLab to automatically link SAML users with existing GitLab users. +For more information, see Configure SAML support in GitLab. + +A user can manually link their SAML identity to an existing GitLab account. For more information, +see Enable OmniAuth for an existing user. + +Configure group SAML SSO on a self-managed instance + + + +Tier: Premium, Ultimate +Offering: Self-managed, GitLab Dedicated + +Use group SAML SSO if you have to allow access through multiple SAML IdPs on your +self-managed instance. + +To configure group SAML SSO: + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + Make sure GitLab is configured with HTTPS. + + Edit /etc/gitlab/gitlab.rb to enable OmniAuth and the group_saml provider: + + +gitlab_rails['omniauth_enabled'] = true +gitlab_rails['omniauth_providers'] = [{ name: 'group_saml' }] + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + Make sure GitLab is configured with HTTPS. 
+ + Put the following content in a file named group_saml.yaml to be used as a +Kubernetes Secret: + + +name: 'group_saml' + + + + Create the Kubernetes Secret: + + +kubectl create secret generic -n gitlab-group-saml --from-file=provider=group_saml.yaml + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml to enable OmniAuth and the group_saml provider: + + +global: + appConfig: + omniauth: + enabled: true + providers: + - secret: gitlab-group-saml + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + Make sure GitLab is configured with HTTPS. + + Edit docker-compose.yml to enable OmniAuth and the group_saml provider: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['omniauth_enabled'] = true + gitlab_rails['omniauth_providers'] = [{ name: 'group_saml' }] + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + Make sure GitLab is configured with HTTPS. + + Edit /home/git/gitlab/config/gitlab.yml to enable OmniAuth and the group_saml provider: + + +production: &base + omniauth: + enabled: true + providers: + - { name: 'group_saml' } + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +As a multi-tenant solution, group SAML on a self-managed instance is limited compared +to the recommended instance-wide SAML. Use +instance-wide SAML to take advantage of: + + + +LDAP compatibility. + +LDAP Group Sync. + +Required groups. + +Administrator groups. + +Auditor groups. + + +Additional configuration for SAML apps on your IdP + + +When configuring a SAML app on the IdP, your IdP may need additional configuration, +such as the following: + + + + + Field + Value + Notes + + + + + SAML profile + Web browser SSO profile + GitLab uses SAML to sign users in through their browser. No requests are made directly to the IdP. + + + SAML request binding + HTTP Redirect + GitLab (the SP) redirects users to your IdP with a base64 encoded SAMLRequest HTTP parameter. + + + SAML response binding + HTTP POST + Specifies how the SAML token is sent by your IdP. Includes the SAMLResponse, which a user’s browser submits back to GitLab. + + + Sign SAML response + Required + Prevents tampering. + + + X.509 certificate in response + Required + Signs the response and checks the response against the provided fingerprint. + + + Fingerprint algorithm + SHA-1 + GitLab uses a SHA-1 hash of the certificate to sign the SAML Response. + + + Signature algorithm + SHA-1/SHA-256/SHA-384/SHA-512 + Determines how a response is signed. Also known as the digest method, this can be specified in the SAML response. + + + Encrypt SAML assertion + Optional + Uses TLS between your identity provider, the user’s browser, and GitLab. + + + Sign SAML assertion + Optional + Validates the integrity of a SAML assertion. When active, signs the whole response. + + + Check SAML request signature + Optional + Checks the signature on the SAML response. + + + Default RelayState + Optional + Specifies the sub-paths of the base URL that users should end up on after successfully signing in through SAML at your IdP. + + + NameID format + Persistent + See NameID format details. + + + Additional URLs + Optional + May include the issuer, identifier, or assertion consumer service URL in other fields on some providers. 
+ + + + +For example configurations, see the notes on specific providers. + +Glossary + + + + + + Term + Description + + + + + Identity provider (IdP) + The service that manages your user identities, such as Okta or OneLogin. + + + Service provider (SP) + Consumes assertions from a SAML IdP, such as Okta, to authenticate users. You can configure GitLab as a SAML 2.0 SP. + + + Assertion + A piece of information about a user’s identity, such as their name or role. Also known as a claim or an attribute. + + + Single Sign-On (SSO) + Name of the authentication scheme. + + + Assertion consumer service URL + The callback on GitLab where users are redirected after successfully authenticating with the IdP. + + + Issuer + How GitLab identifies itself to the IdP. Also known as a “Relying party trust identifier”. + + + Certificate fingerprint + Confirms that communications over SAML are secure by checking that the server is signing communications with the correct certificate. Also known as a certificate thumbprint. + + + + +Troubleshooting + + +See our troubleshooting SAML guide. + + +2. SAML SSO for GitLab.com groups + + + +SAML SSO for GitLab.com groups + + + +Tier: Premium, Ultimate +Offering: GitLab.com + + +History + + + + + Introduced in GitLab 11.0. + + + + + + +Users can sign in to GitLab through their SAML identity provider. + +SCIM synchronizes users with the group on GitLab.com. + + + When you add or remove a user from the SCIM app, SCIM adds or removes the user +from the GitLab group. + If the user is not already a group member, the user is added to the group as part of the sign-in process. + + +You can configure SAML SSO for the top-level group only. + +Set up your identity provider + + +The SAML standard means that you can use a wide range of identity providers with GitLab. Your identity provider might have relevant documentation. It can be generic SAML documentation or specifically targeted for GitLab. + +When setting up your identity provider, use the following provider-specific documentation +to help avoid common issues and as a guide for terminology used. + +For identity providers not listed, you can refer to the instance SAML notes on configuring an identity provider +for additional guidance on information your provider may require. + +GitLab provides the following information for guidance only. +If you have any questions on configuring the SAML app, contact your provider’s support. + +If you are having issues setting up your identity provider, see the +troubleshooting documentation. + +Azure + + +To set up SSO with Azure as your identity provider: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > SAML SSO. + Note the information on this page. + + Go to Azure and follow the instructions for configuring SSO for an application. The following GitLab settings correspond to the Azure fields. + + + + + GitLab setting + Azure field + + + + + Identifier + Identifier (Entity ID) + + + Assertion consumer service URL + Reply URL (Assertion Consumer Service URL) + + + GitLab single sign-on URL + Sign on URL + + + Identity provider single sign-on URL + Login URL + + + Certificate fingerprint + Thumbprint + + + + + You should set the following attributes: + + +Unique User Identifier (Name identifier) to user.objectID. + +nameid-format to persistent. For more information, see how to manage user SAML identity. + +email to user.mail or similar. + +Additional claims to supported attributes. 
+ + + + Make sure the identity provider is set to have provider-initiated calls +to link existing GitLab accounts. + + Optional. If you use Group Sync, customize the name of the +group claim to match the required attribute. + + + +View a demo of SCIM provisioning on Azure using SAML SSO for groups. The objectID mapping is outdated in this video. Follow the SCIM documentation instead. + +For more information, see an example configuration page. + +Google Workspace + + +To set up Google Workspace as your identity provider: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > SAML SSO. + Note the information on this page. + + Follow the instructions for setting up SSO with Google as your identity provider. The following GitLab settings correspond to the Google Workspace fields. + + + + + GitLab setting + Google Workspace field + + + + + Identifier + Entity ID + + + Assertion consumer service URL + ACS URL + + + GitLab single sign-on URL + Start URL + + + Identity provider single sign-on URL + SSO URL + + + + + Google Workspace displays a SHA256 fingerprint. To retrieve the SHA1 fingerprint +required by GitLab to configure SAML: + + Download the certificate. + + Run this command: + + +openssl x509 -noout -fingerprint -sha1 -inform pem -in ""GoogleIDPCertificate-domain.com.pem"" + + + + + Set these values: + + For Primary email: email. + For First name: first_name. + For Last name: last_name. + For Name ID format: EMAIL. + For NameID: Basic Information > Primary email. +For more information, see supported attributes. + + + Make sure the identity provider is set to have provider-initiated calls +to link existing GitLab accounts. + + +On the GitLab SAML SSO page, when you select Verify SAML Configuration, disregard +the warning that recommends setting the NameID format to persistent. + +For more information, see an example configuration page. + + +View a demo of how to configure SAML with Google Workspaces and set up Group Sync. + +Okta + + +To set up SSO with Okta as your identity provider: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > SAML SSO. + Note the information on this page. + + Follow the instructions for setting up a SAML application in Okta. + + The following GitLab settings correspond to the Okta fields. + + + + + GitLab setting + Okta field + + + + + Identifier + Audience URI + + + Assertion consumer service URL + Single sign-on URL + + + GitLab single sign-on URL + +Login page URL (under Application Login Page settings) + + + Identity provider single sign-on URL + Identity Provider Single Sign-On URL + + + + + + Under the Okta Single sign-on URL field, select the Use this for Recipient URL and Destination URL checkbox. + + Set these values: + + For Application username (NameID): Custom user.getInternalProperty(""id""). + For Name ID Format: Persistent. For more information, see manage user SAML identity. + For email: user.email or similar. + For additional Attribute Statements, see supported attributes. + + + Make sure the identity provider is set to have provider-initiated calls +to link existing GitLab accounts. + + +The Okta GitLab application available in the App Catalog only supports SCIM. Support +for SAML is proposed in issue 216173. + + +For a demo of the Okta SAML setup including SCIM, see Demo: Okta Group SAML & SCIM setup. + +For more information, see an example configuration page + +OneLogin + + +OneLogin supports its own GitLab (SaaS) application. 
+ +To set up OneLogin as your identity provider: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > SAML SSO. + Note the information on this page. + + If you use the OneLogin generic +SAML Test Connector (Advanced), +you should use the OneLogin SAML Test Connector. The following GitLab settings correspond +to the OneLogin fields: + + + + + GitLab setting + OneLogin field + + + + + Identifier + Audience + + + Assertion consumer service URL + Recipient + + + Assertion consumer service URL + ACS (Consumer) URL + + + Assertion consumer service URL (escaped version) + ACS (Consumer) URL Validator + + + GitLab single sign-on URL + Login URL + + + Identity provider single sign-on URL + SAML 2.0 Endpoint + + + + + For NameID, use OneLogin ID. For more information, see manage user SAML identity. + Configure required and supported attributes. + Make sure the identity provider is set to have provider-initiated calls +to link existing GitLab accounts. + + +Configure assertions + + + + note The attributes are case-sensitive. + + +At minimum, you must configure the following assertions: + + + +NameID. + Email. + + +Optionally, you can pass user information to GitLab as attributes in the SAML assertion. + + + The user’s email address can be an email or mail attribute. + The username can be either a username or nickname attribute. You should specify only +one of these. + + +For more information, see the attributes available for self-managed GitLab instances. + +Use metadata + + +To configure some identity providers, you need a GitLab metadata URL. +To find this URL: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > SAML SSO. + Copy the provided GitLab metadata URL. + Follow your identity provider’s documentation and paste the metadata URL when it’s requested. + + +Check your identity provider’s documentation to see if it supports the GitLab metadata URL. + +Manage the identity provider + + +After you have set up your identity provider, you can: + + + Change the identity provider. + Change email domains. + + +Change the identity provider + + +You can change to a different identity provider. During the change process, +users cannot access any of the SAML groups. To mitigate this, you can disable +SSO enforcement. + +To change identity providers: + + + +Configure the group with the new identity provider. + Optional. If the NameID is not identical, change the NameID for users. + + +Change email domains + + +To migrate users to a new email domain, tell users to: + + + +Add their new email as the primary email to their accounts and verify it. + Optional. Remove their old email from the account. + + +If the NameID is configured with the email address, change the NameID for users. + +Configure GitLab + + + +History + + + + + Ability to set a custom role as the default membership role introduced in GitLab 16.7. + + + + + + +After you set up your identity provider to work with GitLab, you must configure GitLab to use it for authentication: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > SAML SSO. + Complete the fields: + + In the Identity provider single sign-on URL field, enter the SSO URL from your identity provider. + In the Certificate fingerprint field, enter the fingerprint for the SAML token signing certificate. + + + In the Default membership role field, select the role to assign to new users. +The default role is Guest. 
That role becomes the starting role of all users +added to the group: + + In GitLab 13.3 and +later, group Owners can set a default membership role other than Guest. + In GitLab 16.7 and later, group Owners can set a custom role +as the default membership role. + + + Select the Enable SAML authentication for this group checkbox. + Optional. Select: + + +Enforce SSO-only authentication for web activity for this group. + +Enforce SSO-only authentication for Git activity for this group. +For more information, see the SSO enforcement documentation. + + + Select Save changes. + + + + note The certificate fingerprint algorithm must be in SHA1. When configuring the identity provider (such as Google Workspace), use a secure signature algorithm. + + +If you are having issues configuring GitLab, see the troubleshooting documentation. + +User access and management + + + +History + + + + + SAML user provisioning introduced in GitLab 13.7. + + + + + + +After group SSO is configured and enabled, users can access the GitLab.com group through the identity provider’s dashboard. +If SCIM is configured, see user access on the SCIM page. + +When a user tries to sign in with Group SSO, GitLab attempts to find or create a user based on the following: + + + Find an existing user with a matching SAML identity. This would mean the user either had their account created by SCIM or they have previously signed in with the group’s SAML IdP. + If there is no conflicting user with the same email address, create a new account automatically. + If there is a conflicting user with the same email address, redirect the user to the sign-in page to: + + Create a new account with another email address. + Sign-in to their existing account to link the SAML identity. + + + + +Link SAML to your existing GitLab.com account + + + +History + + + + + +Remember me checkbox introduced in GitLab 15.7. + + + + + + +To link SAML to your existing GitLab.com account: + + + Sign in to your GitLab.com account. Reset your password +if necessary. + Locate and visit the GitLab single sign-on URL for the group you’re signing +in to. A group owner can find this on the group’s Settings > SAML SSO page. +If the sign-in URL is configured, users can connect to the GitLab app from the identity provider. + Optional. Select the Remember me checkbox to stay signed in to GitLab for 2 weeks. +You may still be asked to re-authenticate with your SAML provider more frequently. + Select Authorize. + Enter your credentials on the identity provider if prompted. + You are then redirected back to GitLab.com and should now have access to the group. +In the future, you can use SAML to sign in to GitLab.com. + + +If a user is already a member of the group, linking the SAML identity does not +change their role. + +On subsequent visits, you should be able to sign in to GitLab.com with SAML +or by visiting links directly. If the enforce SSO option is turned on, you +are then redirected to sign in through the identity provider. + +Sign in to GitLab.com with SAML + + + + Sign in to your identity provider. + From the list of apps, select the “GitLab.com” app. (The name is set by the administrator of the identity provider.) + You are then signed in to GitLab.com and redirected to the group. + + +Manage user SAML identity + + + +History + + + + + Update of SAML identities using the SAML API introduced in GitLab 15.5. + + + + + + +GitLab.com uses the SAML NameID to identify users. The NameID is: + + + A required field in the SAML response. + Case sensitive. 
+ + +The NameID must: + + + Be unique to each user. + Be a persistent value that never changes, such as a randomly generated unique user ID. + Match exactly on subsequent sign-in attempts, so it should not rely on user input +that could change between upper and lower case. + + +The NameID should not be an email address or username because: + + + Email addresses and usernames are more likely to change over time. For example, +when a person’s name changes. + Email addresses are case-insensitive, which can result in users being unable to +sign in. + + +The NameID format must be Persistent, unless you are using a field, like email, that +requires a different format. You can use any format except Transient. + +Change user NameID + + +Group owners can use the SAML API to change their group members’ NameID and update their SAML identities. + +If SCIM is configured, group owners can update the SCIM identities using the SCIM API. + +Alternatively, ask the users to reconnect their SAML account. + + + Ask relevant users to unlink their account from the group. + Ask relevant users to link their account to the new SAML app. + + + + caution After users have signed into GitLab using SSO SAML, changing the NameID value +breaks the configuration and could lock users out of the GitLab group. + + +For more information on the recommended value and format for specific identity +providers, see set up your identity provider. + +Configure enterprise user settings from SAML response + + + +History + + + + + +Introduced in GitLab 13.7. + +Changed to configure only enterprise user settings in GitLab 16.7. + + + + + + +GitLab allows setting certain user attributes based on values from the SAML response. +An existing user’s attributes are updated from the SAML response values if that +user is an enterprise user of the group. + +Supported user attributes + + + + +can_create_group - true or false to indicate whether an enterprise user can create +new top-level groups. Default is true. + +projects_limit - The total number of personal projects an enterprise user can create. +A value of 0 means the user cannot create new projects in their personal +namespace. Default is 100000. + + +Example SAML response + + +You can find SAML responses in the developer tools or console of your browser, +in base64-encoded format. Use the base64 decoding tool of your choice to +convert the information to XML. An example SAML response is shown here. + + + + user.email + + + user.nickName + + + user.firstName + + + user.lastName + + + true + + + 10 + + + + +Bypass user email confirmation with verified domains + + + +History + + + + + +Introduced in GitLab 15.4. + + + + + + +By default, users provisioned with SAML or SCIM are sent a verification email to verify their identity. Instead, you can +configure GitLab with a custom domain and GitLab +automatically confirms user accounts. Users still receive an +enterprise user welcome email. Confirmation is bypassed if both of the following are true: + + + The user is provisioned with SAML or SCIM. + The user has an email address that belongs to the verified domain. + + +Block user access + + +To rescind a user’s access to the group when only SAML SSO is configured, either: + + + Remove (in order) the user from: + + The user data store on the identity provider or the list of users on the specific app. + The GitLab.com group. + + + Use Group Sync at the top-level of +your group with the default role set to minimal access +to automatically block access to all resources in the group. 
+ + +To rescind a user’s access to the group when also using SCIM, refer to Remove access. + +Unlink accounts + + +Users can unlink SAML for a group from their profile page. This can be helpful if: + + + You no longer want a group to be able to sign you in to GitLab.com. + Your SAML NameID has changed and so GitLab can no longer find your user. + + + + caution Unlinking an account removes all roles assigned to that user in the group. +If a user re-links their account, roles need to be reassigned. + + +Groups require at least one owner. If your account is the only owner in the +group, you are not allowed to unlink the account. In that case, set up another user as a +group owner, and then you can unlink the account. + +For example, to unlink the MyOrg account: + + + On the left sidebar, select your avatar. + Select Edit profile. + On the left sidebar, select Account. + In the Service sign-in section, select Disconnect next to the connected account. + + +SSO enforcement + + + +History + + + + + +Introduced in GitLab 11.8. + +Improved in GitLab 11.11 with ongoing enforcement in the GitLab UI. + +Improved in GitLab 13.8, with an updated timeout experience. + +Improved in GitLab 13.8 with allowing group owners to not go through SSO. + +Improved in GitLab 13.11 with enforcing open SSO session to use Git if this setting is switched on. + +Improved in GitLab 14.7 to not enforce SSO checks for Git activity originating from CI/CD jobs. + +Improved in GitLab 15.5 with a flag named transparent_sso_enforcement to include transparent enforcement even when SSO enforcement is not enabled. Disabled on GitLab.com. + +Improved in GitLab 15.8 by enabling transparent SSO by default on GitLab.com. + +Generally available in GitLab 15.10. Feature flag transparent_sso_enforcement removed. + + + + + + +On GitLab.com, SSO is enforced: + + + When SAML SSO is enabled. + For users with an existing SAML identity when accessing groups and projects in the organization’s +group hierarchy. Users can view other groups and projects as well as their user settings without SSO sign in by using their GitLab.com credentials. + + +A user has a SAML identity if one or both of the following are true: + + + They have signed in to GitLab by using their GitLab group’s single sign-on URL. + They were provisioned by SCIM. + + +Users are not prompted to sign in through SSO on each visit. GitLab checks +whether a user has authenticated through SSO. If the user last signed in more +than 24 hours ago, GitLab prompts the user to sign in again through SSO. + +SSO is enforced as follows: + + + + + Project/Group visibility + Enforce SSO setting + Member with identity + Member without identity + Non-member or not signed in + + + + + Private + Off + Enforced + Not enforced + Not enforced + + + Private + On + Enforced + Enforced + Enforced + + + Public + Off + Enforced + Not enforced + Not enforced + + + Public + On + Enforced + Enforced + Not enforced + + + + +An issue exists to add a similar SSO requirement for API activity. + +SSO-only for web activity enforcement + + +When the Enforce SSO-only authentication for web activity for this group option is enabled: + + + All members must access GitLab by using their GitLab group’s single sign-on URL +to access group resources, regardless of whether they have an existing SAML +identity. + SSO is enforced when users access groups and projects in the organization’s +group hierarchy. Users can view other groups and projects without SSO sign in. + Users cannot be added as new members manually. 
+ Users with the Owner role can use the standard sign in process to make +necessary changes to top-level group settings. + For non-members or users who are not signed in: + + SSO is not enforced when they access public group resources. + SSO is enforced when they access private group resources. + + + For items in the organization’s group hierarchy, dashboard visibility is as +follows: + + SSO is enforced when viewing your To-Do List. Your +to-do items are hidden if your SSO session has expired, and an +alert is shown. + SSO is enforced when viewing your list of assigned issues. Your issues are +hidden if your SSO session has expired. +Issue 414475 proposes to change this +behavior so that issues are visible. + SSO is not enforced when viewing lists of merge requests where you are the +assignee or your review is requested. You can see merge requests even if +your SSO session has expired. + + + + +SSO enforcement for web activity has the following effects when enabled: + + + For groups, users cannot share a project in the group outside the top-level +group, even if the project is forked. + Git activity originating from CI/CD jobs do not have the SSO check enforced. + Credentials that are not tied to regular users (for example, project and group +access tokens, and deploy keys) do not have the SSO check enforced. + Users must be signed-in through SSO before they can pull images using the +Dependency Proxy. + When the Enforce SSO-only authentication for Git and Dependency Proxy +activity for this group option is enabled, any API endpoint that involves +Git activity is under SSO enforcement. For example, creating or deleting a +branch, commit, or tag. For Git activity over SSH and HTTPS, users must +have at least one active session signed-in through SSO before they can push to or +pull from a GitLab repository. + + +When SSO for web activity is enforced, non-SSO group members do not lose access +immediately. If the user: + + + Has an active session, they can continue accessing the group for up to 24 +hours until the identity provider session times out. + Is signed out, they cannot access the group after being removed from the +identity provider. + + +Related topics + + + + SAML SSO for self-managed GitLab instances + Glossary + Blog post: The ultimate guide to enabling SAML and SSO on GitLab.com + Authentication comparison between SaaS and self-managed + Passwords for users created through integrated authentication + SAML Group Sync + + +Troubleshooting + + +If you find it difficult to match the different SAML terms between GitLab and the +identity provider: + + + Check your identity provider’s documentation. Look at their example SAML +configurations for information on the terms they use. + Check the SAML SSO for self-managed GitLab instances documentation. +The self-managed GitLab instance SAML configuration file supports more options +than the GitLab.com file. You can find information on the self-managed instance +file in the: + + External OmniAuth SAML documentation. + +ruby-saml library. + + + Compare the XML response from your provider with our +example XML used for internal testing. + + +For other troubleshooting information, see the troubleshooting SAML guide. + + +" +what are security policies?,,"1. Policies + + + +Policies + + + +Tier: Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + + Generally available in GitLab 14.4. Feature flag security_orchestration_policies_configuration removed. 
+ + +Policies in GitLab provide security and compliance teams with a way to enforce controls globally in +their organization. Security teams can ensure: + + + Security scanners are enforced in development team pipelines with proper configuration. + That all scan jobs execute without any changes or alterations. + That proper approvals are provided on merge requests based on results from those findings. + + +Compliance teams can centrally enforce multiple approvers on all merge requests and ensure various +settings are enabled on projects in scope of organizational requirements, such as enabling or +locking merge request and repository settings. + +GitLab supports the following security policies: + + + Scan Execution Policy + Scan Result Policy + + +Security policy project + + +A security policy project (SPP) is a special type of project used only to contain policies. The +policies are stored in the .gitlab/security-policies/policy.yml YAML file. + +To enforce the policies contained in an SPP you link it to a project, subgroup, group, or multiples +of each. An SPP can contain multiple policies but they are enforced together. An +SPP enforced on a group or subgroup applies to everything below in the hierarchy, including all +subgroups and their projects. + +Policy changes made in a merge request take effect as soon as the merge request is merged. Those +that do not go through a merge request, but instead are committed directly to the default branch, may +require up to 10 minutes before the policy changes take effect. + +Policy design guidelines + + +When designing your policies, your goals should be: + + + Designing policy enforcement for minimum overhead but maximum coverage + Separation of duties + + +Policy enforcement design + + +To maximize policy coverage, link an SPP at the highest level that achieves your objectives: group +level, subgroup level, or project level. Enforcement at the highest level minimizes the number of +SPPs and therefore the management overhead. Policies cascade down from each level to a project, such that policies may be enforced from the group level, each subgroup above it, and then for any policies created at the project level itself. + +Policy inheritance of policies not only ensures maximum coverage with the minimum +number of SPPs, but also helps when implementing policy changes. For example, to test a policy change +you could copy an existing policy and enforce the modified policy first to a project, then to a +subgroup, and, if applicable, to a group. + + + note GitLab SaaS users may enforce policies against their top-level group or across subgroups, but cannot enforce policies across GitLab SaaS top-level groups. GitLab self-managed users can enforce policies across multiple top-level groups in their instance. + + +The following example illustrates two groups and their structure: + + + Alpha group contains two subgroups, each of which contains multiple projects. + Security and Compliance group contains two policies. 
+ + +Alpha group (contains code projects) + + + +Finance (subgroup) + + Project A + Accounts receiving (subgroup) + + Project B + Project C + + + + + +Engineering (subgroup) + + Project K + Project L + Project M + + + + +Security and Compliance group (contains SPPs) + + + Security Policy Management + Security Policy Management - security policy project + + SAST policy + Secret Detection policy + + + + +Assuming no policies have already been enforced, consider the following examples: + + + If the “SAST” policy is enforced at group Alpha, it applies to its subgroups, Finance and +Engineering, and all their projects and subgroups. If the “Secret Detection” policy is enforced +also at subgroup “Accounts receiving”, both policies apply to projects B and C. However, only the +“SAST” policy applies to project A. + If the “SAST policy is enforced at subgroup “Accounts receiving”, it applies only to projects B +and C. No policy applies to project A. + If the “Secret Detection” is enforced at project K, it applies only to project K. No other +subgroups or projects have a policy apply to them. + + +Separation of duties + + +Separation of duties is vital to successfully implementing policies. Security and compliance teams +should be responsible for defining policies and working with development teams. Development teams +should not be able to disable, modify, or circumvent the policies, in any way, or for any +exceptions. The policies should be implemented to achieve the necessary compliance and security +requirements, while allowing development teams to achieve their goals. + +The role required to enforce an SPP depends on the hierarchy level at which it’s being linked: + + + + + Organization unit + Group owner + Subgroup owner + Project owner + + + + + Group + + Yes + + No + + No + + + Subgroup + + Yes + + Yes + + No + + + Project + + Yes + + Yes + + Yes + + + + +Policy implementation + + +Implementation options for SPPs differ slightly between GitLab SaaS and GitLab self-managed. The +main difference is that on GitLab SaaS it’s only possible to create subgroups. Ensuring separation +of duties requires more granular permission configuration. + +Enforce policies across subgroups and projects + + + +Tier: Ultimate +Offering: GitLab.com + +To enforce policies against subgroups and projects, create a subgroup to contain the SPPs, separate +to the subgroups containing the projects. Using separate subgroups allows for separation of duties, +with the SPP managed by the security team, and the projects’ subgroups managed by the development +team. The security team can add or change policies without intervention from the subgroups’ owners. +Subgroup and project owners cannot override policies. + +Prerequisites: + + + You must have the Owner role to link to the SPP. For details see +Separation of duties. + + +The high-level workflow for enforcing policies across multiple subgroups: + + + + Create a subgroup to contain your policies and ensure separation of duties. + + By creating a separate standalone subgroup, you can minimize the number of users who inherit +permissions. + + + In the new group or subgroup, create a new project for managing your policies, such as “Security +Policy Management”. + + This serves as the primary location of the policy editor, allowing you to create and manage +policies in the UI. + + + Create a test policy. (you can create a policy as disabled for testing.) + + Creating the policy automatically creates a new SPP under your group or subgroup. 
This project is +used to store your policy.yml or policy-as-code. + + Check and set project permissions in the newly-created project so that only members of the security team have the Owner role. + If additional restrictions are needed to block inherited permissions or require additional review +or approval of policy changes, you can create an additional and separate set of policies to +enforce against the first. For example, you may define a single set of individual users +responsible for approving policy changes. + In the SPP just created, create the policies required. You can use the policy editor in the Security Policy Management project you created, under the Policies tab. Or you can directly update the policies in the policy.yml file stored in the newly-created security policy project Security Policy Management - security policy project. + Link up groups, subgroups, or projects to the SPP. As a subgroup owner, or project +owner, you can visit the Policies page and create a link to the SPP. Include the full +path and the project’s name should end with “- security policy project”. For details, see +Link to a security policy project. + + +Enforce policies across groups, subgroups, and projects + + + +Tier: Ultimate +Offering: Self-managed, GitLab Dedicated + +To enforce policies against multiple groups, create a group to contain the SPPs, separate to the +groups containing the projects. Using separate groups allows for separation of duties, with the SPP +managed by the security team, and the projects’ groups managed by the development team. The security +team can add or change policies without intervention from the groups’ owners. Subgroup and project +owners cannot override policies. + +Prerequisites: + + + You must have the Owner role to link to the SPP. For details see +Separation of duties. + To support approval groups globally across your instance, enable security_policy_global_group_approvers_enabled in your GitLab instance application settings. + + +The high-level workflow for enforcing policies across multiple groups: + + + + Create a separate group to contain your policies and ensure separation of duties. + + By creating a separate standalone group, you can minimize the number of users who inherit +permissions. + + + In the new group, create a new project for managing your policies, such as “Security Policy +Management”. + + This serves as the primary location of the policy editor, allowing you to +create and manage policies in the UI. + + + Create a test policy (you can create a policy as disabled for testing). + + Creating the policy automatically creates a new SPP under your group. This project is +used to store your policy.yml or policy-as-code. + + Check and set permissions in the newly created project as desired. By default, Owners and +Maintainers are able to create, edit, and delete policies. + If additional restrictions are needed to block inherited permissions or require additional review +or approval of policy changes, you can create an additional and separate set of policies to +enforce against the first. For example, you may define a single set of individual users +responsible for approving policy changes. + Set the permissions of the SPP so that only members of the security team have the Owner role. + In the SPP just created, create the policies required. + Link up groups, subgroups, or projects to the SPP. As a group owner, subgroup owner, or project +owner, you can visit the Policies page and create a link to the SPP. 
Include the full path and +the project’s name should end with “- security policy project”. For details, see +Link to a security policy project. + + +Enforce policies across multiple projects + + +When linking a group or subgroup to your policies is not granular enough, it is possible to link up +to the SPP per project. This allows you to filter projects from enforcement that are not applicable. +To enforce an SPP policies at the project level, create a security policy project and link them. Use +project permissions to ensure only the security team has the Owner role in the security policy +project. + +To enforce policies against a project: + + + Create a security policy project at the same level as the target project. + In the security policy project, create the policies required. + Link the target project to the security policy project. + + +Link to a security policy project + + +To enforce the policies contained in an SPP against a project, subgroup, or group, you link them. + +Prerequisites: + + + You must have the Owner role to link to the SPP. For details, see +Separation of duties. + + +To link a project, subgroup, or group to an SPP: + + + On the left sidebar, select Search or go to and find your project, subgroup, or group. + Select Secure > Policies. + Select Edit Policy Project, then search for and select the project you would like to link +from the dropdown list. + Select Save. + + +To unlink a security policy project, follow the same steps but instead select the trash can icon in +the dialog. + +Viewing the linked security policy project + + +All users who have access to the project policy page and are not project owners instead view a +button linking out to the associated security policy project. + +Policy management + + +The Policies page displays deployed policies for all available environments. You can check a +policy’s information (for example, description or enforcement status), and create and edit deployed +policies: + + + On the left sidebar, select Search or go to and find your project. + Select Secure > Policies. + + + + +Policy editor + + + +History + + + + + +Introduced in GitLab 13.4. + + + + + + +You can use the policy editor to create, edit, and delete policies: + + + On the left sidebar, select Search or go to and find your project. + Select Secure > Policies. + + To create a new policy, select New policy which is located in the Policies page’s header. +You can then select which type of policy to create. + To edit an existing policy, select Edit policy in the selected policy drawer. + + + + +The policy editor has two modes: + + + + The visual Rule mode allows you to construct and preview policy +rules using rule blocks and related controls. + + + + + YAML mode allows you to enter a policy definition in .yaml format +and is aimed at expert users and cases that the Rule mode doesn’t +support. + + + + + +You can use both modes interchangeably and switch between them at any +time. If a YAML resource is incorrect or contains data not supported +by the Rule mode, Rule mode is automatically +disabled. If the YAML is incorrect, you must use YAML +mode to fix your policy before Rule mode is available again. + +When you finish creating or editing your policy, save and apply it by selecting the +Configure with a merge request button and then merging the resulting merge request. When you +press this button, the policy YAML is validated and any resulting errors are displayed. 
+Additionally, if you are a project owner and a security policy project has not been previously +associated with this project, then a new project is created and associated automatically at the same +time that the first policy merge request is created. + +Managing projects in bulk via a script + + +You can use the Vulnerability-Check Migration script to bulk create policies or associate security policy projects with development projects. For instructions and a demonstration of how to use the Vulnerability-Check Migration script, see this video. + +Troubleshooting + + + +Branch name 'update-policy-' does not follow the pattern '' + + +When you create a new security policy or change an existing policy, a new branch is automatically created with the branch name following the pattern update-policy-. For example: update-policy-1659094451. + +If you have group or instance push rules that do not allow branch name patterns that contain the text update-policy-, you will get an error that states Branch name 'update-policy-' does not follow the pattern ''. + +The workaround is to amend your group or instance push rules to allow branches following the pattern update-policy- followed by an integer timestamp. + +Troubleshooting common issues configuring security policies + + + + Confirm that scanners are properly configured and producing results for the latest branch. +Security Policies are designed to require approval when there are no results (no security report), +as this ensures that no vulnerabilities are introduced. We cannot know if there are any +vulnerabilities unless the scans enforced by the policy complete successfully and are evaluated. + For merge request approval policies, we require artifacts for each scanner defined in the policy for both the +source and target branch. To ensure merge request approval policies capture the necessary results, confirm +your scan execution is properly implemented and enforced. If using scan execution policies, +enforcing on all branches often addresses this need. + Comparison in merge request approval policies depends on a successful and completed merge base pipeline. If the merge base pipeline is skipped, merge requests with the merge base pipeline are blocked. + When running scan execution policies based on a SAST action, ensure target repositories contain +proper code files. SAST runs different analyzers +based on the types of files in the repository, +and if no supported files are found it does not run any jobs. See the +SAST CI template +for more details. + Check for any branch configuration conflicts. For example, if your policy is configured to enforce rules on +main but some projects in the scope are using master as their default branch, the policy +is not applied for the latter. You can define policies to enforce rules generically on default +branches regardless of the name used in the project or on all protected branches to address this +issue. + Merge request approval policies created at the group or subgroup level can take some time to apply to all +the merge requests in the group. + Scheduled scan execution policies run with a minimum 15 minute cadence. Learn more about the schedule rule type. + When scheduling pipelines, keep in mind that CRON scheduling is based on UTC on GitLab SaaS and is +based on your server time for self managed instances. When testing new policies, it may appear +pipelines are not running properly when in fact they are scheduled in your server’s time zone. 
 When enforcing scan execution policies, security policies use a bot in the target project to trigger scheduled pipelines to ensure enforcement. When the bot is missing, it is automatically created, and the following scheduled scan uses it.
 You should not link a security policy project to a development project and to the group or subgroup the development project belongs to at the same time. Linking this way results in approval rules from the Scan Result Policy not being applied to merge requests in the development project.
 When creating a Scan Result Policy, neither the array severity_levels nor the array vulnerability_states in the scan_finding rule can be left empty. For a working rule, at least one entry must exist.
 When merge request approval policies are enforced on projects containing manual jobs in their pipeline, policies evaluate the completed pipeline jobs and ignore the manual jobs. When the manual jobs are run, the policy re-evaluates the MR.


If you are still experiencing issues, you can view recent reported bugs and raise new unreported issues.


2. Scan execution policies



Scan execution policies



Tier: Ultimate
Offering: GitLab.com, Self-managed, GitLab Dedicated


History




 Group-level security policies introduced in GitLab 15.2.
 Group-level security policies enabled on GitLab.com in GitLab 15.4.
 Operational container scanning introduced in GitLab 15.5.
 Support for custom CI variables in the Scan Execution Policies editor introduced in GitLab 16.2.
 Enforcement of scan execution policies on projects with an existing GitLab CI/CD configuration introduced in GitLab 16.2 with a flag named scan_execution_policy_pipelines. Feature flag scan_execution_policy_pipelines removed in GitLab 16.5.
 Overriding predefined variables in scan execution policies introduced in GitLab 16.10 with a flag named allow_restricted_variables_at_policy_level. Disabled by default.




 On self-managed GitLab, by default this feature is not available. To make it available, an administrator can enable the feature flag named allow_restricted_variables_at_policy_level.
On GitLab.com and GitLab Dedicated, this feature is not available.

Group, subgroup, or project owners can use scan execution policies to require that security scans run on a specified schedule or with the project pipeline. The security scan runs with multiple project pipelines if you define the policy at a group or subgroup level. GitLab injects the required scans into the CI/CD pipeline as new jobs.


Scan execution policies are enforced for all applicable projects, even those without a GitLab CI/CD configuration file or where AutoDevOps is disabled. Security policies create the file implicitly so that the policies can be enforced. This ensures that policies enabling execution of secret detection, static analysis, or other scanners that do not require a build in the project are still able to execute and be enforced.

GitLab appends a hyphen and a number to the job name. The number is unique per policy action to avoid name conflicts. If you define a policy at the group level, it applies to every child project or subgroup. You cannot edit a group-level policy from a child project or subgroup.

This feature has some overlap with compliance framework pipelines, as we have not unified the user experience for these two features. For details on the similarities and differences between these features, see Enforce scan execution.
+ + + note Policy jobs for scans other than DAST scans are created in the test stage of the pipeline. If you modify the default pipeline +stages, +to remove the test stage, jobs will run in the scan-policies stage instead. This stage is injected into the CI pipeline at evaluation time if it doesn’t exist. If the build stage exists, it is injected just after the build stage. If the build stage does not exist, it is injected at the beginning of the pipeline. DAST scans always run in the dast stage. If this stage does not exist, then a dast stage is injected at the end of the pipeline. + + + + + For a video walkthrough, see How to set up Security Scan Policies in GitLab. + + For an overview, see Enforcing scan execution policies on projects with no GitLab CI/CD configuration. + + +Requirements and limitations + + + + The maximum number of scan execution policies is five per security policy project. + + +Scan execution policy editor + + + + note Only group, subgroup, or project Owners have the permissions +to select Security Policy Project. + + +Once your policy is complete, save it by selecting Configure with a merge request +at the bottom of the editor. You are redirected to the merge request on the project’s +configured security policy project. If one does not link to your project, a security +policy project is automatically created. Existing policies can also be +removed from the editor interface by selecting Delete policy +at the bottom of the editor. + +Most policy changes take effect as soon as the merge request is merged. Any changes that +do not go through a merge request and are committed directly to the default branch may require up to 10 minutes +before the policy changes take effect. + + + + + note Selection of site and scanner profiles using the rule mode editor for DAST execution policies differs based on +whether the policy is being created at the project or group level. For project-level policies the rule mode editor +presents a list of profiles to choose from that are already defined in the project. For group-level policies +you are required to type in the names of the profiles to use, and to prevent pipeline errors, profiles with +matching names must exist in all of the group’s projects. + + +Scan execution policies schema + + +The YAML file with scan execution policies consists of an array of objects matching scan execution +policy schema nested under the scan_execution_policy key. You can configure a maximum of 5 +policies under the scan_execution_policy key. Any other policies configured after +the first 5 are not applied. + +When you save a new policy, GitLab validates its contents against this JSON schema. +If you’re not familiar with how to read JSON schemas, +the following sections and tables provide an alternative. + + + + + Field + Type + Required + Possible values + Description + + + + + scan_execution_policy + +array of scan execution policy + true +   + List of scan execution policies (maximum 5) + + + + +Scan execution policy schema + + + + + + Field + Type + Required + Possible values + Description + + + + + name + string + true +   + Name of the policy. Maximum of 255 characters. + + + +description (optional) + string + true +   + Description of the policy. + + + enabled + boolean + true + +true, false + + Flag to enable (true) or disable (false) the policy. + + + rules + +array of rules + true +   + List of rules that the policy applies. + + + actions + +array of actions + true +   + List of actions that the policy enforces. 
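Taken together, these top-level fields combine into a policy file like the following minimal sketch. The policy name, description, branch, and scan type here are placeholder choices; complete examples appear later in this section:

scan_execution_policy:
- name: Example minimal policy
  description: Placeholder policy used only to illustrate the schema
  enabled: true
  rules:
  - type: pipeline
    branches:
    - main
  actions:
  - scan: secret_detection
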
+ + + + + +pipeline rule type + + + +History + + + + + The branch_type field was introduced in GitLab 16.1 with a flag named security_policies_branch_type. Generally available in GitLab 16.2. Feature flag removed. + The branch_exceptions field was introduced in GitLab 16.3 with a flag named security_policies_branch_exceptions. Generally available in GitLab 16.5. Feature flag removed. + + + + + + + + On self-managed GitLab, by default the branch_exceptions field is available. To hide the feature, an administrator can disable the feature flag named security_policies_branch_exceptions. +On GitLab.com and GitLab Dedicated, this feature is available. + + +This rule enforces the defined actions whenever the pipeline runs for a selected branch. + + + + + Field + Type + Required + Possible values + Description + + + + + type + string + true + pipeline + The rule’s type. + + + +branches 1 + + +array of string + + true if branch_type field does not exist + +* or the branch’s name + The branch the given policy applies to (supports wildcard). + + + +branch_type 1 + + string + true if branches field does not exist + +default, protected or all + + The types of branches the given policy applies to. + + + branch_exceptions + +array of string + + false + Names of branches + Branches to exclude from this rule. + + + + + + You must specify only one of branches or branch_type. + + + +schedule rule type + + + +History + + + + + The branch_type field was introduced in GitLab 16.1 with a flag named security_policies_branch_type. Generally available in GitLab 16.2. Feature flag removed. + The branch_exceptions field was introduced in GitLab 16.3 with a flag named security_policies_branch_exceptions. Generally available in GitLab 16.5. Feature flag removed. + + + + + + + + caution In GitLab 16.1 and earlier, you should not use direct transfer with scheduled scan execution policies. If using direct transfer, first upgrade to GitLab 16.2 and ensure security policy bots are enabled in the projects you are enforcing. + + + + On self-managed GitLab, by default the branch_exceptions field is available. To hide the feature, an administrator can disable the feature flag named security_policies_branch_exceptions. +On GitLab.com and GitLab Dedicated, this feature is available. + + +This rule schedules a scan pipeline, enforcing the defined actions on the schedule defined in the cadence field. A scheduled pipeline does not run other jobs defined in the project’s .gitlab-ci.yml file. When a project is linked to a security policy project, a security policy bot is created in the project and will become the author of any scheduled pipelines. + + + + + Field + Type + Required + Possible values + Description + + + + + type + string + true + schedule + The rule’s type. + + + +branches 1 + + +array of string + + true if either branch_type or agents fields does not exist + +* or the branch’s name + The branch the given policy applies to (supports wildcard). + + + +branch_type 1 + + string + true if either branches or agents fields does not exist + +default, protected or all + + The types of branches the given policy applies to. + + + branch_exceptions + +array of string + + false + Names of branches + Branches to exclude from this rule. + + + cadence + string + true + CRON expression (for example, 0 0 * * *) + A whitespace-separated string containing five fields that represents the scheduled time. Minimum of 15 minute intervals when used together with the branches field. 
+ + + timezone + string + false + Time zone identifier (for example, America/New_York) + Time zone to apply to the cadence. Value must be an IANA Time Zone Database identifier. + + + +agents 1 + + object + true if either branch_type or branches fields do not exists +   + The name of the GitLab agents where Operational Container Scanning runs. The object key is the name of the Kubernetes agent configured for your project in GitLab. + + + + + + You must specify only one of branches, branch_type, or agents. + + +Scheduled scan pipelines are triggered by a security policy bot user that is a guest member of the project with elevated permissions for users of type security_policy_bot so it may carry out this task. Security policy bot users are automatically created when the security policy project is linked, and removed when the security policy project is unlinked. + +If the project does not have a security policy bot user, the bot will be automatically created, and the following scheduled scan pipeline will use it. + +GitLab supports the following types of CRON syntax for the cadence field: + + + A daily cadence of once per hour at a specified hour, for example: 0 18 * * * + + A weekly cadence of once per week on a specified day and at a specified hour, for example: 0 13 * * 0 + + + + + note Other elements of the CRON syntax may work in the cadence field if supported by the cron we are using in our implementation, however, GitLab does not officially test or support them. + + +When using the schedule rule type in conjunction with the agents field, note the following: + + + The GitLab Agent for Kubernetes checks every 30 seconds to see if there is an applicable policy. When a policy is found, the scans are executed according to the cadence defined. + The CRON expression is evaluated using the system-time of the Kubernetes-agent pod. + + +When using the schedule rule type in conjunction with the branches field, note the following: + + + The cron worker runs on 15 minute intervals and starts any pipelines that were scheduled to run during the previous 15 minutes. + Based on your rule, you might expect scheduled pipelines to run with an offset of up to 15 minutes. + The CRON expression is evaluated in standard UTC time from GitLab.com. If you have a self-managed GitLab instance and have changed the server time zone, the CRON expression is evaluated with the new time zone. + + + + + +agent schema + + +Use this schema to define agents objects in the schedule rule type. + + + + + Field + Type + Required + Description + + + + + namespaces + +array of string + + true + The namespace that is scanned. If empty, all namespaces are scanned. + + + + +Policy example + + +- name: Enforce Container Scanning in cluster connected through my-gitlab-agent for default and kube-system namespaces + enabled: true + rules: + - type: schedule + cadence: '0 10 * * *' + agents: + : + namespaces: + - 'default' + - 'kube-system' + actions: + - scan: container_scanning + + +The keys for a schedule rule are: + + + +cadence (required): a CRON expression for when the scans are run + +agents: (required): The name of the agent to use for scanning + +agents::namespaces (optional): The Kubernetes namespaces to scan. If omitted, all namespaces are scanned. + + + +scan action type + + + +History + + + + + Scan Execution Policies variable precedence was changed in GitLab 16.7 with a flag named security_policies_variables_precedence. Enabled by default. Feature flag removed in GitLab 16.8. 
+ + + + + + +This action executes the selected scan with additional parameters when conditions for at least one +rule in the defined policy are met. + + + + + Field + Type + Possible values + Description + + + + + scan + string + +sast, sast_iac, dast, secret_detection, container_scanning, dependency_scanning + + The action’s type. + + + site_profile + string + Name of the selected DAST site profile. + The DAST site profile to execute the DAST scan. This field should only be set if scan type is dast. + + + scanner_profile + +string or null + + Name of the selected DAST scanner profile. + The DAST scanner profile to execute the DAST scan. This field should only be set if scan type is dast. + + + variables + object +   + A set of CI variables, supplied as an array of key: value pairs, to apply and enforce for the selected scan. The key is the variable name, with its value provided as a string. This parameter supports any variable that the GitLab CI job supports for the specified scan. + + + tags + +array of string + +   + A list of runner tags for the policy. The policy jobs are run by runner with the specified tags. + + + + +Note the following: + + + You must create the site profile and scanner profile +with selected names for each project that is assigned to the selected Security Policy Project. +Otherwise, the policy is not applied and a job with an error message is created instead. + Once you associate the site profile and scanner profile by name in the policy, it is not possible +to modify or delete them. If you want to modify them, you must first disable the policy by setting +the active flag to false. + When configuring policies with a scheduled DAST scan, the author of the commit in the security +policy project’s repository must have access to the scanner and site profiles. Otherwise, the scan +is not scheduled successfully. + For a secret detection scan, only rules with the default ruleset are supported. Custom rulesets +are not supported. Alternatively, you may configure a remote configuration file and set the SECRET_DETECTION_RULESET_GIT_REFERENCE variable. + By default, for scheduled scan execution policies, secret detection scans configured without any CI variables defined run first in historic mode (SECRET_DETECTION_HISTORIC_SCAN = true). All subsequent scheduled scans run in default mode with SECRET_DETECTION_LOG_OPTIONS set to the commit range between last run and current SHA. CI variables provided in the scan execution policy can override this behavior. Learn more about historic mode. + For triggered scan execution policies, secret detection works just like regular scan configured manually in the .gitlab-ci.yml. + A container scanning scan that is configured for the pipeline rule type ignores the agent defined in the agents object. The agents object is only considered for schedule rule types. +An agent with a name provided in the agents object must be created and configured for the project. + Variables defined in a Scan Execution Policy follow the standard CI/CD variable precedence. + + Preconfigured values are used for the following CI/CD variables in any project on which a scan +execution policy is enforced. Their values can be overridden, but only if they are declared in +a policy. 
They cannot be overridden by group or project CI/CD variables: + + +DS_EXCLUDED_PATHS: spec, test, tests, tmp +SAST_EXCLUDED_PATHS: spec, test, tests, tmp +SECRET_DETECTION_EXCLUDED_PATHS: '' +SECRET_DETECTION_HISTORIC_SCAN: false +SAST_DISABLED_ANALYZERS: '' +DS_DISABLED_ANALYZERS: '' + + + In GitLab 16.9 and earlier: + + + If the CI/CD variables suffixed _EXCLUDED_PATHS were declared in a policy, their values could +be overridden by group or project CI/CD variables. + If the CI/CD variables suffixed _DISABLED_ANALYZERS were declared in a policy, their values were +ignored, regardless of where they were defined: policy, group, or project. + + + + +Example security policies project + + +You can use this example in a .gitlab/security-policies/policy.yml file stored in a +security policy project: + +--- +scan_execution_policy: +- name: Enforce DAST in every release pipeline + description: This policy enforces pipeline configuration to have a job with DAST scan for release branches + enabled: true + rules: + - type: pipeline + branches: + - release/* + actions: + - scan: dast + scanner_profile: Scanner Profile A + site_profile: Site Profile B +- name: Enforce DAST and secret detection scans every 10 minutes + description: This policy enforces DAST and secret detection scans to run every 10 minutes + enabled: true + rules: + - type: schedule + branches: + - main + cadence: ""*/10 * * * *"" + actions: + - scan: dast + scanner_profile: Scanner Profile C + site_profile: Site Profile D + - scan: secret_detection +- name: Enforce Secret Detection and Container Scanning in every default branch pipeline + description: This policy enforces pipeline configuration to have a job with Secret Detection and Container Scanning scans for the default branch + enabled: true + rules: + - type: pipeline + branches: + - main + actions: + - scan: secret_detection + - scan: sast + variables: + SAST_EXCLUDED_ANALYZERS: brakeman + - scan: container_scanning + + +In this example: + + + For every pipeline executed on branches that match the release/* wildcard (for example, branch +release/v1.2.1) + + DAST scans run with Scanner Profile A and Site Profile B. + + + DAST and secret detection scans run every 10 minutes. The DAST scan runs with Scanner Profile C +and Site Profile D. + Secret detection, container scanning, and SAST scans run for every pipeline executed on the main +branch. The SAST scan runs with the SAST_EXCLUDED_ANALYZER variable set to ""brakeman"". + + +Example for scan execution policy editor + + +You can use this example in the YAML mode of the scan execution policy editor. +It corresponds to a single object from the previous example. + +name: Enforce Secret Detection and Container Scanning in every default branch pipeline +description: This policy enforces pipeline configuration to have a job with Secret Detection and Container Scanning scans for the default branch +enabled: true +rules: + - type: pipeline + branches: + - main +actions: + - scan: secret_detection + - scan: container_scanning + + +Avoiding duplicate scans + + +Scan execution policies can cause the same type of scanner to run more than once if developers include scan jobs in the project’s +.gitlab-ci.yml file. This behavior is intentional as scanners can run more than once with different variables and settings. For example, a +developer may want to try running a SAST scan with different variables than the one enforced by the security and compliance team. 
In +this case, two SAST jobs run in the pipeline, one with the developer’s variables and one with the security and compliance team’s variables. + +If you want to avoid running duplicate scans, you can either remove the scans from the project’s .gitlab-ci.yml file or disable your +local jobs by setting SAST_DISABLED: ""true"". Disabling jobs this way does not prevent the security jobs defined by scan execution +policies from running. + +Experimental features + + + +Status: Experiment + +These experimental features have limitations: + + + Enforcing pipeline execution using the pipeline execution action in projects +without a .gitlab-ci.yml is not supported. + The pipeline execution action cannot be used with a scheduled trigger type. + + +Have feedback on our experimental features? We’d love to hear it! Please share your thoughts in our +feedback issue. + +Pipeline execution policy action + + +Prerequisites: + + + + To enable the pipeline execution policy action feature, a Group owner or administrator must enable +the experimental feature: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > General. + Expand Permissions and group features. + Select the Security policy pipeline execution action checkbox. + + Optional. Select Enforce for all subgroups. + + If the setting is not enforced for all subgroups, subgroup owners can manage the setting per subgroup. + + + + + +The pipeline execution policy action introduces a new scan action type into +scan execution policies for creating and enforcing custom CI in your target +development projects. + +This custom scan type uses a remote CI configuration file to define the custom +CI you want enforced. Scan execution policies then merge this file with the +project’s .gitlab-ci.yml to execute the compliance jobs for each project +enforced by the policy. + + +ci_configuration_path object + + + + + + Field + Type + Required + Description + + + + + project + string + true + A project namespace path. + + + file + string + true + The filename of the CI/CD YAML file. + + + ref + string + false + The branch name, tag name, or commit SHA. If not specified, uses the default branch. + + + + + +scan action type + + +This action executes the selected scan with additional parameters when +conditions for at least one rule in the defined policy are met. + + + + + Field + Type + Possible values + Description + + + + + scan + string + custom + The action’s type. + + + ci_configuration + string +   + GitLab CI YAML as formatted as string. + + + ci_configuration_path + object +   + Object with project path and filename pointing to a CI configuration. + + + + +Note the following: + + + For custom scans, you must specify one of ci_configuration or ci_configuration_path. + +custom scans are being executed for triggered rules only. + Jobs variables from custom scans take precedence over the project’s CI/CD configuration. + Users triggering a pipeline must have at least read access to CI files specified in the ci_configuration_path or included in the CI/CD configuration. + It is not possible to define custom stages using the stages keyword in a custom scan action. Instead three default stages will be added to the pipeline: + + +.pipeline-policy-preat the beginning of the pipeline, before the .pre stage. + +.pipeline-policy-test after the test stage. If the test stage does not exist, it will be injected after the build stage. If the build stage does not exist, it will be injected at the beginning of the pipeline after the .pre stage. 
+ +.pipeline-policy-post at the very end of the pipeline, after the .post stage. + + + Jobs without a stage are assigned to the .pipeline-policy-test stage by default. + + +Example security policies project + + +You can use this example in a .gitlab/security-policies/policy.yml file stored in a +security policy project: + +--- +scan_execution_policy: +- name: Create a custom scan that injects test job + description: This policy enforces pipeline configuration to have a job with DAST scan for release branches + enabled: true + rules: + - type: pipeline + branches: + - release/* + actions: + - scan: custom + ci_configuration: |- + test job: + stage: test + script: + - echo ""Hello World"" + + +In this example a test job is injected into the test stage of the pipeline, printing Hello World. + +Security policy scopes + + +Prerequisites: + + + + To enable the pipeline execution policy action feature, a group owner or administrator must enable +the experimental feature: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > General. + Expand Permissions and group features. + Select the Security Policy Scopes checkbox. + + Optional. Select Enforce for all subgroups. + + If the setting is not enforced for all subgroups, subgroup owners can manage the setting per subgroup. + + + + + +Security policy enforcement depends first on establishing a link between the group, subgroup, or +project on which you want to enforce policies, and the security policy project that contains the +policies. For example, if you are linking policies to a group, a group owner must create the link to +the security policy project. Then, all policies in the security policy project are inherited by all +projects in the group. + +You can refine a security policy’s scope to: + + + +Include only projects containing a compliance framework label. + +Include or exclude selected projects from enforcement. + + +Policy scope schema + + + + + + Field + Type + Required + Possible values + Description + + + + + policy_scope + object + false + +compliance_frameworks, projects + + Scopes the policy based on compliance framework labels or projects you define. + + + + + +policy_scope scope type + + + + + + Field + Type + Possible values + Description + + + + + compliance_frameworks + object + ids + List of IDs of the compliance frameworks in scope of enforcement, in an ids array. + + + projects + object + +including, excluding + + Use excluding: or including: then list the IDs of the projects you wish to include or exclude, in an ids array. 
+ + + + +Example policy.yml with security policy scopes + + +--- +scan_execution_policy: +- name: Enforce DAST in every release pipeline + description: This policy enforces pipeline configuration to have a job with DAST scan for release branches + enabled: true + rules: + - type: pipeline + branches: + - release/* + actions: + - scan: dast + scanner_profile: Scanner Profile A + site_profile: Site Profile B + policy_scope: + compliance_frameworks: + ids: + - 2 + - 11 +- name: Enforce Secret Detection and Container Scanning in every default branch pipeline + description: This policy enforces pipeline configuration to have a job with Secret Detection and Container Scanning scans for the default branch + enabled: true + rules: + - type: pipeline + branches: + - main + actions: + - scan: secret_detection + - scan: sast + variables: + SAST_EXCLUDED_ANALYZERS: brakeman + policy_scope: + projects: + excluding: + ids: + - 24 + - 27 + + + +" +"i have security execution policy set at my top level group which ensures that container scanner is running for changes on the main branch. however, there are some projects within my top level group that don't use containers. as a result, when i make changes in that project, container scanning runs on the project even though there aren't any containers. as a result, the container scanning job fails. how do i exclude those projects from being scanned with container scanning",,"1. Policies + + + +Policies + + + +Tier: Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + + Generally available in GitLab 14.4. Feature flag security_orchestration_policies_configuration removed. + + +Policies in GitLab provide security and compliance teams with a way to enforce controls globally in +their organization. Security teams can ensure: + + + Security scanners are enforced in development team pipelines with proper configuration. + That all scan jobs execute without any changes or alterations. + That proper approvals are provided on merge requests based on results from those findings. + + +Compliance teams can centrally enforce multiple approvers on all merge requests and ensure various +settings are enabled on projects in scope of organizational requirements, such as enabling or +locking merge request and repository settings. + +GitLab supports the following security policies: + + + Scan Execution Policy + Scan Result Policy + + +Security policy project + + +A security policy project (SPP) is a special type of project used only to contain policies. The +policies are stored in the .gitlab/security-policies/policy.yml YAML file. + +To enforce the policies contained in an SPP you link it to a project, subgroup, group, or multiples +of each. An SPP can contain multiple policies but they are enforced together. An +SPP enforced on a group or subgroup applies to everything below in the hierarchy, including all +subgroups and their projects. + +Policy changes made in a merge request take effect as soon as the merge request is merged. Those +that do not go through a merge request, but instead are committed directly to the default branch, may +require up to 10 minutes before the policy changes take effect. + +Policy design guidelines + + +When designing your policies, your goals should be: + + + Designing policy enforcement for minimum overhead but maximum coverage + Separation of duties + + +Policy enforcement design + + +To maximize policy coverage, link an SPP at the highest level that achieves your objectives: group +level, subgroup level, or project level. 
Enforcement at the highest level minimizes the number of +SPPs and therefore the management overhead. Policies cascade down from each level to a project, such that policies may be enforced from the group level, each subgroup above it, and then for any policies created at the project level itself. + +Policy inheritance of policies not only ensures maximum coverage with the minimum +number of SPPs, but also helps when implementing policy changes. For example, to test a policy change +you could copy an existing policy and enforce the modified policy first to a project, then to a +subgroup, and, if applicable, to a group. + + + note GitLab SaaS users may enforce policies against their top-level group or across subgroups, but cannot enforce policies across GitLab SaaS top-level groups. GitLab self-managed users can enforce policies across multiple top-level groups in their instance. + + +The following example illustrates two groups and their structure: + + + Alpha group contains two subgroups, each of which contains multiple projects. + Security and Compliance group contains two policies. + + +Alpha group (contains code projects) + + + +Finance (subgroup) + + Project A + Accounts receiving (subgroup) + + Project B + Project C + + + + + +Engineering (subgroup) + + Project K + Project L + Project M + + + + +Security and Compliance group (contains SPPs) + + + Security Policy Management + Security Policy Management - security policy project + + SAST policy + Secret Detection policy + + + + +Assuming no policies have already been enforced, consider the following examples: + + + If the “SAST” policy is enforced at group Alpha, it applies to its subgroups, Finance and +Engineering, and all their projects and subgroups. If the “Secret Detection” policy is enforced +also at subgroup “Accounts receiving”, both policies apply to projects B and C. However, only the +“SAST” policy applies to project A. + If the “SAST policy is enforced at subgroup “Accounts receiving”, it applies only to projects B +and C. No policy applies to project A. + If the “Secret Detection” is enforced at project K, it applies only to project K. No other +subgroups or projects have a policy apply to them. + + +Separation of duties + + +Separation of duties is vital to successfully implementing policies. Security and compliance teams +should be responsible for defining policies and working with development teams. Development teams +should not be able to disable, modify, or circumvent the policies, in any way, or for any +exceptions. The policies should be implemented to achieve the necessary compliance and security +requirements, while allowing development teams to achieve their goals. + +The role required to enforce an SPP depends on the hierarchy level at which it’s being linked: + + + + + Organization unit + Group owner + Subgroup owner + Project owner + + + + + Group + + Yes + + No + + No + + + Subgroup + + Yes + + Yes + + No + + + Project + + Yes + + Yes + + Yes + + + + +Policy implementation + + +Implementation options for SPPs differ slightly between GitLab SaaS and GitLab self-managed. The +main difference is that on GitLab SaaS it’s only possible to create subgroups. Ensuring separation +of duties requires more granular permission configuration. + +Enforce policies across subgroups and projects + + + +Tier: Ultimate +Offering: GitLab.com + +To enforce policies against subgroups and projects, create a subgroup to contain the SPPs, separate +to the subgroups containing the projects. 
Using separate subgroups allows for separation of duties, +with the SPP managed by the security team, and the projects’ subgroups managed by the development +team. The security team can add or change policies without intervention from the subgroups’ owners. +Subgroup and project owners cannot override policies. + +Prerequisites: + + + You must have the Owner role to link to the SPP. For details see +Separation of duties. + + +The high-level workflow for enforcing policies across multiple subgroups: + + + + Create a subgroup to contain your policies and ensure separation of duties. + + By creating a separate standalone subgroup, you can minimize the number of users who inherit +permissions. + + + In the new group or subgroup, create a new project for managing your policies, such as “Security +Policy Management”. + + This serves as the primary location of the policy editor, allowing you to create and manage +policies in the UI. + + + Create a test policy. (you can create a policy as disabled for testing.) + + Creating the policy automatically creates a new SPP under your group or subgroup. This project is +used to store your policy.yml or policy-as-code. + + Check and set project permissions in the newly-created project so that only members of the security team have the Owner role. + If additional restrictions are needed to block inherited permissions or require additional review +or approval of policy changes, you can create an additional and separate set of policies to +enforce against the first. For example, you may define a single set of individual users +responsible for approving policy changes. + In the SPP just created, create the policies required. You can use the policy editor in the Security Policy Management project you created, under the Policies tab. Or you can directly update the policies in the policy.yml file stored in the newly-created security policy project Security Policy Management - security policy project. + Link up groups, subgroups, or projects to the SPP. As a subgroup owner, or project +owner, you can visit the Policies page and create a link to the SPP. Include the full +path and the project’s name should end with “- security policy project”. For details, see +Link to a security policy project. + + +Enforce policies across groups, subgroups, and projects + + + +Tier: Ultimate +Offering: Self-managed, GitLab Dedicated + +To enforce policies against multiple groups, create a group to contain the SPPs, separate to the +groups containing the projects. Using separate groups allows for separation of duties, with the SPP +managed by the security team, and the projects’ groups managed by the development team. The security +team can add or change policies without intervention from the groups’ owners. Subgroup and project +owners cannot override policies. + +Prerequisites: + + + You must have the Owner role to link to the SPP. For details see +Separation of duties. + To support approval groups globally across your instance, enable security_policy_global_group_approvers_enabled in your GitLab instance application settings. + + +The high-level workflow for enforcing policies across multiple groups: + + + + Create a separate group to contain your policies and ensure separation of duties. + + By creating a separate standalone group, you can minimize the number of users who inherit +permissions. + + + In the new group, create a new project for managing your policies, such as “Security Policy +Management”. 
+ + This serves as the primary location of the policy editor, allowing you to +create and manage policies in the UI. + + + Create a test policy (you can create a policy as disabled for testing). + + Creating the policy automatically creates a new SPP under your group. This project is +used to store your policy.yml or policy-as-code. + + Check and set permissions in the newly created project as desired. By default, Owners and +Maintainers are able to create, edit, and delete policies. + If additional restrictions are needed to block inherited permissions or require additional review +or approval of policy changes, you can create an additional and separate set of policies to +enforce against the first. For example, you may define a single set of individual users +responsible for approving policy changes. + Set the permissions of the SPP so that only members of the security team have the Owner role. + In the SPP just created, create the policies required. + Link up groups, subgroups, or projects to the SPP. As a group owner, subgroup owner, or project +owner, you can visit the Policies page and create a link to the SPP. Include the full path and +the project’s name should end with “- security policy project”. For details, see +Link to a security policy project. + + +Enforce policies across multiple projects + + +When linking a group or subgroup to your policies is not granular enough, it is possible to link up +to the SPP per project. This allows you to filter projects from enforcement that are not applicable. +To enforce an SPP policies at the project level, create a security policy project and link them. Use +project permissions to ensure only the security team has the Owner role in the security policy +project. + +To enforce policies against a project: + + + Create a security policy project at the same level as the target project. + In the security policy project, create the policies required. + Link the target project to the security policy project. + + +Link to a security policy project + + +To enforce the policies contained in an SPP against a project, subgroup, or group, you link them. + +Prerequisites: + + + You must have the Owner role to link to the SPP. For details, see +Separation of duties. + + +To link a project, subgroup, or group to an SPP: + + + On the left sidebar, select Search or go to and find your project, subgroup, or group. + Select Secure > Policies. + Select Edit Policy Project, then search for and select the project you would like to link +from the dropdown list. + Select Save. + + +To unlink a security policy project, follow the same steps but instead select the trash can icon in +the dialog. + +Viewing the linked security policy project + + +All users who have access to the project policy page and are not project owners instead view a +button linking out to the associated security policy project. + +Policy management + + +The Policies page displays deployed policies for all available environments. You can check a +policy’s information (for example, description or enforcement status), and create and edit deployed +policies: + + + On the left sidebar, select Search or go to and find your project. + Select Secure > Policies. + + + + +Policy editor + + + +History + + + + + +Introduced in GitLab 13.4. + + + + + + +You can use the policy editor to create, edit, and delete policies: + + + On the left sidebar, select Search or go to and find your project. + Select Secure > Policies. 
+ + To create a new policy, select New policy, which is located in the Policies page's header. +You can then select which type of policy to create. + To edit an existing policy, select Edit policy in the selected policy drawer. + + + + +The policy editor has two modes: + + + + The visual Rule mode allows you to construct and preview policy +rules using rule blocks and related controls. + + + + + YAML mode allows you to enter a policy definition in .yaml format +and is aimed at expert users and cases that the Rule mode doesn't +support. + + + + + +You can use both modes interchangeably and switch between them at any +time. If a YAML resource is incorrect or contains data not supported +by the Rule mode, Rule mode is automatically +disabled. If the YAML is incorrect, you must use YAML +mode to fix your policy before Rule mode is available again. + +When you finish creating or editing your policy, save and apply it by selecting the +Configure with a merge request button and then merging the resulting merge request. When you +press this button, the policy YAML is validated and any resulting errors are displayed. +Additionally, if you are a project owner and a security policy project has not been previously +associated with this project, then a new project is created and associated automatically at the same +time that the first policy merge request is created. + +Managing projects in bulk via a script + + +You can use the Vulnerability-Check Migration script to bulk create policies or associate security policy projects with development projects. For instructions and a demonstration of how to use the Vulnerability-Check Migration script, see this video. + +Troubleshooting + + + +Branch name 'update-policy-<timestamp>' does not follow the pattern '<branch_name_regex>' + + +When you create a new security policy or change an existing policy, a new branch is automatically created with the branch name following the pattern update-policy-<timestamp>. For example: update-policy-1659094451. + +If you have group or instance push rules that do not allow branch name patterns that contain the text update-policy-, you will get an error that states Branch name 'update-policy-<timestamp>' does not follow the pattern '<branch_name_regex>'. + +The workaround is to amend your group or instance push rules to allow branches following the pattern update-policy- followed by an integer timestamp. + +Troubleshooting common issues configuring security policies + + + + Confirm that scanners are properly configured and producing results for the latest branch. +Security Policies are designed to require approval when there are no results (no security report), +as this ensures that no vulnerabilities are introduced. We cannot know if there are any +vulnerabilities unless the scans enforced by the policy complete successfully and are evaluated. + For merge request approval policies, we require artifacts for each scanner defined in the policy for both the +source and target branch. To ensure merge request approval policies capture the necessary results, confirm +your scan execution is properly implemented and enforced. If using scan execution policies, +enforcing on all branches often addresses this need. + Comparison in merge request approval policies depends on a successful and completed merge base pipeline. If the merge base pipeline is skipped, merge requests with the merge base pipeline are blocked. + When running scan execution policies based on a SAST action, ensure target repositories contain +proper code files.
SAST runs different analyzers +based on the types of files in the repository, +and if no supported files are found it does not run any jobs. See the +SAST CI template +for more details. + Check for any branch configuration conflicts. For example, if your policy is configured to enforce rules on +main but some projects in the scope are using master as their default branch, the policy +is not applied for the latter. You can define policies to enforce rules generically on default +branches regardless of the name used in the project or on all protected branches to address this +issue. + Merge request approval policies created at the group or subgroup level can take some time to apply to all +the merge requests in the group. + Scheduled scan execution policies run with a minimum 15 minute cadence. Learn more about the schedule rule type. + When scheduling pipelines, keep in mind that CRON scheduling is based on UTC on GitLab SaaS and is +based on your server time for self managed instances. When testing new policies, it may appear +pipelines are not running properly when in fact they are scheduled in your server’s time zone. + When enforcing scan execution policies, security policies use a bot in the target project to +trigger scheduled pipelines to ensure enforcement. When the bot is missing, it is automatically +created, and the following scheduled scan uses it. + You should not link a security policy project to a development project and to the group or +subgroup the development project belongs to at the same time. Linking this way results in approval +rules from the Scan Result Policy not being applied to merge requests in the development project. + When creating a Scan Result Policy, neither the array severity_levels nor the array +vulnerability_states in the scan_finding rule +can be left empty. For a working rule, at least one entry must exist. + When merge request approval policies are enforced on projects containing manual jobs in their pipeline, policies evaluate the completed pipeline jobs and ignore the manual jobs. When the manual jobs are run, the policy re-evaluates the MR. + + +If you are still experiencing issues, you can view recent reported bugs and raise new unreported issues. + + +2. Scan execution policies + + + +Scan execution policies + + + +Tier: Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + Group-level security policies introduced in GitLab 15.2. + Group-level security policies enabled on GitLab.com in GitLab 15.4. + Operational container scanning introduced in GitLab 15.5 + Support for custom CI variables in the Scan Execution Policies editor introduced in GitLab 16.2. + Enforcement of scan execution policies on projects with an existing GitLab CI/CD configuration introduced in GitLab 16.2 with a flag named scan_execution_policy_pipelines. Feature flag scan_execution_policy_pipelines removed in GitLab 16.5. + Overriding predefined variables in scan execution policies introduced in GitLab 16.10 with a flag named allow_restricted_variables_at_policy_level. Disabled by default. + + + + + + + + On self-managed GitLab, by default this feature is not available. To make it available, an administrator can enable the feature flag named allow_restricted_variables_at_policy_level. +On GitLab.com and GitLab Dedicated, this feature is not available. +Group, subgroup, or project owners can use scan execution policies to require that security scans +run on a specified schedule or with the project pipeline. 
The security scan runs with multiple +project pipelines if you define the policy at a group or subgroup level. GitLab injects the required +scans into the CI/CD pipeline as new jobs. + + +Scan execution policies are enforced for all applicable projects, even those without a GitLab +CI/CD configuration file or where AutoDevOps is disabled. Security policies create the file +implicitly so that the policies can be enforced. This ensures policies enabling execution of +secret detection, static analysis, or other scanners that do not require a build in the +project, are still able to execute and be enforced. + +GitLab appends a hyphen and a number to the job name. The number is unique per policy action to avoid name conflicts. +If you create a policy at the group level, it applies to every child project or subgroup. You cannot edit a +group-level policy from a child project or subgroup. + +This feature has some overlap with compliance framework pipelines, +as we have not unified the user experience for these two features. +For details on the similarities and differences between these features, see +Enforce scan execution. + + + note Policy jobs for scans other than DAST scans are created in the test stage of the pipeline. If you modify the default pipeline +stages +to remove the test stage, jobs will run in the scan-policies stage instead. This stage is injected into the CI pipeline at evaluation time if it doesn't exist. If the build stage exists, it is injected just after the build stage. If the build stage does not exist, it is injected at the beginning of the pipeline. DAST scans always run in the dast stage. If this stage does not exist, then a dast stage is injected at the end of the pipeline. + + + + + For a video walkthrough, see How to set up Security Scan Policies in GitLab. + + For an overview, see Enforcing scan execution policies on projects with no GitLab CI/CD configuration. + + +Requirements and limitations + + + + The maximum number of scan execution policies is five per security policy project. + + +Scan execution policy editor + + + + note Only group, subgroup, or project Owners have the permissions +to select Security Policy Project. + + +Once your policy is complete, save it by selecting Configure with a merge request +at the bottom of the editor. You are redirected to the merge request on the project's +configured security policy project. If no security policy project is linked to your project, one +is automatically created. Existing policies can also be +removed from the editor interface by selecting Delete policy +at the bottom of the editor. + +Most policy changes take effect as soon as the merge request is merged. Any changes that +do not go through a merge request and are committed directly to the default branch may require up to 10 minutes +before the policy changes take effect. + + + + + note Selection of site and scanner profiles using the rule mode editor for DAST execution policies differs based on +whether the policy is being created at the project or group level. For project-level policies the rule mode editor +presents a list of profiles to choose from that are already defined in the project. For group-level policies +you are required to type in the names of the profiles to use, and to prevent pipeline errors, profiles with +matching names must exist in all of the group's projects.
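+
+For example, the following is a minimal sketch of what you might enter in the editor's YAML mode for a group-level DAST policy. It is an illustration only: the policy name is a placeholder, and the profile names are examples that must match site and scanner profiles defined in every project in the group.
+
+name: Enforce DAST on protected branches
+description: Example group-level policy that references site and scanner profiles by name
+enabled: true
+rules:
+  - type: pipeline
+    branch_type: protected
+actions:
+  - scan: dast
+    scanner_profile: Scanner Profile A
+    site_profile: Site Profile B
+
+The branch_type, scanner_profile, and site_profile fields are described in the schema sections below.
+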
+ + +Scan execution policies schema + + +The YAML file with scan execution policies consists of an array of objects matching scan execution +policy schema nested under the scan_execution_policy key. You can configure a maximum of 5 +policies under the scan_execution_policy key. Any other policies configured after +the first 5 are not applied. + +When you save a new policy, GitLab validates its contents against this JSON schema. +If you’re not familiar with how to read JSON schemas, +the following sections and tables provide an alternative. + + + + + Field + Type + Required + Possible values + Description + + + + + scan_execution_policy + +array of scan execution policy + true +   + List of scan execution policies (maximum 5) + + + + +Scan execution policy schema + + + + + + Field + Type + Required + Possible values + Description + + + + + name + string + true +   + Name of the policy. Maximum of 255 characters. + + + +description (optional) + string + true +   + Description of the policy. + + + enabled + boolean + true + +true, false + + Flag to enable (true) or disable (false) the policy. + + + rules + +array of rules + true +   + List of rules that the policy applies. + + + actions + +array of actions + true +   + List of actions that the policy enforces. + + + + + +pipeline rule type + + + +History + + + + + The branch_type field was introduced in GitLab 16.1 with a flag named security_policies_branch_type. Generally available in GitLab 16.2. Feature flag removed. + The branch_exceptions field was introduced in GitLab 16.3 with a flag named security_policies_branch_exceptions. Generally available in GitLab 16.5. Feature flag removed. + + + + + + + + On self-managed GitLab, by default the branch_exceptions field is available. To hide the feature, an administrator can disable the feature flag named security_policies_branch_exceptions. +On GitLab.com and GitLab Dedicated, this feature is available. + + +This rule enforces the defined actions whenever the pipeline runs for a selected branch. + + + + + Field + Type + Required + Possible values + Description + + + + + type + string + true + pipeline + The rule’s type. + + + +branches 1 + + +array of string + + true if branch_type field does not exist + +* or the branch’s name + The branch the given policy applies to (supports wildcard). + + + +branch_type 1 + + string + true if branches field does not exist + +default, protected or all + + The types of branches the given policy applies to. + + + branch_exceptions + +array of string + + false + Names of branches + Branches to exclude from this rule. + + + + + + You must specify only one of branches or branch_type. + + + +schedule rule type + + + +History + + + + + The branch_type field was introduced in GitLab 16.1 with a flag named security_policies_branch_type. Generally available in GitLab 16.2. Feature flag removed. + The branch_exceptions field was introduced in GitLab 16.3 with a flag named security_policies_branch_exceptions. Generally available in GitLab 16.5. Feature flag removed. + + + + + + + + caution In GitLab 16.1 and earlier, you should not use direct transfer with scheduled scan execution policies. If using direct transfer, first upgrade to GitLab 16.2 and ensure security policy bots are enabled in the projects you are enforcing. + + + + On self-managed GitLab, by default the branch_exceptions field is available. To hide the feature, an administrator can disable the feature flag named security_policies_branch_exceptions. 
+On GitLab.com and GitLab Dedicated, this feature is available. + + +This rule schedules a scan pipeline, enforcing the defined actions on the schedule defined in the cadence field. A scheduled pipeline does not run other jobs defined in the project’s .gitlab-ci.yml file. When a project is linked to a security policy project, a security policy bot is created in the project and will become the author of any scheduled pipelines. + + + + + Field + Type + Required + Possible values + Description + + + + + type + string + true + schedule + The rule’s type. + + + +branches 1 + + +array of string + + true if either branch_type or agents fields does not exist + +* or the branch’s name + The branch the given policy applies to (supports wildcard). + + + +branch_type 1 + + string + true if either branches or agents fields does not exist + +default, protected or all + + The types of branches the given policy applies to. + + + branch_exceptions + +array of string + + false + Names of branches + Branches to exclude from this rule. + + + cadence + string + true + CRON expression (for example, 0 0 * * *) + A whitespace-separated string containing five fields that represents the scheduled time. Minimum of 15 minute intervals when used together with the branches field. + + + timezone + string + false + Time zone identifier (for example, America/New_York) + Time zone to apply to the cadence. Value must be an IANA Time Zone Database identifier. + + + +agents 1 + + object + true if either branch_type or branches fields do not exists +   + The name of the GitLab agents where Operational Container Scanning runs. The object key is the name of the Kubernetes agent configured for your project in GitLab. + + + + + + You must specify only one of branches, branch_type, or agents. + + +Scheduled scan pipelines are triggered by a security policy bot user that is a guest member of the project with elevated permissions for users of type security_policy_bot so it may carry out this task. Security policy bot users are automatically created when the security policy project is linked, and removed when the security policy project is unlinked. + +If the project does not have a security policy bot user, the bot will be automatically created, and the following scheduled scan pipeline will use it. + +GitLab supports the following types of CRON syntax for the cadence field: + + + A daily cadence of once per hour at a specified hour, for example: 0 18 * * * + + A weekly cadence of once per week on a specified day and at a specified hour, for example: 0 13 * * 0 + + + + + note Other elements of the CRON syntax may work in the cadence field if supported by the cron we are using in our implementation, however, GitLab does not officially test or support them. + + +When using the schedule rule type in conjunction with the agents field, note the following: + + + The GitLab Agent for Kubernetes checks every 30 seconds to see if there is an applicable policy. When a policy is found, the scans are executed according to the cadence defined. + The CRON expression is evaluated using the system-time of the Kubernetes-agent pod. + + +When using the schedule rule type in conjunction with the branches field, note the following: + + + The cron worker runs on 15 minute intervals and starts any pipelines that were scheduled to run during the previous 15 minutes. + Based on your rule, you might expect scheduled pipelines to run with an offset of up to 15 minutes. + The CRON expression is evaluated in standard UTC time from GitLab.com. 
If you have a self-managed GitLab instance and have changed the server time zone, the CRON expression is evaluated with the new time zone. + + + + + +agent schema + + +Use this schema to define agents objects in the schedule rule type. + + + + + Field + Type + Required + Description + + + + + namespaces + +array of string + + true + The namespace that is scanned. If empty, all namespaces are scanned. + + + + +Policy example + + +- name: Enforce Container Scanning in cluster connected through my-gitlab-agent for default and kube-system namespaces + enabled: true + rules: + - type: schedule + cadence: '0 10 * * *' + agents: + : + namespaces: + - 'default' + - 'kube-system' + actions: + - scan: container_scanning + + +The keys for a schedule rule are: + + + +cadence (required): a CRON expression for when the scans are run + +agents: (required): The name of the agent to use for scanning + +agents::namespaces (optional): The Kubernetes namespaces to scan. If omitted, all namespaces are scanned. + + + +scan action type + + + +History + + + + + Scan Execution Policies variable precedence was changed in GitLab 16.7 with a flag named security_policies_variables_precedence. Enabled by default. Feature flag removed in GitLab 16.8. + + + + + + +This action executes the selected scan with additional parameters when conditions for at least one +rule in the defined policy are met. + + + + + Field + Type + Possible values + Description + + + + + scan + string + +sast, sast_iac, dast, secret_detection, container_scanning, dependency_scanning + + The action’s type. + + + site_profile + string + Name of the selected DAST site profile. + The DAST site profile to execute the DAST scan. This field should only be set if scan type is dast. + + + scanner_profile + +string or null + + Name of the selected DAST scanner profile. + The DAST scanner profile to execute the DAST scan. This field should only be set if scan type is dast. + + + variables + object +   + A set of CI variables, supplied as an array of key: value pairs, to apply and enforce for the selected scan. The key is the variable name, with its value provided as a string. This parameter supports any variable that the GitLab CI job supports for the specified scan. + + + tags + +array of string + +   + A list of runner tags for the policy. The policy jobs are run by runner with the specified tags. + + + + +Note the following: + + + You must create the site profile and scanner profile +with selected names for each project that is assigned to the selected Security Policy Project. +Otherwise, the policy is not applied and a job with an error message is created instead. + Once you associate the site profile and scanner profile by name in the policy, it is not possible +to modify or delete them. If you want to modify them, you must first disable the policy by setting +the active flag to false. + When configuring policies with a scheduled DAST scan, the author of the commit in the security +policy project’s repository must have access to the scanner and site profiles. Otherwise, the scan +is not scheduled successfully. + For a secret detection scan, only rules with the default ruleset are supported. Custom rulesets +are not supported. Alternatively, you may configure a remote configuration file and set the SECRET_DETECTION_RULESET_GIT_REFERENCE variable. + By default, for scheduled scan execution policies, secret detection scans configured without any CI variables defined run first in historic mode (SECRET_DETECTION_HISTORIC_SCAN = true). 
All subsequent scheduled scans run in default mode with SECRET_DETECTION_LOG_OPTIONS set to the commit range between last run and current SHA. CI variables provided in the scan execution policy can override this behavior. Learn more about historic mode. + For triggered scan execution policies, secret detection works just like regular scan configured manually in the .gitlab-ci.yml. + A container scanning scan that is configured for the pipeline rule type ignores the agent defined in the agents object. The agents object is only considered for schedule rule types. +An agent with a name provided in the agents object must be created and configured for the project. + Variables defined in a Scan Execution Policy follow the standard CI/CD variable precedence. + + Preconfigured values are used for the following CI/CD variables in any project on which a scan +execution policy is enforced. Their values can be overridden, but only if they are declared in +a policy. They cannot be overridden by group or project CI/CD variables: + + +DS_EXCLUDED_PATHS: spec, test, tests, tmp +SAST_EXCLUDED_PATHS: spec, test, tests, tmp +SECRET_DETECTION_EXCLUDED_PATHS: '' +SECRET_DETECTION_HISTORIC_SCAN: false +SAST_DISABLED_ANALYZERS: '' +DS_DISABLED_ANALYZERS: '' + + + In GitLab 16.9 and earlier: + + + If the CI/CD variables suffixed _EXCLUDED_PATHS were declared in a policy, their values could +be overridden by group or project CI/CD variables. + If the CI/CD variables suffixed _DISABLED_ANALYZERS were declared in a policy, their values were +ignored, regardless of where they were defined: policy, group, or project. + + + + +Example security policies project + + +You can use this example in a .gitlab/security-policies/policy.yml file stored in a +security policy project: + +--- +scan_execution_policy: +- name: Enforce DAST in every release pipeline + description: This policy enforces pipeline configuration to have a job with DAST scan for release branches + enabled: true + rules: + - type: pipeline + branches: + - release/* + actions: + - scan: dast + scanner_profile: Scanner Profile A + site_profile: Site Profile B +- name: Enforce DAST and secret detection scans every 10 minutes + description: This policy enforces DAST and secret detection scans to run every 10 minutes + enabled: true + rules: + - type: schedule + branches: + - main + cadence: ""*/10 * * * *"" + actions: + - scan: dast + scanner_profile: Scanner Profile C + site_profile: Site Profile D + - scan: secret_detection +- name: Enforce Secret Detection and Container Scanning in every default branch pipeline + description: This policy enforces pipeline configuration to have a job with Secret Detection and Container Scanning scans for the default branch + enabled: true + rules: + - type: pipeline + branches: + - main + actions: + - scan: secret_detection + - scan: sast + variables: + SAST_EXCLUDED_ANALYZERS: brakeman + - scan: container_scanning + + +In this example: + + + For every pipeline executed on branches that match the release/* wildcard (for example, branch +release/v1.2.1) + + DAST scans run with Scanner Profile A and Site Profile B. + + + DAST and secret detection scans run every 10 minutes. The DAST scan runs with Scanner Profile C +and Site Profile D. + Secret detection, container scanning, and SAST scans run for every pipeline executed on the main +branch. The SAST scan runs with the SAST_EXCLUDED_ANALYZER variable set to ""brakeman"". 
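+
+The examples above use the branches field. As a further illustration only, the following sketch combines other fields documented in the schema above (branch_type, branch_exceptions, timezone, and runner tags). The policy name, excluded branch name, and runner tag are placeholder values:
+
+- name: Enforce nightly secret detection on default branches
+  enabled: true
+  rules:
+  - type: schedule
+    cadence: '0 2 * * *'
+    timezone: 'America/New_York'
+    branch_type: default
+    branch_exceptions:
+    - 'legacy-archive'
+  actions:
+  - scan: secret_detection
+    tags:
+    - 'security-runner'
+
+Unless a timezone is set as shown, the cadence for scheduled policies is evaluated in UTC on GitLab.com, as described earlier.
+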
+ + +Example for scan execution policy editor + + +You can use this example in the YAML mode of the scan execution policy editor. +It corresponds to a single object from the previous example. + +name: Enforce Secret Detection and Container Scanning in every default branch pipeline +description: This policy enforces pipeline configuration to have a job with Secret Detection and Container Scanning scans for the default branch +enabled: true +rules: + - type: pipeline + branches: + - main +actions: + - scan: secret_detection + - scan: container_scanning + + +Avoiding duplicate scans + + +Scan execution policies can cause the same type of scanner to run more than once if developers include scan jobs in the project’s +.gitlab-ci.yml file. This behavior is intentional as scanners can run more than once with different variables and settings. For example, a +developer may want to try running a SAST scan with different variables than the one enforced by the security and compliance team. In +this case, two SAST jobs run in the pipeline, one with the developer’s variables and one with the security and compliance team’s variables. + +If you want to avoid running duplicate scans, you can either remove the scans from the project’s .gitlab-ci.yml file or disable your +local jobs by setting SAST_DISABLED: ""true"". Disabling jobs this way does not prevent the security jobs defined by scan execution +policies from running. + +Experimental features + + + +Status: Experiment + +These experimental features have limitations: + + + Enforcing pipeline execution using the pipeline execution action in projects +without a .gitlab-ci.yml is not supported. + The pipeline execution action cannot be used with a scheduled trigger type. + + +Have feedback on our experimental features? We’d love to hear it! Please share your thoughts in our +feedback issue. + +Pipeline execution policy action + + +Prerequisites: + + + + To enable the pipeline execution policy action feature, a Group owner or administrator must enable +the experimental feature: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > General. + Expand Permissions and group features. + Select the Security policy pipeline execution action checkbox. + + Optional. Select Enforce for all subgroups. + + If the setting is not enforced for all subgroups, subgroup owners can manage the setting per subgroup. + + + + + +The pipeline execution policy action introduces a new scan action type into +scan execution policies for creating and enforcing custom CI in your target +development projects. + +This custom scan type uses a remote CI configuration file to define the custom +CI you want enforced. Scan execution policies then merge this file with the +project’s .gitlab-ci.yml to execute the compliance jobs for each project +enforced by the policy. + + +ci_configuration_path object + + + + + + Field + Type + Required + Description + + + + + project + string + true + A project namespace path. + + + file + string + true + The filename of the CI/CD YAML file. + + + ref + string + false + The branch name, tag name, or commit SHA. If not specified, uses the default branch. + + + + + +scan action type + + +This action executes the selected scan with additional parameters when +conditions for at least one rule in the defined policy are met. + + + + + Field + Type + Possible values + Description + + + + + scan + string + custom + The action’s type. + + + ci_configuration + string +   + GitLab CI YAML as formatted as string. 
+ + + ci_configuration_path + object +   + Object with project path and filename pointing to a CI configuration. + + + + +Note the following: + + + For custom scans, you must specify one of ci_configuration or ci_configuration_path. + +custom scans are being executed for triggered rules only. + Jobs variables from custom scans take precedence over the project’s CI/CD configuration. + Users triggering a pipeline must have at least read access to CI files specified in the ci_configuration_path or included in the CI/CD configuration. + It is not possible to define custom stages using the stages keyword in a custom scan action. Instead three default stages will be added to the pipeline: + + +.pipeline-policy-preat the beginning of the pipeline, before the .pre stage. + +.pipeline-policy-test after the test stage. If the test stage does not exist, it will be injected after the build stage. If the build stage does not exist, it will be injected at the beginning of the pipeline after the .pre stage. + +.pipeline-policy-post at the very end of the pipeline, after the .post stage. + + + Jobs without a stage are assigned to the .pipeline-policy-test stage by default. + + +Example security policies project + + +You can use this example in a .gitlab/security-policies/policy.yml file stored in a +security policy project: + +--- +scan_execution_policy: +- name: Create a custom scan that injects test job + description: This policy enforces pipeline configuration to have a job with DAST scan for release branches + enabled: true + rules: + - type: pipeline + branches: + - release/* + actions: + - scan: custom + ci_configuration: |- + test job: + stage: test + script: + - echo ""Hello World"" + + +In this example a test job is injected into the test stage of the pipeline, printing Hello World. + +Security policy scopes + + +Prerequisites: + + + + To enable the pipeline execution policy action feature, a group owner or administrator must enable +the experimental feature: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > General. + Expand Permissions and group features. + Select the Security Policy Scopes checkbox. + + Optional. Select Enforce for all subgroups. + + If the setting is not enforced for all subgroups, subgroup owners can manage the setting per subgroup. + + + + + +Security policy enforcement depends first on establishing a link between the group, subgroup, or +project on which you want to enforce policies, and the security policy project that contains the +policies. For example, if you are linking policies to a group, a group owner must create the link to +the security policy project. Then, all policies in the security policy project are inherited by all +projects in the group. + +You can refine a security policy’s scope to: + + + +Include only projects containing a compliance framework label. + +Include or exclude selected projects from enforcement. + + +Policy scope schema + + + + + + Field + Type + Required + Possible values + Description + + + + + policy_scope + object + false + +compliance_frameworks, projects + + Scopes the policy based on compliance framework labels or projects you define. + + + + + +policy_scope scope type + + + + + + Field + Type + Possible values + Description + + + + + compliance_frameworks + object + ids + List of IDs of the compliance frameworks in scope of enforcement, in an ids array. 
+ + + projects + object + +including, excluding + + Use excluding: or including: then list the IDs of the projects you wish to include or exclude, in an ids array. + + + + +Example policy.yml with security policy scopes + + +--- +scan_execution_policy: +- name: Enforce DAST in every release pipeline + description: This policy enforces pipeline configuration to have a job with DAST scan for release branches + enabled: true + rules: + - type: pipeline + branches: + - release/* + actions: + - scan: dast + scanner_profile: Scanner Profile A + site_profile: Site Profile B + policy_scope: + compliance_frameworks: + ids: + - 2 + - 11 +- name: Enforce Secret Detection and Container Scanning in every default branch pipeline + description: This policy enforces pipeline configuration to have a job with Secret Detection and Container Scanning scans for the default branch + enabled: true + rules: + - type: pipeline + branches: + - main + actions: + - scan: secret_detection + - scan: sast + variables: + SAST_EXCLUDED_ANALYZERS: brakeman + policy_scope: + projects: + excluding: + ids: + - 24 + - 27 + + + +" +how can i achieve high availability with gitlab?,,"1. Reference architectures + + + +Reference architectures + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed + +The GitLab Reference Architectures have been designed and tested by the +GitLab Quality Engineering and Support teams to provide recommended deployments at scale. + +Available reference architectures + + +The following Reference Architectures are available as recommended starting points for your environment. + +The architectures are named in terms of total load, both manual and automated, correlated to user count and based on real data along with substantial headroom added to add additional coverage for most scenarios. + +However, it should be noted that in some cases, known heavy scenarios such as large monorepos or notable additional workloads may require adjustments to be made. + +For details about what each Reference Architecture has been tested against, see the “Testing Methodology” section of each page. 
+ +GitLab package (Omnibus) + + +Below is a list of Linux package based architectures: + + + +Up to 1,000 users API: 20 RPS, Web: 2 RPS, Git (Pull): 2 RPS, Git (Push): 1 RPS + + +Up to 2,000 users API: 40 RPS, Web: 4 RPS, Git (Pull): 4 RPS, Git (Push): 1 RPS + + +Up to 3,000 users API: 60 RPS, Web: 6 RPS, Git (Pull): 6 RPS, Git (Push): 1 RPS + + +Up to 5,000 users API: 100 RPS, Web: 10 RPS, Git (Pull): 10 RPS, Git (Push): 2 RPS + + +Up to 10,000 users API: 200 RPS, Web: 20 RPS, Git (Pull): 20 RPS, Git (Push): 4 RPS + + +Up to 25,000 users API: 500 RPS, Web: 50 RPS, Git (Pull): 50 RPS, Git (Push): 10 RPS + + +Up to 50,000 users API: 1000 RPS, Web: 100 RPS, Git (Pull): 100 RPS, Git (Push): 20 RPS + + + +Cloud native hybrid + + +Below is a list of Cloud Native Hybrid reference architectures, where select recommended components can be run in Kubernetes: + + + +Up to 2,000 users API: 40 RPS, Web: 4 RPS, Git (Pull): 4 RPS, Git (Push): 1 RPS + + +Up to 3,000 users API: 60 RPS, Web: 6 RPS, Git (Pull): 6 RPS, Git (Push): 1 RPS + + +Up to 5,000 users API: 100 RPS, Web: 10 RPS, Git (Pull): 10 RPS, Git (Push): 2 RPS + + +Up to 10,000 users API: 200 RPS, Web: 20 RPS, Git (Pull): 20 RPS, Git (Push): 4 RPS + + +Up to 25,000 users API: 500 RPS, Web: 50 RPS, Git (Pull): 50 RPS, Git (Push): 10 RPS + + +Up to 50,000 users API: 1000 RPS, Web: 100 RPS, Git (Pull): 100 RPS, Git (Push): 20 RPS + + + +Before you start + + +The first choice to consider is whether a Self Managed approach is correct for you and your requirements. + +Running any application in production is complex, and the same applies for GitLab. While we aim to make this as smooth as possible, there are still the general complexities. This depends on the design chosen, but typically you’ll need to manage all aspects such as hardware, operating systems, networking, storage, security, GitLab itself, and more. This includes both the initial setup of the environment and the longer term maintenance. + +As such, it’s recommended that you have a working knowledge of running and maintaining applications in production when deciding on going down this route. If you aren’t in this position, our Professional Services team offers implementation services, but for those who want a more managed solution long term, it’s recommended to instead explore our other offerings such as GitLab SaaS or GitLab Dedicated. + +If Self Managed is the approach you’re considering, it’s strongly encouraged to read through this page in full, in particular the Deciding which architecture to use, Large monorepos and Additional workloads sections. + +Deciding which architecture to use + + +The Reference Architectures are designed to strike a balance between two important factors–performance and resilience. + +While they are designed to make it easier to set up GitLab at scale, it can still be a challenge to know which one meets your requirements. + +As a general guide, the more performant and/or resilient you want your environment to be, the more complex it is. + +This section explains the designs you can choose from. It begins with the least complexity, goes to the most, and ends with a decision tree. + +Expected Load (RPS) + + +The first thing to check is what the expected load is your environment would be expected to serve. 
+ +The Reference Architectures have been designed with substantial headroom by default, but it’s recommended to also check the +load of what each architecture has been tested against under the “Testing Methodology” section found on each page, +comparing those values with what load you are expecting against your existing GitLab environment to help select the right Reference Architecture +size. + +Load is given in terms of Requests per Second (RPS) for each endpoint type (API, Web, Git). This information on your existing infrastructure +can typically be surfaced by most reputable monitoring solutions or in some other ways such as load balancer metrics. For example, on existing GitLab environments, +Prometheus metrics such as gitlab_transaction_duration_seconds can be used to see this data. + +Standalone (non-HA) + + +For environments serving 2,000 or fewer users, we generally recommend a standalone approach by deploying a non-highly available single or multi-node environment. With this approach, you can employ strategies such as automated backups for recovery to provide a good level of RPO / RTO while avoiding the complexities that come with HA. + +With standalone setups, especially single node environments, there are various options available for installation and management including the ability to deploy directly via select cloud provider marketplaces that reduce the complexity a little further. + +High Availability (HA) + + +High Availability ensures every component in the GitLab setup can handle failures through various mechanisms. However, to achieve this is complex, and the environments required can be sizable. + +For environments serving 3,000 or more users we generally recommend that a HA strategy is used as at this level outages have a bigger impact against more users. All the architectures in this range have HA built in by design for this reason. + +Do you need High Availability (HA)? + + +As mentioned above, achieving HA does come at a cost. The environment requirements are sizable as each component needs to be multiplied, which comes with additional actual and maintenance costs. + +For a lot of our customers with fewer than 3,000 users, we’ve found a backup strategy is sufficient and even preferable. While this does have a slower recovery time, it also means you have a much smaller architecture and less maintenance costs as a result. + +In general then, we’d only recommend you employ HA in the following scenarios: + + + When you have 3,000 or more users. + When GitLab being down would critically impact your workflow. + + +Scaled-down High Availability (HA) approaches + + +If you still need to have HA for a lower number of users, this can be achieved with an adjusted 3K architecture. + +Zero Downtime Upgrades + + +Zero Downtime Upgrades are available for standard Reference Architecture environments with HA (Cloud Native Hybrid is not supported). This allows for an environment to stay up during an upgrade, but the process is more complex as a result and has some limitations as detailed in the documentation. + +When going through this process it’s worth noting that there may still be brief moments of downtime when the HA mechanisms take effect. + +In most cases the downtime required for doing an upgrade shouldn’t be substantial, so this is only recommended if it’s a key requirement for you. + +Cloud Native Hybrid (Kubernetes HA) + + +As an additional layer of HA resilience you can deploy select components in Kubernetes, known as a Cloud Native Hybrid Reference Architecture. 
For stability +reasons, stateful components such as Gitaly cannot be deployed in Kubernetes. + +This is an alternative and more advanced setup compared to a standard Reference Architecture. Running services in Kubernetes is well known to be complex. This setup is only recommended if you have strong working knowledge and experience in Kubernetes. + +GitLab Geo (Cross Regional Distribution / Disaster Recovery) + + +With GitLab Geo, you can achieve distributed environments in +different regions with a full Disaster Recovery (DR) setup in place. GitLab Geo +requires at least two separate environments: + + + One primary site. + One or more secondary sites that serve as replicas. + + +If the primary site becomes unavailable, you can fail over to one of the secondary sites. + +This advanced and complex setup should only be undertaken if DR is +a key requirement for your environment. You must also make additional decisions +on how each site is configured, such as if each secondary site would be the +same architecture as the primary, or if each site is configured for HA. + +Large monorepos / Additional workloads + + +If you have any large monorepos or significant additional workloads, these can affect the performance of the environment notably and adjustments may be required depending on the context. + +If either applies to you, it’s encouraged for you to reach out to your Customer Success Manager or our Support team +for further guidance. + +Cloud provider services + + +For all the previously described strategies, you can run select GitLab components on equivalent cloud provider services such as the PostgreSQL database or Redis. + +For more information, see the recommended cloud providers and services. + +Decision Tree + + +Below you can find the above guidance in the form of a decision tree. It’s recommended you read through the above guidance in full first before though. 
What Reference Architecture should I use?

What is your expected load?

 Equivalent to 3,000 users or more? Do you have experience with, and want additional resilience from, running select components in Kubernetes?

  Yes: Recommendation: the Cloud Native Hybrid architecture closest to your user count.
  No: Recommendation: the architecture closest to your user count, with HA.

 Equivalent to 2,000 users or less? Do you need HA (or Zero-Downtime Upgrades)?

  Yes: Recommendation: the 3K architecture with HA and supported reductions.
  No: Recommendation: a standalone 1K or 2K architecture with backups.

Do you need cross regional distribution or disaster recovery? Additional recommendation: GitLab Geo.

Do you have large monorepos or expect to have substantial additional workloads? Additional recommendation: contact your Customer Success Manager or Support.

Requirements


Before implementing a reference architecture, refer to the following requirements and guidance.

Supported CPUs


These reference architectures were built and tested on Google Cloud Platform (GCP) using the
Intel Xeon E5 v3 (Haswell)
CPU platform as the lowest common denominator baseline (Sysbench benchmark).
Newer, similarly-sized CPUs are supported and may have improved performance as a result.

ARM CPUs are supported for Linux package environments as well as for any Cloud Provider services where applicable.

 note Any “burstable” instance types are not recommended due to inconsistent performance.

Supported disk types


As a general guidance, most standard disk types are expected to work for GitLab, but be aware of the following specific call-outs:

 Gitaly requires at least 8,000 input/output operations per second (IOPS) for read operations, and 2,000 IOPS for write operations.
 We don't recommend the use of any disk types that are “burstable” due to inconsistent performance.

Outside of the above, disk types are expected to work for GitLab, and the choice of each depends on your specific requirements around areas such as durability or costs.

Supported infrastructure


As a general guidance, GitLab should run on most infrastructure such as reputable Cloud Providers (AWS, GCP, Azure) and
their services, or self-managed (ESXi), that meet both:

 The specifications detailed in each reference architecture.
 Any requirements in this section.

However, this does not constitute a guarantee for every potential permutation.

See Recommended cloud providers and services for more information.

Large Monorepos


The reference architectures were tested with repositories of varying sizes that follow best practices.

However, large monorepos (several gigabytes or more) can significantly impact the performance of Git and in turn the environment itself.
Their presence, as well as how they are used, can put a significant strain on the entire system, from Gitaly through to the underlying infrastructure.

 caution If this applies to you, we strongly recommend referring to the linked documentation as well as reaching out to your Customer Success Manager or our Support team for further guidance.

As such, large monorepos come with notable cost. If you have such a repository, we strongly recommend following the guidance below to ensure the best chance of good performance and to keep costs in check:

 Optimize the large monorepo. Using features such as
LFS to not store binaries, and other approaches for reducing repository size, can
dramatically improve performance and reduce costs.
 Depending on the monorepo, increased environment specifications may be required to compensate. Gitaly in particular will likely require additional resources along with Praefect, GitLab Rails, and Load Balancers. This depends notably on the monorepo itself and the usage against it.
+ When the monorepo is significantly large (20 gigabytes or more) further additional strategies maybe required such as even further increased specifications or in some cases a separate Gitaly backend for the monorepo alone. + Network and disk bandwidth is another potential consideration with large monorepos. In very heavy cases, it’s possible to see bandwidth saturation if there’s a high amount of concurrent clones (such as with CI). It’s strongly recommended reducing full clones wherever possible in this scenario. Otherwise, additional environment specifications may be required to increase bandwidth, but this differs between cloud providers. + + +Additional workloads + + +These reference architectures have been designed and tested for standard GitLab +setups with good headroom in mind to cover most scenarios. + +However, additional workloads can multiply the impact of operations by triggering follow-up actions. +You may need to adjust the suggested specifications to compensate if you use, for example: + + + Security software on the nodes. + Hundreds of concurrent CI jobs for large repositories. + Custom scripts that run at high frequency. + +Integrations in many large projects. + +Server hooks. + +System hooks. + + +As a general rule, you should have robust monitoring in place to measure the impact of any additional workloads to +inform any changes needed to be made. It’s also strongly encouraged for you to reach out to your Customer Success Manager or our Support team +for further guidance. + +Load Balancers + + +The Reference Architectures make use of up to two Load Balancers depending on the class: + + + External Load Balancer - Serves traffic to any external facing components, primarily Rails. + Internal Load Balancer - Serves traffic to select internal components that have been deployed in an HA fashion such as Praefect or PgBouncer. + + +The specifics on which load balancer to use, or its exact configuration is beyond the scope of GitLab documentation. The most common options +are to set up load balancers on machine nodes or to use a service such as one offered by Cloud Providers. If deploying a Cloud Native Hybrid environment the Charts can handle the set-up of the External Load Balancer via Kubernetes Ingress. + +For each Reference Architecture class a base machine size has given to help get you started if you elect to deploy directly on machines, but these may need to be adjusted accordingly depending on the load balancer used and amount of workload. Of note machines can have varying network bandwidth that should also be taken into consideration. + +Note the following sections of additional guidance for Load Balancers. + +Balancing algorithm + + +We recommend that a least-connection-based load balancing algorithm or equivalent is used wherever possible to ensure equal spread of calls to the nodes and good performance. + +We don’t recommend the use of round-robin algorithms as they are known to not spread connections equally in practice. + +Network Bandwidth + + +The total network bandwidth available to a load balancer when deployed on a machine can vary notably across Cloud Providers. In particular some Cloud Providers, like AWS, may operate on a burst system with credits to determine the bandwidth at any time. + +The network bandwidth your environment’s load balancers will require is dependent on numerous factors such as data shape and workload. 
The recommended base sizes for each Reference Architecture class have been selected to give a good level of bandwidth with adequate headroom but in some scenarios, such as consistent clones of large monorepos, the sizes may need to be adjusted accordingly. + +No swap + + +Swap is not recommended in the reference architectures. It’s a failsafe that impacts performance greatly. The +reference architectures are designed to have memory headroom to avoid needing swap. + +Praefect PostgreSQL + + +Praefect requires its own database server and +that to achieve full High Availability, a third-party PostgreSQL database solution is required. + +We hope to offer a built-in solution for these restrictions in the future. In the meantime, a non-HA PostgreSQL server +can be set up using the Linux package as the specifications reflect. Refer to the following issues for more information: + + + +omnibus-gitlab#7292. + +gitaly#3398. + + +Recommended cloud providers and services + + + + note The following lists are non-exhaustive. Generally, other cloud providers not listed +here likely work with the same specs, but this hasn’t been validated. +Additionally, when it comes to other cloud provider services not listed here, +it’s advised to be cautious as each implementation can be notably different +and should be tested thoroughly before production use. + + +Through testing and real life usage, the Reference Architectures are recommended on the following cloud providers: + + + + + Reference Architecture + GCP + AWS + Azure + Bare Metal + + + + + Linux package + 🟢 + 🟢 + 🟢1 + + 🟢 + + + Cloud Native Hybrid + 🟢 + 🟢 + + + + + + +Additionally, the following cloud provider services are recommended for use as part of the Reference Architectures: + + + + + Cloud Service + GCP + AWS + Azure + Bare Metal + + + + + Object Storage + 🟢   Cloud Storage + + 🟢   S3 + + 🟢   Azure Blob Storage + + 🟢   MinIO + + + + Database + 🟢   Cloud SQL + + 🟢   RDS + + 🟢   Azure Database for PostgreSQL Flexible Server + + + + + Redis + 🟢   Memorystore + + 🟢   ElastiCache + + 🟢   Azure Cache for Redis (Premium) + + + + + + +Recommendation notes for the database services + + +When selecting to use an external database service, it should run a standard, performant, and supported version. + +If you choose to use a third party external service: + + + Note that the HA Linux package PostgreSQL setup encompasses PostgreSQL, PgBouncer and Consul. All of these components would no longer be required when using a third party external service. + The number of nodes required to achieve HA may differ depending on the service compared to the Linux package and doesn’t need to match accordingly. + However, if Database Load Balancing via Read Replicas is desired for further improved performance it’s recommended to follow the node count for the Reference Architecture. + Ensure that if a pooler is offered as part of the service that it can handle the total load without bottlenecking. +For example, Azure Database for PostgreSQL Flexible Server can optionally deploy a PgBouncer pooler in front of the Database, but PgBouncer is single threaded, so this in turn may cause bottlenecking. However, if using Database Load Balancing, this could be enabled on each node in distributed fashion to compensate. + If GitLab Geo is to be used the service will need to support Cross Region replication. + + +Recommendation notes for the Redis services + + +When selecting to use an external Redis service, it should run a standard, performant, and supported version. 
Note that this specifically must not be run in Cluster mode as this is unsupported by GitLab. + +Redis is primarily single threaded. For the 10,000 user and above Reference Architectures, separate out the instances as specified into Cache and Persistent data to achieve optimum performance at this scale. + +Recommendation notes for Object Storage + + +GitLab has been tested against various Object Storage providers that are expected to work. + +As a general guidance, it’s recommended to use a reputable solution that has full S3 compatibility. + +Unsupported database services + + +Several database cloud provider services are known not to support the above or have been found to have other issues and aren’t recommended: + + + +Amazon Aurora is incompatible and not supported. See 14.4.0 for more details. + +Azure Database for PostgreSQL Single Server is not supported as the service is now deprecated and runs on an unsupported version of PostgreSQL. It was also found to have notable performance and stability issues. + +Google AlloyDB and Amazon RDS Multi-AZ DB cluster have not been tested and are not recommended. Both solutions are specifically not expected to work with GitLab Geo. + + +Amazon RDS Multi-AZ DB instance is a separate product and is supported. + + + + +Deviating from the suggested reference architectures + + +As a general guideline, the further away you move from the reference architectures, +the harder it is to get support for it. With any deviation, you’re introducing +a layer of complexity that adds challenges to finding out where potential +issues might lie. + +The reference architectures use the official Linux packages or Helm Charts to +install and configure the various components. The components are +installed on separate machines (virtualized or bare metal), with machine hardware +requirements listed in the “Configuration” column and equivalent VM standard sizes listed +in GCP/AWS/Azure columns of each available reference architecture. + +Running components on Docker (including Docker Compose) with the same specs should be fine, as Docker is well known in terms of support. +However, it is still an additional layer and may still add some support complexities, such as not being able to run strace easily in containers. + +Unsupported designs + + +While we endeavour to try and have a good range of support for GitLab environment designs, there are certain approaches we know definitively not to work, and as a result are not supported. Those approaches are detailed in the following sections. + +Stateful components in Kubernetes + + +Running stateful components in Kubernetes, such as Gitaly Cluster, is not supported. + +Gitaly Cluster is only supported on conventional virtual machines. Kubernetes enforces strict memory restrictions, but Git memory usage is unpredictable, which +can cause sporadic OOM termination of Gitaly pods, leading to significant disruptions and potential data loss. For this reason and others, Gitaly is not tested +or supported in Kubernetes. For more information, see epic 6127. + +This also applies to other third-party stateful components such as Postgres and Redis, but you can explore other third-party solutions for those components if desired such as supported Cloud Provider services unless called out specifically as unsupported. + +Autoscaling of stateful nodes + + +As a general guidance, only stateless components of GitLab can be run in Autoscaling groups, namely GitLab Rails +and Sidekiq. 
+ +Other components that have state, such as Gitaly, are not supported in this fashion (for more information, see issue 2997). + +This also applies to other third-party stateful components such as Postgres and Redis, but you can explore other third-party solutions for those components if desired such as supported Cloud Provider services unless called out specifically as unsupported. + +Spreading one environment over multiple data centers + + +Deploying one GitLab environment over multiple data centers is not supported due to potential split brain edge cases +if a data center were to go down. For example, several components of the GitLab setup, namely Consul, Redis Sentinel and Praefect require an odd number quorum to function correctly and splitting over multiple data centers can impact this notably. + +For deploying GitLab over multiple data centers or regions we offer GitLab Geo as a comprehensive solution. + +Validation and test results + + +The Quality Engineering team +does regular smoke and performance tests for the reference architectures to ensure they +remain compliant. + +Why we perform the tests + + +The Quality Department has a focus on measuring and improving the performance +of GitLab, as well as creating and validating reference architectures that +self-managed customers can rely on as performant configurations. + +For more information, see our handbook page. + +How we perform the tests + + +Testing occurs against all reference architectures and cloud providers in an automated and ad-hoc fashion. This is done by two tools: + + + The GitLab Environment Toolkit Terraform and Ansible scripts for building the environments. + The GitLab Performance Tool for performance testing. + + +Network latency on the test environments between components on all Cloud Providers were measured at <5 ms. This is shared as an observation and not as an implicit recommendation. + +We aim to have a “test smart” approach where architectures tested have a good range that can also apply to others. Testing focuses on a 10k Linux package +installation on GCP as the testing has shown this is a good bellwether for the other architectures and cloud providers as well as Cloud Native Hybrids. + +The Standard Reference Architectures are designed to be platform-agnostic, with everything being run on VMs through the Linux package. While testing occurs primarily on GCP, ad-hoc testing has shown that they perform similarly on hardware with equivalent specs on other Cloud Providers or if run on premises (bare-metal). + +Testing on these reference architectures is performed with the +GitLab Performance Tool +at specific coded workloads, and the throughputs used for testing are +calculated based on sample customer data. Select the +reference architecture that matches your scale. + +Each endpoint type is tested with the following number of requests per second (RPS) +per 1,000 users: + + + API: 20 RPS + Web: 2 RPS + Git (Pull): 2 RPS + Git (Push): 0.4 RPS (rounded to the nearest integer) + + +The above targets were selected based on real customer data of total environmental loads corresponding to the user count, including CI and other workloads along with additional substantial headroom added. + +How to interpret the results + + + + note Read our blog post on how our QA team leverages GitLab performance testing tool. + + +Testing is done publicly, and all results are shared. + +The following table details the testing done against the reference architectures along with the frequency and results. 
Additional testing is continuously evaluated, and the table is updated accordingly.

Test coverage by Reference Architecture and cloud provider (GCP results also act as a proxy for Bare-Metal):

 1k: GCP Linux package: Weekly
 2k: GCP Linux package: Weekly; Azure Linux package: Planned
 3k: GCP Linux package: Weekly; AWS Cloud Native Hybrid: Weekly
 5k: GCP Linux package: Weekly
 10k: GCP Linux package: Daily; GCP Cloud Native Hybrid: Weekly; AWS Linux package: Weekly; AWS Cloud Native Hybrid: Weekly
 25k: GCP Linux package: Weekly
 50k: GCP Linux package: Weekly

Cost to run


As a starting point, the following details initial costs for the different reference architectures across GCP, AWS, and Azure through the Linux package.

 note Due to the nature of Cloud Native Hybrid, it's not possible to give a static cost calculation.
Bare-metal costs are also not included here as it varies widely depending on each configuration.

For every Reference Architecture size from 1k through 50k, a calculated cost estimate is available for the Linux package on each of GCP, AWS, and Azure.

Maintaining a Reference Architecture environment


Maintaining a Reference Architecture environment is generally the same as maintaining any other GitLab environment, and is generally covered in other sections of this documentation.

In this section you'll find links to documentation for relevant areas as well as any specific Reference Architecture notes.

Upgrades


Upgrades for a Reference Architecture environment are the same as for any other GitLab environment.
The main Upgrade GitLab section has detailed steps on how to approach this.

Zero-downtime upgrades are also available.

 note You should upgrade a Reference Architecture in the same order as you created it.

Scaling an environment


The Reference Architectures have been designed to support scaling in various ways depending on your use case and circumstances.
This can be done iteratively, or wholesale to the next size of architecture, depending on whether metrics suggest a component is being exhausted.

 note If you're seeing a component continuously exhausting its given resources, it's strongly recommended that you reach out to our Support team before performing any scaling. This is especially so if you're planning to scale any component significantly.

For most components, vertical and horizontal scaling can be applied as normal. However, before doing so, please be aware of the below caveats:

 When scaling Puma or Sidekiq vertically, the number of workers will need to be adjusted to use the additional specs. Puma will be scaled automatically on the next reconfigure, but Sidekiq will need its configuration changed beforehand.
 Redis and PgBouncer are primarily single threaded. If these components are seeing CPU exhaustion they may need to be scaled out horizontally.
+ Scaling certain components significantly can result in notable knock on effects that affect the performance of the environment. Refer to the dedicated section below for more guidance. + + +Conversely, if you have robust metrics in place that show the environment is over-provisioned, you can scale downwards similarly. +You should take an iterative approach when scaling downwards, however, to ensure there are no issues. + +Scaling knock on effects + + +In some cases scaling a component significantly may result in knock on effects for downstream components, impacting performance. The Reference Architectures were designed with balance in mind to ensure components that depend on each other are congruent in terms of specs. As such you may find when notably scaling a component that it’s increase may result in additional throughput being passed to the other components it depends on and that they, in turn, may need to be scaled as well. + + + note As a general rule most components have good headroom to accommodate an upstream component being scaled, so this is typically on a case by case basis and specific to what has been changed. It’s recommended for you to reach out to our Support team before you make any significant changes to the environment. + + +The following components can impact others when they have been significantly scaled: + + + Puma and Sidekiq - Notable scale ups of either Puma or Sidekiq workers will result in higher concurrent connections to the internal load balancer, PostgreSQL (via PgBouncer if present), Gitaly (via Praefect if present) and Redis respectively. + + Redis is primarily single threaded and in some cases may need to be split up into different instances (Cache / Persistent) if the increased throughput causes CPU exhaustion if a combined cluster is currently being used. + PgBouncer is also single threaded but note that a scale out will result in a new pool being added that in turn will increase total connections to Postgres. It’s strongly recommended to only do this if you have experience in managing Postgres connections and to seek assistance if in doubt. + + + Gitaly Cluster / PostgreSQL - A notable scale out of additional nodes can have a detrimental effect on the HA system and performance due to increased replication calls to the primary node. + + +Scaling from a non-HA to an HA architecture + + +While in most cases vertical scaling is only required to increase an environment’s resources, if you are moving to an HA environment +additional steps will be required for the following components to switch over to their HA forms respectively by following the given +documentation for each as follows + + + Redis to multi-node Redis w/ Redis Sentinel + Postgres to multi-node Postgres w/ Consul + PgBouncer + Gitaly to Gitaly Cluster w/ Praefect + + +Monitoring + + +There are numerous options available to monitor your infrastructure, as well as GitLab itself, and you should refer to your selected monitoring solution’s documentation for more information. + +Of note, the GitLab application is bundled with Prometheus as well as various Prometheus compatible exporters that could be hooked into your solution. + +Update history + + +Below is a history of notable updates for the Reference Architectures (2021-01-01 onward, ascending order), which we aim to keep updated at least once per quarter. + +You can find a full history of changes on the GitLab project. + +2024: + + + +2024-02: Updated recommended sizings for Load Balancer nodes if deployed on VMs. 
Also added notes on network bandwidth considerations.

2024-02: Remove the Sidekiq Max Concurrency setting in examples as this is deprecated and no longer required to be set explicitly.

2024-02: Adjusted the Sidekiq recommendations on 2k to disable Sidekiq on Rails nodes and updated architecture diagram.

2024-01: Updated recommendations for Azure for all Reference Architecture sizes and latest cloud services.


2023:


2023-12-12: Updated notes on Load Balancers to be more reflective that any reputable offering is expected to work.

2023-11-03: Expand details on what each Reference Architecture is designed for, the testing methodology used as well as added details on how to scale environments.

2023-11-03: Add expanded notes on disk types, object storage and monitoring.

2023-10-25: Adjust Sidekiq configuration example to use Linux Package role.

2023-10-15: Adjust the Sidekiq recommendations to include a separate node for 2k and tweaks to instance type and counts for 3k and 5k.

2023-10-08: Add more expanded notes throughout to warn about the use of Large Monorepos and their impacts for increased awareness.

2023-10-04: Update name of Task Runner pod to its new name of Toolbox.

2023-10-02: Expand guidance on using an external service for Redis further, in particular for separated Cache and Persistent services with 10k and up.

2023-09-21: Expand details on the challenges of running Gitaly in Kubernetes.

2023-09-20: Remove references to Grafana after deprecation and removal.

2023-08-30: Expand section on Geo under the Decision Tree.

2023-08-08: Switch configuration example to use the Sidekiq role for Linux package.

2023-08-03: Fix an AWS Machine type typo for the 50k architecture.

2023-06-30: Update PostgreSQL configuration examples to remove a now unneeded setting to instead use the Linux package default.

2023-06-30: Add explicit example on main page that reflects Google Memorystore is recommended.

2023-06-11: Fix IP examples for the 3k and 5k architectures.

2023-05-25: Expand notes on usage of external Cloud Provider Services and the recommendation of separated Redis servers for 10k environments and up.

2023-05-03: Update documentation to reflect correct requirement of Redis 6 instead of 5.
+ +2023-04-28: Add a note that the Azure Active Directory authentication method is not supported for use with Azure PostgreSQL Flexible service. + +2023-03-23: Add more details about known unsupported designs. + +2023-03-16: Update Redis configuration examples for multi-node to have correct config to ensure all components can connect. + +2023-03-15: Update Gitaly configuration examples to the new format. + +2023-03-14: Update cost estimates to no longer include NFS VMs. + +2023-02-17: Update Praefect configuration examples to the new format. + +2023-02-14: Add examples of what automations may be considered additional workloads. + +2023-02-13: Add a new ‘Before you Start’ section that gives more context about what’s involved with running production software self-managed. Also added more details for Standalone setups and Cloud Provider services in the Decision Tree section. + +2023-02-01: Switch to use more common “complex” terminology instead of less known “involved”. + +2023-01-31: Expand and centralize the requirements’ section on the main page. + +2023-01-26: Add notes on migrating Git Data from NFS, that object data is still supported on NFS and handling SSH keys correctly across multiple Rails nodes. + + +2022: + + + +2022-12-14: Remove guidance for using NFS for Git data as support for this is now ended with 15.6 onwards. + +2022-12-12: Add note to clarify difference between Amazon RDS Multi-AZ DB cluster and instance, with the latter being supported. Also increase PostgreSQL max connections setting to new default of 500. + +2022-12-12: Update Sidekiq max concurrency configuration to match new default of 20. + +2022-11-16: Correct guidance for Praefect and Gitaly in reduced 3k architecture section that an odd number quorum is required. + +2022-11-15: Add guidance on how to handle GitLab Secrets in Cloud Native Hybrids and further links to the GitLab Charts documentation. + +2022-11-14: Fix a typo with Sidekiq configuration for the 10k architecture. + +2022-11-09: Add guidance on large monorepos and additional workloads impact on performance. Also expanded Load Balancer guidance around SSL and a recommendation for least connection based routing methods. + +2022-10-18: Adjust Object Storage guidance to make it clearer it’s recommended over NFS. + +2022-10-11: Update guidance for Azure to recommend up to 2k only due to performance issues. + +2022-09-27: Add Decision Tree section to help users better decide what architecture to use. + +2022-09-22: Add explicit step to enable Incremental Logging when only Object Storage is being used. + +2022-09-22: Expand guidance on recommended cloud providers and services. + +2022-09-09: Expand Object Storage guidance and updated that NFS support for Git data ends with 15.6. + +2022-08-24: Add clearer note that Gitaly Cluster is not supported in Kubernetes. + +2022-08-24: Add section on supported CPUs and types. + +2022-08-18: Update architecture tables to be clearer for Object Storage support. + +2022-08-17: Increase Cloud Native Hybrid pool specs for 2k architecture to ensure enough resources present for pods. Also increased Sidekiq worker count. + +2022-08-02: Add note to use newer Gitaly check command from GitLab 15 and later. + +2022-07-25: Move troubleshooting section to a more general location. + +2022-07-14: Add guidance that Amazon Aurora is no longer compatible and not supported from GitLab 14.4.0 and later. + +2022-07-07: Add call out not to remove the default section from Gitaly storages config as it’s required. 
+ +2022-06-08: Move Incremental Logging guidance to separate section. + +2022-04-29: Expand testing results’ section with new regular pipelines. + +2022-04-26: Update Praefect configuration to reflect setting name changes. + +2022-04-15: Add missing setting to enable Object Storage correctly. + +2022-04-14: Expand Cloud Native Hybrid guidance with AWS machine types. + +2022-04-08: Add cost estimates for AWS and Azure. + +2022-04-06: Update configuration examples for most components to be correctly included for Prometheus monitoring auto discovery. + +2022-03-30: Expand validation and testing result’s section with more clearly language and more detail. + +2022-03-21: Add note that additional specs may be needed for Gitaly in some scenarios. + +2022-03-04: Add guidance for preventing the GitLab KAS service running on nodes where not required. + +2022-03-01: Fix a typo for Praefect TLS port in configuration examples. + +2022-02-22: Add guidance to enable the Gitaly Pack-objects cache. + +2022-02-22: Add a general section on recommended Cloud Providers and services. + +2022-02-14: Link to blog post about GPT testing added. + +2022-01-26: Merge testing process and cost estimates into one section with expanded details. + +2022-01-13: Expand guidance on recommended Kubernetes platforms. + + +2021: + + + +2021-12-31: Fix typo for 25k Redis AWS machine size. + +2021-12-28: Add Cloud Provider breakdowns to testing process & results section. + +2021-12-17: Add more detail to testing process and results section. + +2021-12-17: Add note on Database Load Balancing requirements when using a modified 3k architecture. + +2021-12-17: Add diagram for 1k architecture (single node). + +2021-12-15: Add sections on estimated costs (GCP), testing process and results and further Cloud Provider service details. + +2021-12-14: Expand external database service guidance for components and what Cloud Provider services are recommended. + +2021-11-24: Add recommendations for Database Load Balancing. + +2021-11-04: Add more detail about testing targets used for the architectures. + +2021-10-13: Add guidance around optionally enabling Incremental Logging via Redis. + +2021-10-07: Update Sidekiq config to include required external_url setting. + +2021-10-02: Expand guidance around Gitaly Cluster and Gitaly Sharded. + +2021-09-29: Add note on what Cloud Native Hybrid architecture to use with small user counts. + +2021-09-27: Change guidance to now co-locate Redis Sentinel beside Redis on the same node. + +2021-08-18: Add 2k Cloud Native Hybrid architecture. + +2021-08-04: Add links to performance test results for each architecture. + +2021-07-30: Fix the replication settings in PostgreSQL configuration examples to have correct values. + +2021-07-22: Add 3k Cloud Native Hybrid architecture. + +2021-07-16: Update architecture diagrams to correctly reflect no direct connection between Rails and Sidekiq. + +2021-07-15: Update Patroni configuration to include Rest API authentication settings. + +2021-07-15: Add 5k Cloud Native Hybrid architecture. + +2021-07-08: Add 25k Cloud Native Hybrid architecture. + +2021-06-29: Add 50k Cloud Native Hybrid architecture. + +2021-06-23: Make additions to main page for Cloud Native Hybrid and reduce 3k architecture. + +2021-06-16: Update PostgreSQL steps and configuration to use the latest roles and prep for any Geo replication. + +2021-06-14: Update configuration examples for Monitoring node to follow latest. + +2021-06-11: Expand notes on external services with more detail. 
+ +2021-06-09: Add additional guidance and expand on how to correctly manage GitLab secrets and database migrations. + +2021-06-09: Update Praefect configuration examples to follow the new storages format. + +2021-06-03: Removed references for the Unicorn webserver, which has been replaced by Puma. + +2021-04-23: Update Sidekiq configuration examples to show how to correctly configure multiple workers on each node. + +2021-04-23: Add initial guidance on how to modify the 3k Reference Architecture for lower user counts. + +2021-04-13: Add further clarification on using external services (PostgreSQL, Redis). + +2021-04-12: Add additional guidance on using Load Balancers and their routing methods. + +2021-04-08: Add additional guidance on how to correctly configure only one node to do database migrations for Praefect. + +2021-04-06: Expand 10k Cloud Native Hybrid documentation with more details and clear naming. + +2021-03-04: Expand Gitaly Cluster documentation to all other applicable Reference Architecture sizes. + +2021-02-19: Add additional Object Storage guidance of using separated buckets for different data types as per recommendations. + +2021-02-12: Add documentation for setting up Object Storage with Rails and Sidekiq. + +2021-02-12: Add documentation for setting up Gitaly Cluster for the 10k Reference Architecture. + +2021-02-09: Add the first iteration of the 10k Cloud Native Hybrid reference architecture. + +2021-01-07: Add documentation for using Patroni as PostgreSQL replication manager. + + + +2. Omnibus GitLab High Availability Roles + + + +Omnibus GitLab High Availability Roles + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed + +Omnibus GitLab includes various software components/services to support running GitLab in +a high availability configuration. By default, some of these supporting services +are disabled, and Omnibus GitLab is configured to run as a single node installation. +Each service can be enabled or disabled using configuration settings in /etc/gitlab/gitlab.rb, +but the introduction of roles allows you to easily enable a group of services, +and provides better default configuration based on the high availability roles you +have enabled. + +Not specifying any Roles (the default configuration) + + +When you don’t configure GitLab with any roles, GitLab enables the default services for +a single node install. These include things like PostgreSQL, Redis, Puma, Sidekiq, +Gitaly, GitLab Workhorse, NGINX, etc. + +These can still be individually enable/disabled by the settings in your /etc/gitlab/gitlab.rb. + +Specifying Roles + + +Roles are passed as an array in /etc/gitlab/gitlab.rb + +Example specifying multiple roles: + +roles ['redis_sentinel_role', 'redis_master_role'] + + +Example specifying a single role: + +roles ['geo_primary_role'] + + +Roles + + +The majority of the following roles will only work on a +GitLab Enterprise Edition, meaning +a gitlab-ee Omnibus package. It will be mentioned next to each role. + +GitLab App Role + + + + + application_role (gitlab-ce/gitlab-ee) + + The GitLab App role is used to configure an instance where only GitLab is running. Redis, PostgreSQL, and Consul services are disabled by default. + + + +Redis Server Roles + + +Documentation on the use of the Redis Roles can be found in Configuring Redis for Scaling + + + + redis_sentinel_role (gitlab-ee) + + Enables the sentinel service on the machine, + + By default, enables no other services. 
+ + + redis_master_role (gitlab-ee) + + Enables the Redis service and monitoring, and allows configuring the master password + + By default, enables no other services. + + + redis_replica_role (gitlab-ee, introduced in GitLab 13.0) + + Enables the Redis service and monitoring + + By default, enables no other services. + + + +GitLab Geo Roles + + +The GitLab Geo roles are used for configuration of GitLab Geo sites. See the +Geo Setup Documentation +for configuration steps. + + + + geo_primary_role (gitlab-ee) + + This role: + + + Configures a single-node PostgreSQL database as a leader for streaming replication. + Prevents automatic upgrade of PostgreSQL since it requires downtime of streaming replication to Geo secondary sites. + Enables all single-node GitLab services including NGINX, Puma, Redis, and Sidekiq. If you are segregating services, then you must explicitly disable unwanted services in /etc/gitlab/gitlab.rb. Therefore, this role is only useful on a single-node PostgreSQL in a Geo primary site. + Cannot be used to set up a PostgreSQL cluster in a Geo primary site. Instead, see Geo multi-node database replication. + + + By default, enables standard single-node GitLab services including NGINX, Puma, Redis, and Sidekiq. + + + geo_secondary_role (gitlab-ee) + + + Configures the secondary read-only replica database for incoming +replication. + Configures the Rails connection to the Geo tracking database. + Enables the Geo tracking database geo-postgresql. + Enables the Geo Log Cursor geo-logcursor. + Disables automatic database migrations on the read-only replica database +during reconfigure. + Reduces the number of Puma workers to save memory for other services. + Sets gitlab_rails['enable'] = true. + + + This role is intended to be used in a Geo secondary site running on a single +node. If using this role in a Geo site with multiple nodes, undesired +services will need to be explicitly disabled in /etc/gitlab/gitlab.rb. See +Geo for multiple nodes. + + This role should not be used to set up a PostgreSQL cluster in a Geo secondary +site. Instead, see Geo multi-node database replication. + + By default, enables all of the GitLab default single node services. (NGINX, Puma, Redis, Sidekiq, etc) + + + +Monitoring Roles + + + +History + + + + + +Introduced in GitLab 12.1. + + + + + + +Monitoring roles are used to set up monitoring of installations. For additional information, see the Monitoring documentation. + + + + monitoring_role (gitlab-ce/gitlab-ee) + + Configures a central monitoring server to collect metrics and provide dashboards. + + Enables Prometheus and Alertmanager. + + + +PostgreSQL Roles + + +Documentation on the usage of the PostgreSQL Roles can be found in Configuring PostgreSQL for Scaling + + + + postgres_role (gitlab-ce/gitlab-ee) + + Enables the PostgreSQL service on the machine + + By default, enables no other services. + + + patroni_role (gitlab-ee, introduced in GitLab 13.8) + + Enables the PostgreSQL, patroni, and Consul services on the machine + + By default, enables no other services. + + + pgbouncer_role (gitlab-ee) + + Enables the PgBouncer and Consul services on the machine + + By default, enables no other services. + + + consul_role (gitlab-ee) + + Enables the Consul service on the machine + + By default, enables no other services. + + + +GitLab Pages Roles + + + +History + + + + + +Introduced in GitLab 13.6. + + + + + + +GitLab Pages roles are used to setup and configure GitLab Pages. 
For additional +information, see the +GitLab Pages Administration documentation + + + + pages_role (gitlab-ce/gitlab-ee) + + Configures the server with a GitLab Pages instance. + + By default, enables no other services. + + + +Sidekiq Roles + + + +History + + + + + +Introduced in GitLab 14.1. + + + + + + +Sidekiq roles are used to setup and configure Sidekiq. For additional +information, see the +Sidekiq Administration documentation + + + + sidekiq_role (gitlab-ce/gitlab-ee) + + Configures the server with Sidekiq service. + + By default, enables no other services. + + + +Spamcheck Roles + + + +History + + + + + +Introduced in GitLab 14.9. + + + + + + +Spamcheck roles are used to setup and configure Spamcheck services. For additional +information, see the +Spamcheck documentation + + + + spamcheck_role (gitlab-ee) + + Configures the server with spamcheck and spam-classifier services. + + By default, enables no other services. + + + + +" +how do i create custom rulesets with the secret detection scanner,,"1. Secret Detection + + +Secret Detection + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + + In GitLab 14.0, Secret Detection jobs secret_detection_default_branch and secret_detection were consolidated into one job, secret_detection. + + +People sometimes accidentally commit secrets like keys or API tokens to Git repositories. After a +sensitive value is pushed to a remote repository, anyone with access to the repository can +impersonate the authorized user of the secret for malicious purposes. Most organizations require +exposed secrets to be revoked and replaced to address this risk. + +Secret Detection scans your repository to help prevent your secrets from being exposed. Secret +Detection scanning works on all text files, regardless of the language or framework used. + +GitLab has two methods for detecting secrets which can be used simultaneously: + + + The pipeline method detects secrets during the project’s CI/CD pipeline. This method cannot reject pushes. + The pre-receive method detects secrets when users push changes to the +remote Git branch. This method can reject pushes if a secret is detected. + + + +2. Customize rulesets + + + +Customize rulesets + + + +Tier: Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + +Introduced in GitLab 13.5. + +Enabled support for +passthrough chains. Expanded to include additional passthrough types of file, git, and url in GitLab 14.6. + +Enabled support for overriding rules in GitLab 14.8. + +Enabled support for specifying ambiguous passthrough refs in GitLab 16.2. + + + + + + +You can customize the behavior of our SAST analyzers by defining a ruleset configuration file in the +repository being scanned. There are two kinds of customization: + + + Modifying the behavior of predefined rules. This includes: + + +Disabling predefined rules. Available for all analyzers. + +Overriding predefined rules. Available for all analyzers. + + + Replacing predefined rules by synthesizing a custom configuration +using passthroughs. Available for only nodejs-scan +and semgrep. + + +Disable predefined rules + + +You can disable predefined rules for any SAST analyzer. + +When you disable a rule: + + + Most analyzers still scan for the vulnerability. The results are removed as a processing step after the scan completes, and they don’t appear in the gl-sast-report.json artifact. + Findings for the disabled rule no longer appear in the pipeline security tab. 
+ Existing findings for the disabled rule on the default branch are marked as No longer detected in the vulnerability report. + + +The Semgrep-based analyzer handles disabled rules differently: + + + To improve performance, the Semgrep-based analyzer doesn’t scan for disabled rules at all. + If you disable a rule in the Semgrep-based analyzer, existing vulnerability findings for that rule are automatically resolved after you merge the sast-ruleset.toml file to the default branch. + + +See the Schema and Examples sections for information on how +to configure this behavior. + +Override predefined rules + + +Certain attributes of predefined rules can be overridden for any SAST analyzer. This +can be useful when adapting SAST to your existing workflow or tools. For example, you +might want to override the severity of a vulnerability based on organizational policy, +or choose a different message to display in the Vulnerability Report. + +See the Schema and Examples sections for information on how +to configure this behavior. + +Synthesize a custom configuration + + +You can completely replace the predefined rules of some SAST analyzers: + + + +nodejs-scan - you +can replace the default njsscan configuration file +with your own. + +semgrep - you can replace +the GitLab-maintained ruleset +with your own. + + +You provide your customizations via passthroughs, which are composed into a +passthrough chain at runtime and evaluated to produce a complete configuration. The +underlying scanner is then executed against this new configuration. + +There are multiple passthrough types that let you provide configuration in different +ways, such as using a file committed to your repository or inline in the ruleset +configuration file. You can also choose how subsequent passthroughs in the chain are +handled; they can overwrite or append to previous configuration. + +See the Schema and Examples sections for information on how +to configure this behavior. + +Create the configuration file + + +To create the ruleset configuration file: + + + Create a .gitlab directory at the root of your project, if one doesn’t already exist. + Create a file named sast-ruleset.toml in the .gitlab directory. + + +Specify a remote configuration file + + + + + +Introduced in 16.1. + + + +You can set a CI/CD variable to use a ruleset configuration file that’s stored outside of the current repository. +This can help you apply the same rules across multiple projects. + +The SAST_RULESET_GIT_REFERENCE variable uses a format similar to +Git URLs for specifying a project URI, +optional authentication, and optional Git SHA. The variable uses the following format: + +[[:]@][@] + + + + note If a project has a .gitlab/sast-ruleset.toml file committed, that local configuration takes precedence and the file from SAST_RULESET_GIT_REFERENCE isn’t used. + + +The following example enables SAST and uses a shared ruleset customization file. +In this example, the file is committed on the default branch of example-ruleset-project at the path .gitlab/sast-ruleset.toml. + +include: + - template: Jobs/SAST.gitlab-ci.yml + +variables: + SAST_RULESET_GIT_REFERENCE: ""gitlab.com/example-group/example-ruleset-project"" + + +See specify a private remote configuration example for advanced usage. + +Troubleshooting remote configuration files + + +If remote configuration file doesn’t seem to be applying customizations correctly, the causes can be: + + + Your repository has a local .gitlab/sast-ruleset.toml file. 
+ + A local file is used if it’s present, even if a remote configuration is set as a variable. + A change to this logic is considered in issue 414732. + + + There is a problem with authentication. + + To check whether this is the cause of the problem, try referencing a configuration file from a repository location that doesn’t require authentication. + + + + +Schema + + +The top-level section + + +The top-level section contains one or more configuration sections, defined as TOML tables. + + + + + Setting + Description + + + + + [$analyzer] + Declares a configuration section for an analyzer. The name follows the snake-case names defined in the list of SAST analyzers. + + + + +Configuration example: + +[semgrep] +... + + +Avoid creating configuration sections that modify existing rules and synthesize a custom ruleset, as +the latter replaces predefined rules completely. + +The [$analyzer] configuration section + + +The [$analyzer] section lets you customize the behavior of an analyzer. Valid properties +differ based on the kind of configuration you’re making. + + + + + Setting + Applies to + Description + + + + + [[$analyzer.ruleset]] + Predefined rules + Defines modifications to an existing rule. + + + interpolate + All + If set to true, you can use $VAR in the configuration to evaluate environment variables. Use this feature with caution, so you don’t leak secrets or tokens. (Default: false) + + + description + Passthroughs + Description of the custom ruleset. + + + targetdir + Passthroughs + The directory where the final configuration should be persisted. If empty, a directory with a random name is created. The directory can contain up to 100 MB of files. + + + validate + Passthroughs + If set to true, the content of each passthrough is validated. The validation works for yaml, xml, json and toml content. The proper validator is identified based on the extension used in the target parameter of the [[$analyzer.passthrough]] section. (Default: false) + + + timeout + Passthroughs + The maximum time to spend to evaluate the passthrough chain, before timing out. The timeout cannot exceed 300 seconds. (Default: 60) + + + + + +interpolate + + + + caution To reduce the risk of leaking secrets, use this feature with caution. + + +The example below shows a configuration that uses the $GITURL environment variable to access a +private repository. The variable contains a username and token (for example https://user:token@url), so +they’re not explicitly stored in the configuration file. + +[semgrep] + description = ""My private Semgrep ruleset"" + interpolate = true + + [[semgrep.passthrough]] + type = ""git"" + value = ""$GITURL"" + ref = ""main"" + + +The [[$analyzer.ruleset]] section + + +The [[$analyzer.ruleset]] section targets and modifies a single predefined rule. You can define +one to many of these sections per analyzer. + + + + + Setting + Description + + + + + disable + Whether the rule should be disabled. (Default: false) + + + [$analyzer.ruleset.identifier] + Selects the predefined rule to be modified. + + + [$analyzer.ruleset.override] + Defines the overrides for the rule. + + + + +Configuration example: + +[semgrep] + [[semgrep.ruleset]] + disable = true + ... + + +The [$analyzer.ruleset.identifier] section + + +The [$analyzer.ruleset.identifier] section defines the identifiers of the predefined +rule that you wish to modify. + + + + + Setting + Description + + + + + type + The type of identifier used by the predefined rule. 
+ + + value + The value of the identifier used by the predefined rule. + + + + +You can look up the correct values for type and value by viewing the +gl-sast-report.json produced by the analyzer. +You can download this file as a job artifact from the analyzer’s CI job. + +For example, the snippet below shows a finding from a semgrep rule with three +identifiers. The type and value keys in the JSON object correspond to the +values you should provide in this section. + +... + ""vulnerabilities"": [ + { + ""id"": ""7331a4b7093875f6eb9f6eb1755b30cc792e9fb3a08c9ce673fb0d2207d7c9c9"", + ""category"": ""sast"", + ""message"": ""Key Exchange without Entity Authentication"", + ""description"": ""Audit the use of ssh.InsecureIgnoreHostKey\n"", + ... + ""identifiers"": [ + { + ""type"": ""semgrep_id"", + ""name"": ""gosec.G106-1"", + ""value"": ""gosec.G106-1"" + }, + { + ""type"": ""cwe"", + ""name"": ""CWE-322"", + ""value"": ""322"", + ""url"": ""https://cwe.mitre.org/data/definitions/322.html"" + }, + { + ""type"": ""gosec_rule_id"", + ""name"": ""Gosec Rule ID G106"", + ""value"": ""G106"" + } + ] + } + ... + ] +... + + +Configuration example: + +[semgrep] + [[semgrep.ruleset]] + [semgrep.ruleset.identifier] + type = ""semgrep_id"" + value = ""gosec.G106-1 + ... + + +The [$analyzer.ruleset.override] section + + +The [$analyzer.ruleset.override] section allows you to override attributes of a predefined rule. + + + + + Setting + Description + + + + + description + A detailed description of the issue. + + + message + (Deprecated) A description of the issue. + + + name + The name of the rule. + + + severity + The severity of the rule. Valid options are: Critical, High, Medium, Low, Unknown, Info) + + + + + + note While message is populated by the analyzers, it has been deprecated +in favor of name and description. + + +Configuration example: + +[semgrep] + [[semgrep.ruleset]] + [semgrep.ruleset.override] + severity = ""Critical"" + name = ""Command injection"" + ... + + +The [[$analyzer.passthrough]] section + + + + note This is currently supported by the nodejs-scan and semgrep analyzers only. + + +The [[$analyzer.passthrough]] section allows you to synthesize a custom configuration for an analyzer. You +can define up to 20 of these sections per analyzer. Passthroughs are composed into a passthrough chain +that evaluates into a complete configuration that replaces the predefined rules of the analyzer. + +Passthroughs are evaluated in order. Passthroughs listed later in the chain have +a higher precedence and can overwrite or append to data yielded by previous +passthroughs (depending on the mode). This is useful for cases where you need +to use or modify an existing configuration. + +The amount of data generated by a single passthrough is limited to 1 MB. + + + + + Setting + Applies to + Description + + + + + type + All + One of file, raw, git or url. + + + target + All + The target file to contain the data written by the passthrough evaluation. If empty, a random filename is used. + + + mode + All + If overwrite, the target file is overwritten. If append, new content is appended to the target file. The git type only supports overwrite. (Default: overwrite) + + + ref + type = ""git"" + Contains the name of the branch, tag, or the SHA to pull + + + subdir + type = ""git"" + Used to select a subdirectory of the Git repository as the configuration source. + + + value + All + For the file, url, and git types, defines the location of the file or Git repository. 
For the raw type, contains the inline configuration. + + + validator + All + Used to explicitly invoke validators (xml, yaml, json, toml) on the target file after the evaluation of a passthrough. + + + + +Passthrough types + + + + + + Type + Description + + + + + file + Use a file that is present in the Git repository. + + + raw + Provide the configuration inline. + + + git + Pull the configuration from a remote Git repository. + + + url + Fetch the configuration using HTTP. + + + + + + caution When using the raw passthrough with a YAML snippet, it’s recommended to format all indentation +in the sast-ruleset.toml file as spaces. The YAML specification mandates spaces over tabs, and the +analyzer fails to parse your custom ruleset unless the indentation is represented accordingly. + + +Examples + + +Disable predefined rules of SAST analyzers + + +With the following custom ruleset configuration, the following rules are omitted from the report: + + + +semgrep rules with a semgrep_id of gosec.G106-1 or a cwe of 322. + +sobelow rules with a sobelow_rule_id of sql_injection. + +flawfinder rules with a flawfinder_func_name of memcpy. + + +[semgrep] + [[semgrep.ruleset]] + disable = true + [semgrep.ruleset.identifier] + type = ""semgrep_id"" + value = ""gosec.G106-1"" + + [[semgrep.ruleset]] + disable = true + [semgrep.ruleset.identifier] + type = ""cwe"" + value = ""322"" + +[sobelow] + [[sobelow.ruleset]] + disable = true + [sobelow.ruleset.identifier] + type = ""sobelow_rule_id"" + value = ""sql_injection"" + +[flawfinder] + [[flawfinder.ruleset]] + disable = true + [flawfinder.ruleset.identifier] + type = ""flawfinder_func_name"" + value = ""memcpy"" + + +Override predefined rules of SAST analyzers + + +With the following custom ruleset configuration, vulnerabilities found with +semgrep with a type CWE and a value 322 have their severity overridden to Critical. + +[semgrep] + [[semgrep.ruleset]] + [semgrep.ruleset.identifier] + type = ""cwe"" + value = ""322"" + [semgrep.ruleset.override] + severity = ""Critical"" + + +Synthesize a custom configuration using a raw passthrough for nodejs-scan + + +With the following custom ruleset configuration, the predefined behavior +of the nodejs-scan analyzer is replaced with a custom configuration. + +The syntax used for the value follows the njsscan config format. + +[nodejs-scan] + description = ""My custom ruleset for nodejs-scan"" + + [[nodejs-scan.passthrough]] + type = ""raw"" + value = ''' +--- +- nodejs-extensions: + - .js + + template-extensions: + - .new + - .hbs + - '' + + ignore-filenames: + - skip.js + + ignore-paths: + - __MACOSX + - skip_dir + - node_modules + + ignore-extensions: + - .hbs + + ignore-rules: + - regex_injection_dos + - pug_jade_template + - express_xss +''' + + +Synthesize a custom configuration using a file passthrough for semgrep + + +With the following custom ruleset configuration, the predefined ruleset +of the semgrep analyzer is replaced with a custom ruleset contained in +a file called my-semgrep-rules.yaml in the repository being scanned. + +# my-semgrep-rules.yml +--- +rules: +- id: my-custom-rule + pattern: print(""Hello World"") + message: | + Unauthorized use of Hello World. 
+ severity: ERROR + languages: + - python + + +[semgrep] + description = ""My custom ruleset for Semgrep"" + + [[semgrep.passthrough]] + type = ""file"" + value = ""my-semgrep-rules.yml"" + + +Synthesize a custom configuration using a passthrough chain for semgrep + + +With the following custom ruleset configuration, the predefined ruleset +of the semgrep analyzer is replaced with a custom ruleset produced by +evaluating a chain of four passthroughs. Each passthrough produces a file +that’s written to the /sgrules directory within the container. A +timeout of 60 seconds is set in case any Git remotes are unresponsive. + +Different passthrough types are demonstrated in this example: + + + Two git passthroughs, the first pulling develop branch from the +myrules Git repository, and the second pulling revision 97f7686 +from the sast-rules repository, and considering only files in the +go subdirectory. + + The sast-rules entry has a higher precedence because it appears later in +the configuration. + If there’s a filename collision between the two checkouts, files +from the sast-rules repository overwrite files from the +myrules repository. + + + A raw passthrough, which writes its value to /sgrules/insecure.yml. + A url passthrough, which fetches a configuration hosted at a URL and +writes it to /sgrules/gosec.yml. + + +Afterwards, Semgrep is invoked with the final configuration located under +/sgrules. + +[semgrep] + description = ""My custom ruleset for Semgrep"" + targetdir = ""/sgrules"" + timeout = 60 + + [[semgrep.passthrough]] + type = ""git"" + value = ""https://gitlab.com/user/myrules.git"" + ref = ""develop"" + + [[semgrep.passthrough]] + type = ""git"" + value = ""https://gitlab.com/gitlab-org/secure/gsoc-sast-vulnerability-rules/playground/sast-rules.git"" + ref = ""97f7686db058e2141c0806a477c1e04835c4f395"" + subdir = ""go"" + + [[semgrep.passthrough]] + type = ""raw"" + target = ""insecure.yml"" + value = """""" +rules: +- id: ""insecure"" + patterns: + - pattern: ""func insecure() {...}"" + message: | + Insecure function insecure detected + metadata: + cwe: ""CWE-200: Exposure of Sensitive Information to an Unauthorized Actor"" + severity: ""ERROR"" + languages: + - ""go"" +"""""" + + [[semgrep.passthrough]] + type = ""url"" + value = ""https://semgrep.dev/c/p/gosec"" + target = ""gosec.yml"" + + +Configure the mode for passthroughs in a chain + + +You can choose how to handle filename conflicts that occur between +passthroughs in a chain. The default behavior is to overwrite +existing files with the same name, but you can choose mode = append +instead to append the content of later files onto earlier ones. + +You can use the append mode for the file, url, and raw +passthrough types only. + +With the following custom ruleset configuration, two raw passthroughs +are used to iteratively assemble the /sgrules/my-rules.yml file, which +is then provided to Semgrep as the ruleset. Each passthrough appends a +single rule to the ruleset. The first passthrough is responsible for +initialising the top-level rules object, according to the +Semgrep rule syntax. 
+ +[semgrep] + description = ""My custom ruleset for Semgrep"" + targetdir = ""/sgrules"" + validate = true + + [[semgrep.passthrough]] + type = ""raw"" + target = ""my-rules.yml"" + value = """""" +rules: +- id: ""insecure"" + patterns: + - pattern: ""func insecure() {...}"" + message: | + Insecure function 'insecure' detected + metadata: + cwe: ""..."" + severity: ""ERROR"" + languages: + - ""go"" +"""""" + + [[semgrep.passthrough]] + type = ""raw"" + mode = ""append"" + target = ""my-rules.yml"" + value = """""" +- id: ""secret"" + patterns: + - pattern-either: + - pattern: '$MASK = ""...""' + - metavariable-regex: + metavariable: ""$MASK"" + regex: ""(password|pass|passwd|pwd|secret|token)"" + message: | + Use of hard-coded password + metadata: + cwe: ""..."" + severity: ""ERROR"" + languages: + - ""go"" +"""""" + + +# /sgrules/my-rules.yml +rules: +- id: ""insecure"" + patterns: + - pattern: ""func insecure() {...}"" + message: | + Insecure function 'insecure' detected + metadata: + cwe: ""..."" + severity: ""ERROR"" + languages: + - ""go"" +- id: ""secret"" + patterns: + - pattern-either: + - pattern: '$MASK = ""...""' + - metavariable-regex: + metavariable: ""$MASK"" + regex: ""(password|pass|passwd|pwd|secret|token)"" + message: | + Use of hard-coded password + metadata: + cwe: ""..."" + severity: ""ERROR"" + languages: + - ""go"" + + +Specify a private remote configuration + + +The following example enables SAST and uses a shared ruleset customization file. The file is: + + + Downloaded from a private project that requires authentication, by using a Group Access Token securely stored within a CI variable. + Checked out at a specific Git commit SHA instead of the default branch. + + +See group access tokens for how to find the username associated with a group token. + +include: + - template: Jobs/SAST.gitlab-ci.yml + +variables: + SAST_RULESET_GIT_REFERENCE: ""group_2504721_bot_7c9311ffb83f2850e794d478ccee36f5:$PERSONAL_ACCESS_TOKEN@gitlab.com/example-group/example-ruleset-project@c8ea7e3ff126987fb4819cc35f2310755511c2ab"" + + + +" +can i store ruby gems in the package registry? what are the limitations?,,"1. Ruby gems in the package registry + + + +Ruby gems in the package registry + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated +Status: Experiment + + +History + + + + + +Introduced in GitLab 13.10. + + + + + + + + caution The Ruby gems package registry for GitLab is under development and isn’t ready for production use due to +limited functionality. This epic details the remaining +work and timelines to make it production ready. + + +You can publish Ruby gems in your project’s package registry, then install the packages when you +need to use them as a dependency. Although you can push gems to the registry, you cannot install +them from the registry. However, you can download gem files directly from the package registry’s +UI, or by using the API. + +For documentation of the specific API endpoints that the Ruby gems and Bundler package manager +clients use, see the Ruby gems API documentation. + +Enable the Ruby gems registry + + +The Ruby gems registry for GitLab is behind a feature flag that is disabled by default. GitLab +administrators with access to the GitLab Rails console can enable this registry for your instance. 
+ +To enable it: + +Feature.enable(:rubygem_packages) + + +To disable it: + +Feature.disable(:rubygem_packages) + + +To enable or disable it for specific projects: + +Feature.enable(:rubygem_packages, Project.find(1)) +Feature.disable(:rubygem_packages, Project.find(2)) + + +Create a Ruby gem + + +If you need help creating a Ruby gem, see the RubyGems documentation. + +Authenticate to the package registry + + +Before you can push to the package registry, you must authenticate. + +To do this, you can use: + + + A personal access token +with the scope set to api. + A deploy token with the scope set to +read_package_registry, write_package_registry, or both. + A CI job token. + + +Authenticate with a personal access token or deploy token + + +To authenticate with a personal access token, create or edit the ~/.gem/credentials file and add: + +--- +https://gitlab.example.com/api/v4/projects//packages/rubygems: '' + + + + + must be the token value of either your personal access token or deploy token. + Your project ID is displayed on the project overview page. + + +Authenticate with a CI job token + + +To work with RubyGems commands within GitLab CI/CD, +you can use the CI_JOB_TOKEN predefined environment variable instead of a personal access token or deploy token. + +For example: + +# assuming a my_gem.gemspec file is present in the repository with the version currently set to 0.0.1 +image: ruby + +run: + before_script: + - mkdir ~/.gem + - echo ""---"" > ~/.gem/credentials + - | + echo ""${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/rubygems: '${CI_JOB_TOKEN}'"" >> ~/.gem/credentials + - chmod 0600 ~/.gem/credentials # rubygems requires 0600 permissions on the credentials file + script: + - gem build my_gem + - gem push my_gem-0.0.1.gem --host ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/rubygems + + +You can also use CI_JOB_TOKEN in a ~/.gem/credentials file that you check in to +GitLab: + +--- +https://gitlab.example.com/api/v4/projects/${env.CI_PROJECT_ID}/packages/rubygems: '${env.CI_JOB_TOKEN}' + + +Push a Ruby gem + + +Prerequisites: + + + You must authenticate to the package registry. + The maximum allowed gem size is 3 GB. + + +To push your gem, run a command like this one: + +gem push my_gem-0.0.1.gem --host + + + is the URL you used when setting up authentication. For example: + +gem push my_gem-0.0.1.gem --host https://gitlab.example.com/api/v4/projects/1/packages/rubygems + + +This message indicates that the gem uploaded successfully: + +Pushing gem to https://gitlab.example.com/api/v4/projects/1/packages/rubygems... +{""message"":""201 Created""} + + +To view the published gem, go to your project’s Packages and registries page. Gems pushed to +GitLab aren’t displayed in your project’s Packages UI immediately. It can take up to 10 minutes to +process a gem. + +Pushing gems with the same name or version + + +You can push a gem if a package of the same name and version already exists. +Both are visible and accessible in the UI. However, only the most recently +pushed gem is used for installs. + +Install a Ruby gem + + +The Ruby gems registry for GitLab is under development, and isn’t ready for production use. You +cannot install Gems from the registry. However, you can download .gem files directly from the UI +or by using the API. + + +2. Caching in GitLab CI/CD + + + +Caching in GitLab CI/CD + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +A cache is one or more files a job downloads and saves. 
Subsequent jobs that use +the same cache don’t have to download the files again, so they execute more quickly. + +To learn how to define the cache in your .gitlab-ci.yml file, +see the cache reference. + +How cache is different from artifacts + + +Use cache for dependencies, like packages you download from the internet. +Cache is stored where GitLab Runner is installed and uploaded to S3 if +distributed cache is enabled. + +Use artifacts to pass intermediate build results between stages. +Artifacts are generated by a job, stored in GitLab, and can be downloaded. + +Both artifacts and caches define their paths relative to the project directory, and +can’t link to files outside it. + +Cache + + + + Define cache per job by using the cache keyword. Otherwise it is disabled. + Subsequent pipelines can use the cache. + Subsequent jobs in the same pipeline can use the cache, if the dependencies are identical. + Different projects cannot share the cache. + By default, protected and non-protected branches do not share the cache. However, you can change this behavior. + + +Artifacts + + + + Define artifacts per job. + Subsequent jobs in later stages of the same pipeline can use artifacts. + Different projects cannot share artifacts. + Artifacts expire after 30 days by default. You can define a custom expiration time. + The latest artifacts do not expire if keep latest artifacts is enabled. + Use dependencies to control which jobs fetch the artifacts. + + +Good caching practices + + +To ensure maximum availability of the cache, do one or more of the following: + + + +Tag your runners and use the tag on jobs +that share the cache. + +Use runners that are only available to a particular project. + +Use a key that fits your workflow. For example, +you can configure a different cache for each branch. + + +For runners to work with caches efficiently, you must do one of the following: + + + Use a single runner for all your jobs. + Use multiple runners that have +distributed caching, +where the cache is stored in S3 buckets. Instance runners on GitLab.com behave this way. These runners can be in autoscale mode, +but they don’t have to be. To manage cache objects, +apply lifecycle rules to delete the cache objects after a period of time. +Lifecycle rules are available on the object storage server. + Use multiple runners with the same architecture and have these runners +share a common network-mounted directory to store the cache. This directory should use NFS or something similar. +These runners must be in autoscale mode. + + +Use multiple caches + + + +History + + + + + +Introduced in GitLab 13.10. + +Feature flag removed, in GitLab 13.12. + + + + + + +You can have a maximum of four caches: + +test-job: + stage: build + cache: + - key: + files: + - Gemfile.lock + paths: + - vendor/ruby + - key: + files: + - yarn.lock + paths: + - .yarn-cache/ + script: + - bundle config set --local path 'vendor/ruby' + - bundle install + - yarn install --cache-folder .yarn-cache + - echo Run tests... + + +If multiple caches are combined with a fallback cache key, +the global fallback cache is fetched every time a cache is not found. + +Use a fallback cache key + + +Per-cache fallback keys + + + +History + + + + + +Introduced in GitLab 16.0 + + + + + + +Each cache entry supports up to five fallback keys with the fallback_keys keyword. +When a job does not find a cache key, the job attempts to retrieve a fallback cache instead. +Fallback keys are searched in order until a cache is found. 
If no cache is found, +the job runs without using a cache. For example: + +test-job: + stage: build + cache: + - key: cache-$CI_COMMIT_REF_SLUG + fallback_keys: + - cache-$CI_DEFAULT_BRANCH + - cache-default + paths: + - vendor/ruby + script: + - bundle config set --local path 'vendor/ruby' + - bundle install + - echo Run tests... + + +In this example: + + + The job looks for the cache-$CI_COMMIT_REF_SLUG cache. + If cache-$CI_COMMIT_REF_SLUG is not found, the job looks for cache-$CI_DEFAULT_BRANCH +as a fallback option. + If cache-$CI_DEFAULT_BRANCH is also not found, the job looks for cache-default +as a second fallback option. + If none are found, the job downloads all the Ruby dependencies without using a cache, +but creates a new cache for cache-$CI_COMMIT_REF_SLUG when the job completes. + + +Fallback keys follow the same processing logic as cache:key: + + + If you clear caches manually, per-cache fallback keys are appended +with an index like other cache keys. + If the Use separate caches for protected branches setting is enabled, +per-cache fallback keys are appended with -protected or -non_protected. + + +Global fallback key + + + +History + + + + + +Introduced in GitLab Runner 13.4. + + + + + + +You can use the $CI_COMMIT_REF_SLUG predefined variable +to specify your cache:key. For example, if your +$CI_COMMIT_REF_SLUG is test, you can set a job to download cache that’s tagged with test. + +If a cache with this tag is not found, you can use CACHE_FALLBACK_KEY to +specify a cache to use when none exists. + +In the following example, if the $CI_COMMIT_REF_SLUG is not found, the job uses the key defined +by the CACHE_FALLBACK_KEY variable: + +variables: + CACHE_FALLBACK_KEY: fallback-key + +job1: + script: + - echo + cache: + key: ""$CI_COMMIT_REF_SLUG"" + paths: + - binaries/ + + +The order of caches extraction is: + + + Retrieval attempt for cache:key + + Retrieval attempts for each entry in order in fallback_keys + + Retrieval attempt for the global fallback key in CACHE_FALLBACK_KEY + + + +The cache extraction process stops after the first successful cache is retrieved. + +Disable cache for specific jobs + + +If you define the cache globally, each job uses the +same definition. You can override this behavior for each job. + +To disable it completely for a job, use an empty list: + +job: + cache: [] + + +Inherit global configuration, but override specific settings per job + + +You can override cache settings without overwriting the global cache by using +anchors. For example, if you want to override the +policy for one job: + +default: + cache: &global_cache + key: $CI_COMMIT_REF_SLUG + paths: + - node_modules/ + - public/ + - vendor/ + policy: pull-push + +job: + cache: + # inherit all global cache settings + <<: *global_cache + # override the policy + policy: pull + + +For more information, see cache: policy. + +Common use cases for caches + + +Usually you use caches to avoid downloading content, like dependencies +or libraries, each time you run a job. Node.js packages, +PHP packages, Ruby gems, Python libraries, and others can be cached. + +For examples, see the GitLab CI/CD templates. + +Share caches between jobs in the same branch + + +To have jobs in each branch use the same cache, define a cache with the key: $CI_COMMIT_REF_SLUG: + +cache: + key: $CI_COMMIT_REF_SLUG + + +This configuration prevents you from accidentally overwriting the cache. However, the +first pipeline for a merge request is slow. 
The next time a commit is pushed to the branch, the +cache is re-used and jobs run faster. + +To enable per-job and per-branch caching: + +cache: + key: ""$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"" + + +To enable per-stage and per-branch caching: + +cache: + key: ""$CI_JOB_STAGE-$CI_COMMIT_REF_SLUG"" + + +Share caches across jobs in different branches + + +To share a cache across all branches and all jobs, use the same key for everything: + +cache: + key: one-key-to-rule-them-all + + +To share a cache between branches, but have a unique cache for each job: + +cache: + key: $CI_JOB_NAME + + +Use a variable to control a job’s cache policy + + + +History + + + + + +Introduced in GitLab 16.1. + + + + + + +To reduce duplication of jobs where the only difference is the pull policy, you can use a CI/CD variable. + +For example: + +conditional-policy: + rules: + - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + variables: + POLICY: pull-push + - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH + variables: + POLICY: pull + stage: build + cache: + key: gems + policy: $POLICY + paths: + - vendor/bundle + script: + - echo ""This job pulls and pushes the cache depending on the branch"" + - echo ""Downloading dependencies..."" + + +In this example, the job’s cache policy is: + + + +pull-push for changes to the default branch. + +pull for changes to other branches. + + +Cache Node.js dependencies + + +If your project uses npm to install Node.js +dependencies, the following example defines a default cache so that all jobs inherit it. +By default, npm stores cache data in the home folder (~/.npm). However, you +can’t cache things outside of the project directory. +Instead, tell npm to use ./.npm, and cache it per-branch: + +default: + image: node:latest + cache: # Cache modules in between jobs + key: $CI_COMMIT_REF_SLUG + paths: + - .npm/ + before_script: + - npm ci --cache .npm --prefer-offline + +test_async: + script: + - node ./specs/start.js ./specs/async.spec.js + + +Compute the cache key from the lock file + + +You can use cache:key:files to compute the cache +key from a lock file like package-lock.json or yarn.lock, and reuse it in many jobs. + +default: + cache: # Cache modules using lock file + key: + files: + - package-lock.json + paths: + - .npm/ + + +If you’re using Yarn, you can use yarn-offline-mirror +to cache the zipped node_modules tarballs. The cache generates more quickly, because +fewer files have to be compressed: + +job: + script: + - echo 'yarn-offline-mirror "".yarn-cache/""' >> .yarnrc + - echo 'yarn-offline-mirror-pruning true' >> .yarnrc + - yarn install --frozen-lockfile --no-progress + cache: + key: + files: + - yarn.lock + paths: + - .yarn-cache/ + + +Cache PHP dependencies + + +If your project uses Composer to install +PHP dependencies, the following example defines a default cache so that +all jobs inherit it. PHP libraries modules are installed in vendor/ and +are cached per-branch: + +default: + image: php:7.2 + cache: # Cache libraries in between jobs + key: $CI_COMMIT_REF_SLUG + paths: + - vendor/ + before_script: + # Install and run Composer + - curl --show-error --silent ""https://getcomposer.org/installer"" | php + - php composer.phar install + +test: + script: + - vendor/bin/phpunit --configuration phpunit.xml --coverage-text --colors=never + + +Cache Python dependencies + + +If your project uses pip to install +Python dependencies, the following example defines a default cache so that +all jobs inherit it. 
pip’s cache is defined under .cache/pip/ and is cached per-branch: + +default: + image: python:latest + cache: # Pip's cache doesn't store the python packages + paths: # https://pip.pypa.io/en/stable/topics/caching/ + - .cache/pip + before_script: + - python -V # Print out python version for debugging + - pip install virtualenv + - virtualenv venv + - source venv/bin/activate + +variables: # Change pip's cache directory to be inside the project directory since we can only cache local items. + PIP_CACHE_DIR: ""$CI_PROJECT_DIR/.cache/pip"" + +test: + script: + - python setup.py test + - pip install ruff + - ruff --format=gitlab . + + +Cache Ruby dependencies + + +If your project uses Bundler to install +gem dependencies, the following example defines a default cache so that all +jobs inherit it. Gems are installed in vendor/ruby/ and are cached per-branch: + +default: + image: ruby:2.6 + cache: # Cache gems in between builds + key: $CI_COMMIT_REF_SLUG + paths: + - vendor/ruby + before_script: + - ruby -v # Print out ruby version for debugging + - bundle config set --local path 'vendor/ruby' # The location to install the specified gems to + - bundle install -j $(nproc) # Install dependencies into ./vendor/ruby + +rspec: + script: + - rspec spec + + +If you have jobs that need different gems, use the prefix +keyword in the global cache definition. This configuration generates a different +cache for each job. + +For example, a testing job might not need the same gems as a job that deploys to +production: + +default: + cache: + key: + files: + - Gemfile.lock + prefix: $CI_JOB_NAME + paths: + - vendor/ruby + +test_job: + stage: test + before_script: + - bundle config set --local path 'vendor/ruby' + - bundle install --without production + script: + - bundle exec rspec + +deploy_job: + stage: production + before_script: + - bundle config set --local path 'vendor/ruby' # The location to install the specified gems to + - bundle install --without test + script: + - bundle exec deploy + + +Cache Go dependencies + + +If your project uses Go Modules to install +Go dependencies, the following example defines cache in a go-cache template, that +any job can extend. Go modules are installed in ${GOPATH}/pkg/mod/ and +are cached for all of the go projects: + +.go-cache: + variables: + GOPATH: $CI_PROJECT_DIR/.go + before_script: + - mkdir -p .go + cache: + paths: + - .go/pkg/mod/ + +test: + image: golang:1.13 + extends: .go-cache + script: + - go test ./... -v -short + + +Availability of the cache + + +Caching is an optimization, but it isn’t guaranteed to always work. You might need +to regenerate cached files in each job that needs them. + +After you define a cache in .gitlab-ci.yml, +the availability of the cache depends on: + + + The runner’s executor type. + Whether different runners are used to pass the cache between jobs. + + +Where the caches are stored + + +All caches defined for a job are archived in a single cache.zip file. +The runner configuration defines where the file is stored. By default, the cache +is stored on the machine where GitLab Runner is installed. The location also depends on the type of executor. + + + + + Runner executor + Default path of the cache + + + + + Shell + Locally, under the gitlab-runner user’s home directory: /home/gitlab-runner/cache////cache.zip. + + + Docker + Locally, under Docker volumes: /var/lib/docker/volumes//_data////cache.zip. + + + +Docker Machine (autoscale runners) + The same as the Docker executor. 
+ + + + +If you use cache and artifacts to store the same path in your jobs, the cache might +be overwritten because caches are restored before artifacts. + +Cache key names + + + +History + + + + + +Introduced in GitLab 15.0. + + + + + + +A suffix is added to the cache key, with the exception of the global fallback cache key. + +As an example, assuming that cache.key is set to $CI_COMMIT_REF_SLUG, and that we have two branches main +and feature, then the following table represents the resulting cache keys: + + + + + Branch name + Cache key + + + + + main + main-protected + + + feature + feature-non_protected + + + + +Use the same cache for all branches + + + +History + + + + + +Introduced in GitLab 15.0. + + + + + + +If you do not want to use cache key names, +you can have all branches (protected and unprotected) use the same cache. + +The cache separation with cache key names is a security feature +and should only be disabled in an environment where all users with Developer role are highly trusted. + +To use the same cache for all branches: + + + On the left sidebar, select Search or go to and find your project. + Select Settings > CI/CD. + Expand General pipelines. + Clear the Use separate caches for protected branches checkbox. + Select Save changes. + + +How archiving and extracting works + + +This example shows two jobs in two consecutive stages: + +stages: + - build + - test + +default: + cache: + key: build-cache + paths: + - vendor/ + before_script: + - echo ""Hello"" + +job A: + stage: build + script: + - mkdir vendor/ + - echo ""build"" > vendor/hello.txt + after_script: + - echo ""World"" + +job B: + stage: test + script: + - cat vendor/hello.txt + + +If one machine has one runner installed, then all jobs for your project +run on the same host: + + + Pipeline starts. + +job A runs. + The cache is extracted (if found). + +before_script is executed. + +script is executed. + +after_script is executed. + +cache runs and the vendor/ directory is zipped into cache.zip. +This file is then saved in the directory based on the +runner’s setting and the cache: key. + +job B runs. + The cache is extracted (if found). + +before_script is executed. + +script is executed. + Pipeline finishes. + + +By using a single runner on a single machine, you don’t have the issue where +job B might execute on a runner different from job A. This setup guarantees the +cache can be reused between stages. It only works if the execution goes from the build stage +to the test stage in the same runner/machine. Otherwise, the cache might not be available. + +During the caching process, there’s also a couple of things to consider: + + + If some other job, with another cache configuration had saved its +cache in the same zip file, it is overwritten. If the S3 based shared cache is +used, the file is additionally uploaded to S3 to an object based on the cache +key. So, two jobs with different paths, but the same cache key, overwrites +their cache. + When extracting the cache from cache.zip, everything in the zip file is +extracted in the job’s working directory (usually the repository which is +pulled down), and the runner doesn’t mind if the archive of job A overwrites +things in the archive of job B. + + +It works this way because the cache created for one runner +often isn’t valid when used by a different one. A different runner may run on a +different architecture (for example, when the cache includes binary files). 
Also, +because the different steps might be executed by runners running on different +machines, it is a safe default. + +Clearing the cache + + +Runners use cache to speed up the execution +of your jobs by reusing existing data. This can sometimes lead to +inconsistent behavior. + +There are two ways to start with a fresh copy of the cache. + +Clear the cache by changing cache:key + + +Change the value for cache: key in your .gitlab-ci.yml file. +The next time the pipeline runs, the cache is stored in a different location. + +Clear the cache manually + + +You can clear the cache in the GitLab UI: + + + On the left sidebar, select Search or go to and find your project. + Select Build > Pipelines. + In the upper-right corner, select Clear runner caches. + + +On the next commit, your CI/CD jobs use a new cache. + + + note Each time you clear the cache manually, the internal cache name is updated. The name uses the format cache-, and the index increments by one. The old cache is not deleted. You can manually delete these files from the runner storage. + + +Troubleshooting + + +Cache mismatch + + +If you have a cache mismatch, follow these steps to troubleshoot. + + + + + Reason for a cache mismatch + How to fix it + + + + + You use multiple standalone runners (not in autoscale mode) attached to one project without a shared cache. + Use only one runner for your project or use multiple runners with distributed cache enabled. + + + You use runners in autoscale mode without a distributed cache enabled. + Configure the autoscale runner to use a distributed cache. + + + The machine the runner is installed on is low on disk space or, if you’ve set up distributed cache, the S3 bucket where the cache is stored doesn’t have enough space. + Make sure you clear some space to allow new caches to be stored. There’s no automatic way to do this. + + + You use the same key for jobs where they cache different paths. + Use different cache keys so that the cache archive is stored to a different location and doesn’t overwrite wrong caches. + + + You have not enabled the distributed runner caching on your runners. + Set Shared = false and re-provision your runners. + + + + +Cache mismatch example 1 + + +If you have only one runner assigned to your project, the cache +is stored on the runner’s machine by default. + +If two jobs have the same cache key but a different path, the caches can be overwritten. +For example: + +stages: + - build + - test + +job A: + stage: build + script: make build + cache: + key: same-key + paths: + - public/ + +job B: + stage: test + script: make test + cache: + key: same-key + paths: + - vendor/ + + + + +job A runs. + +public/ is cached as cache.zip. + +job B runs. + The previous cache, if any, is unzipped. + +vendor/ is cached as cache.zip and overwrites the previous one. + The next time job A runs it uses the cache of job B which is different +and thus isn’t effective. + + +To fix this issue, use different keys for each job. + +Cache mismatch example 2 + + +In this example, you have more than one runner assigned to your +project, and distributed cache is not enabled. 
+ +The second time the pipeline runs, you want job A and job B to re-use their cache (which in this case +is different): + +stages: + - build + - test + +job A: + stage: build + script: build + cache: + key: keyA + paths: + - vendor/ + +job B: + stage: test + script: test + cache: + key: keyB + paths: + - vendor/ + + +Even if the key is different, the cached files might get “cleaned” before each +stage if the jobs run on different runners in subsequent pipelines. + +Concurrent runners missing local cache + + +If you have configured multiple concurrent runners with the Docker executor, locally cached files might +not be present for concurrently-running jobs as you expect. The names of cache volumes are constructed +uniquely for each runner instance, so files cached by one runner instance are not found in the cache by another runner +instance. + +To share the cache between concurrent runners, you can either: + + + Use the [runners.docker] section of the runners’ config.toml to configure a single mount point on the host that +is mapped to /cache in each container, preventing the runner from creating unique volume names. + Use a distributed cache. + + + +" +how does remote development feature work?,,"1. Remote development + + + +Remote development + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + +Introduced in GitLab 15.4 with a flag named vscode_web_ide. Disabled by default. + +Enabled on GitLab.com in GitLab 15.7. + +Enabled on self-managed in GitLab 15.11. + + + + + + + + On self-managed GitLab, by default this feature is available. To hide the feature, an administrator can disable the feature flag named vscode_web_ide. On GitLab.com and GitLab Dedicated, this feature is available. The feature is not ready for production use. + + +You can use remote development to write and compile code hosted on GitLab. +With remote development, you can: + + + Create a secure development environment in the cloud. + Connect to that environment from your local machine through a web browser or client-based solution. + + +Web IDE as a frontend + + +You can use the Web IDE to make, commit, and push changes to a project directly from your web browser. +This way, you can update any project without having to install any dependencies or clone any repositories locally. + +The Web IDE, however, lacks a native runtime environment where you could compile code, run tests, or generate real-time feedback. +With remote development, you can use: + + + The Web IDE as a frontend + A separate machine as a backend runtime environment + + +For a complete IDE experience, connect the Web IDE to a development environment configured to run as a remote host. +You can create this environment inside or outside of GitLab. + +Workspaces + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +A workspace is a virtual sandbox environment for your code in GitLab that includes: + + + A runtime environment + Dependencies + Configuration files + + +You can create a workspace from scratch or from a template that you can also customize. + +When you configure and connect a workspace to the Web IDE, you can: + + + Edit files directly from the Web IDE and commit and push changes to GitLab. + Use the Web IDE to run tests, debug code, and view real-time feedback. 
+ + +Manage a development environment + + +Create a development environment + + +To create a development environment, run this command: + +export CERTS_DIR=""/home/ubuntu/.certbot/config/live/${DOMAIN}"" +export PROJECTS_DIR=""/home/ubuntu"" + +docker run -d \ + --name my-environment \ + -p 3443:3443 \ + -v ""${CERTS_DIR}/fullchain.pem:/gitlab-rd-web-ide/certs/fullchain.pem"" \ + -v ""${CERTS_DIR}/privkey.pem:/gitlab-rd-web-ide/certs/privkey.pem"" \ + -v ""${PROJECTS_DIR}:/projects"" \ + registry.gitlab.com/gitlab-org/remote-development/gitlab-rd-web-ide-docker:0.2-alpha \ + --log-level warn --domain ""${DOMAIN}"" --ignore-version-mismatch + + +The new development environment starts automatically. + +Stop a development environment + + +To stop a running development environment, run this command: + +docker container stop my-environment + + +Start a development environment + + +To start a stopped development environment, run this command: + +docker container start my-environment + + +The token changes every time you start the development environment. + +Remove a development environment + + +To remove a development environment: + + + +Stop the development environment. + + Run this command: + + +docker container rm my-environment + + + + + +2. Workspaces + + + +Workspaces + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + +Introduced in GitLab 15.11 with a flag named remote_development_feature_flag. Disabled by default. + +Enabled on GitLab.com and self-managed in GitLab 16.0. + +Generally available in GitLab 16.7. Feature flag remote_development_feature_flag removed. + + + + + + +A workspace is a virtual sandbox environment for your code in GitLab. +You can use workspaces to create and manage isolated development environments for your GitLab projects. +These environments ensure that different projects don’t interfere with each other. + +Each workspace includes its own set of dependencies, libraries, and tools, +which you can customize to meet the specific needs of each project. + +Workspaces and projects + + +Workspaces are scoped to a project. +When you create a workspace, you must: + + + Assign the workspace to a specific project. + Select a project with a .devfile.yaml file. + + +The workspace can interact with the GitLab API, with the access level defined by current user permissions. +A running workspace remains accessible even if user permissions are later revoked. + +Manage workspaces from a project + + + +History + + + + + +Introduced in GitLab 16.2. + + + + + + +To manage workspaces from a project: + + + On the left sidebar, select Search or go to and find your project. + In the upper right, select Edit. + From the dropdown list, under Your workspaces, you can: + + Restart, stop, or terminate an existing workspace. + Create a new workspace. + + + + + + caution When you terminate a workspace, any unsaved or uncommitted data +in that workspace is deleted and cannot be recovered. + + +Deleting data associated with a workspace + + +When you delete a project, agent, user, or token associated with a workspace: + + + The workspace is deleted from the user interface. + In the Kubernetes cluster, the running workspace resources become orphaned and are not automatically deleted. + + +To clean up orphaned resources, an administrator must manually delete the workspace in Kubernetes. + +Issue 414384 proposes to change this behavior. + +Manage workspaces at the agent level + + + +History + + + + + +Introduced in GitLab 16.8. 
+ + + + + + +To manage all workspaces associated with an agent: + + + On the left sidebar, select Search or go to and find your project. + Select Operate > Kubernetes clusters. + Select the agent configured for remote development. + Select the Workspaces tab. + From the list, you can restart, stop, or terminate an existing workspace. + + + + caution When you terminate a workspace, any unsaved or uncommitted data +in that workspace is deleted and cannot be recovered. + + +Identify an agent from a running workspace + + +In deployments that contain multiple agents, you might want to identify an agent from a running workspace. + +To identify an agent associated with a running workspace, use one of the following GraphQL endpoints: + + + +agent-id to return the project the agent belongs to. + +Query.workspaces to return: + + The cluster agent associated with the workspace. + The project the agent belongs to. + + + + +Devfile + + +A devfile is a file that defines a development environment by specifying the necessary +tools, languages, runtimes, and other components for a GitLab project. + +Workspaces have built-in support for devfiles. +You can specify a devfile for your project in the GitLab configuration file. +The devfile is used to automatically configure the development environment with the defined specifications. + +This way, you can create consistent and reproducible development environments +regardless of the machine or platform you use. + +Validation rules + + + + +schemaVersion must be 2.2.0. + The devfile must have at least one component. + For components: + + Names must not start with gl-. + Only container and volume are supported. + + + For commands, IDs must not start with gl-. + For events: + + Names must not start with gl-. + Only preStart is supported. + + + +parent, projects, and starterProjects are not supported. + For variables, keys must not start with gl-, gl_, GL-, or GL_. + + + +container component type + + +Use the container component type to define a container image as the execution environment for a workspace. +You can specify the base image, dependencies, and other settings. + +The container component type supports the following schema properties only: + + + + + Property + Description + + + + + image + Name of the container image to use for the workspace. + + + memoryRequest + Minimum amount of memory the container can use. + + + memoryLimit + Maximum amount of memory the container can use. + + + cpuRequest + Minimum amount of CPU the container can use. + + + cpuLimit + Maximum amount of CPU the container can use. + + + env + Environment variables to use in the container. Names must not start with gl-. + + + endpoints + Port mappings to expose from the container. Names must not start with gl-. + + + volumeMounts + Storage volume to mount in the container. + + + + +Example configurations + + +The following is an example devfile configuration: + +schemaVersion: 2.2.0 +variables: + registry-root: registry.gitlab.com +components: + - name: tooling-container + attributes: + gl/inject-editor: true + container: + image: ""{{registry-root}}/gitlab-org/remote-development/gitlab-remote-development-docs/ubuntu:22.04"" + env: + - name: KEY + value: VALUE + endpoints: + - name: http-3000 + targetPort: 3000 + + +For more information, see the devfile documentation. +For other examples, see the examples projects. + +This container image is for demonstration purposes only. +To use your own container image, see Arbitrary user IDs. 
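The example above only exercises image, env, and endpoints. The remaining container properties from the table, together with a volume component, can be combined as in the sketch below. This is a sketch only, following the devfile 2.2.0 schema; the image, resource values, and volume size are placeholders:

schemaVersion: 2.2.0
components:
  - name: tooling-container
    attributes:
      gl/inject-editor: true
    container:
      image: registry.gitlab.com/my-group/my-project/tooling:latest  # placeholder image
      memoryRequest: 1024Mi   # minimum memory the container can use
      memoryLimit: 2048Mi     # maximum memory the container can use
      cpuRequest: 500m        # minimum CPU the container can use
      cpuLimit: "1"           # maximum CPU the container can use
      volumeMounts:
        - name: dependency-cache   # volume name, must not start with gl-
          path: /home/tooling/.cache
  - name: dependency-cache
    volume:
      size: 1Gi

As in the example above, exactly one component carries the gl/inject-editor attribute; the volume is declared as its own component and referenced from volumeMounts by name.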
+ +GitLab VS Code fork + + +By default, workspaces inject and start the GitLab VS Code fork +in the container that has a defined gl/inject-editor attribute in the devfile. +The workspace container where the GitLab VS Code fork is injected +must meet the following system requirements: + + + +System architecture: AMD64 + +System libraries: + + +glibc 2.28 and later + +glibcxx 3.4.25 and later + + + + +These requirements have been tested on Debian 10.13 and Ubuntu 20.04. +For more information, see the VS Code documentation. + +Personal access token + + + +History + + + + + +Introduced in GitLab 16.4. + + + + + + +When you create a workspace, you get a personal access token with write_repository permission. +This token is used to initially clone the project while starting the workspace. + +Any Git operation you perform in the workspace uses this token for authentication and authorization. +When you terminate the workspace, the token is revoked. + +Pod interaction in a cluster + + +Workspaces run as pods in a Kubernetes cluster. +GitLab does not impose any restrictions on the manner in which pods interact with each other. + +Because of this requirement, you might want to isolate this feature from other containers in your cluster. + +Network access and workspace authorization + + +It’s the client’s responsibility to restrict network access to the Kubernetes control plane +because GitLab does not have control over the API. + +Only the workspace creator can access the workspace and any endpoints exposed in that workspace. +The workspace creator is only authorized to access the workspace after user authentication with OAuth. + +Compute resources and volume storage + + +When you stop a workspace, the compute resources for that workspace are scaled down to zero. +However, the volume provisioned for the workspace still exists. + +To delete the provisioned volume, you must terminate the workspace. + +Arbitrary user IDs + + +You can provide your own container image, which can run as any Linux user ID. + +It’s not possible for GitLab to predict the Linux user ID for a container image. +GitLab uses the Linux root group ID permission to create, update, or delete files in a container. +The container runtime used by the Kubernetes cluster must ensure all containers have a default Linux group ID of 0. + +If you have a container image that does not support arbitrary user IDs, +you cannot create, update, or delete files in a workspace. +To create a container image that supports arbitrary user IDs, +see Create a custom workspace image that supports arbitrary user IDs. + +For more information, see the +OpenShift documentation. + +Related topics + + + + GitLab workspaces demo + + + +" +how can i convert a vscode devcontainer into a gitlab workspace?,,"1. Workspaces + + + +Workspaces + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + +Introduced in GitLab 15.11 with a flag named remote_development_feature_flag. Disabled by default. + +Enabled on GitLab.com and self-managed in GitLab 16.0. + +Generally available in GitLab 16.7. Feature flag remote_development_feature_flag removed. + + + + + + +A workspace is a virtual sandbox environment for your code in GitLab. +You can use workspaces to create and manage isolated development environments for your GitLab projects. +These environments ensure that different projects don’t interfere with each other. 
+ +Each workspace includes its own set of dependencies, libraries, and tools, +which you can customize to meet the specific needs of each project. + +Workspaces and projects + + +Workspaces are scoped to a project. +When you create a workspace, you must: + + + Assign the workspace to a specific project. + Select a project with a .devfile.yaml file. + + +The workspace can interact with the GitLab API, with the access level defined by current user permissions. +A running workspace remains accessible even if user permissions are later revoked. + +Manage workspaces from a project + + + +History + + + + + +Introduced in GitLab 16.2. + + + + + + +To manage workspaces from a project: + + + On the left sidebar, select Search or go to and find your project. + In the upper right, select Edit. + From the dropdown list, under Your workspaces, you can: + + Restart, stop, or terminate an existing workspace. + Create a new workspace. + + + + + + caution When you terminate a workspace, any unsaved or uncommitted data +in that workspace is deleted and cannot be recovered. + + +Deleting data associated with a workspace + + +When you delete a project, agent, user, or token associated with a workspace: + + + The workspace is deleted from the user interface. + In the Kubernetes cluster, the running workspace resources become orphaned and are not automatically deleted. + + +To clean up orphaned resources, an administrator must manually delete the workspace in Kubernetes. + +Issue 414384 proposes to change this behavior. + +Manage workspaces at the agent level + + + +History + + + + + +Introduced in GitLab 16.8. + + + + + + +To manage all workspaces associated with an agent: + + + On the left sidebar, select Search or go to and find your project. + Select Operate > Kubernetes clusters. + Select the agent configured for remote development. + Select the Workspaces tab. + From the list, you can restart, stop, or terminate an existing workspace. + + + + caution When you terminate a workspace, any unsaved or uncommitted data +in that workspace is deleted and cannot be recovered. + + +Identify an agent from a running workspace + + +In deployments that contain multiple agents, you might want to identify an agent from a running workspace. + +To identify an agent associated with a running workspace, use one of the following GraphQL endpoints: + + + +agent-id to return the project the agent belongs to. + +Query.workspaces to return: + + The cluster agent associated with the workspace. + The project the agent belongs to. + + + + +Devfile + + +A devfile is a file that defines a development environment by specifying the necessary +tools, languages, runtimes, and other components for a GitLab project. + +Workspaces have built-in support for devfiles. +You can specify a devfile for your project in the GitLab configuration file. +The devfile is used to automatically configure the development environment with the defined specifications. + +This way, you can create consistent and reproducible development environments +regardless of the machine or platform you use. + +Validation rules + + + + +schemaVersion must be 2.2.0. + The devfile must have at least one component. + For components: + + Names must not start with gl-. + Only container and volume are supported. + + + For commands, IDs must not start with gl-. + For events: + + Names must not start with gl-. + Only preStart is supported. + + + +parent, projects, and starterProjects are not supported. + For variables, keys must not start with gl-, gl_, GL-, or GL_. 
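A short sketch of a devfile that stays within these rules and uses a preStart event to run a setup command might look like the following. The image, command ID, and command line are illustrative assumptions, not required values.

schemaVersion: 2.2.0
components:
  - name: tooling-container            # component names must not start with gl-
    attributes:
      gl/inject-editor: true
    container:
      image: registry.gitlab.com/your-group/your-project/tooling:latest   # placeholder image
commands:
  - id: install-dependencies           # command IDs must not start with gl-
    exec:
      component: tooling-container
      commandLine: "npm ci"            # assumed setup step for a Node.js project
events:
  preStart:                            # only preStart is supported
    - install-dependencies

Names and IDs that start with gl- are reserved for GitLab and fail validation.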
+ + + +container component type + + +Use the container component type to define a container image as the execution environment for a workspace. +You can specify the base image, dependencies, and other settings. + +The container component type supports the following schema properties only: + + + + + Property + Description + + + + + image + Name of the container image to use for the workspace. + + + memoryRequest + Minimum amount of memory the container can use. + + + memoryLimit + Maximum amount of memory the container can use. + + + cpuRequest + Minimum amount of CPU the container can use. + + + cpuLimit + Maximum amount of CPU the container can use. + + + env + Environment variables to use in the container. Names must not start with gl-. + + + endpoints + Port mappings to expose from the container. Names must not start with gl-. + + + volumeMounts + Storage volume to mount in the container. + + + + +Example configurations + + +The following is an example devfile configuration: + +schemaVersion: 2.2.0 +variables: + registry-root: registry.gitlab.com +components: + - name: tooling-container + attributes: + gl/inject-editor: true + container: + image: ""{{registry-root}}/gitlab-org/remote-development/gitlab-remote-development-docs/ubuntu:22.04"" + env: + - name: KEY + value: VALUE + endpoints: + - name: http-3000 + targetPort: 3000 + + +For more information, see the devfile documentation. +For other examples, see the examples projects. + +This container image is for demonstration purposes only. +To use your own container image, see Arbitrary user IDs. + +GitLab VS Code fork + + +By default, workspaces inject and start the GitLab VS Code fork +in the container that has a defined gl/inject-editor attribute in the devfile. +The workspace container where the GitLab VS Code fork is injected +must meet the following system requirements: + + + +System architecture: AMD64 + +System libraries: + + +glibc 2.28 and later + +glibcxx 3.4.25 and later + + + + +These requirements have been tested on Debian 10.13 and Ubuntu 20.04. +For more information, see the VS Code documentation. + +Personal access token + + + +History + + + + + +Introduced in GitLab 16.4. + + + + + + +When you create a workspace, you get a personal access token with write_repository permission. +This token is used to initially clone the project while starting the workspace. + +Any Git operation you perform in the workspace uses this token for authentication and authorization. +When you terminate the workspace, the token is revoked. + +Pod interaction in a cluster + + +Workspaces run as pods in a Kubernetes cluster. +GitLab does not impose any restrictions on the manner in which pods interact with each other. + +Because of this requirement, you might want to isolate this feature from other containers in your cluster. + +Network access and workspace authorization + + +It’s the client’s responsibility to restrict network access to the Kubernetes control plane +because GitLab does not have control over the API. + +Only the workspace creator can access the workspace and any endpoints exposed in that workspace. +The workspace creator is only authorized to access the workspace after user authentication with OAuth. + +Compute resources and volume storage + + +When you stop a workspace, the compute resources for that workspace are scaled down to zero. +However, the volume provisioned for the workspace still exists. + +To delete the provisioned volume, you must terminate the workspace. 
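As noted in the pod interaction section above, GitLab does not restrict traffic between workspace pods, so cluster operators who want to isolate workspaces from other workloads must do so themselves. The following Kubernetes NetworkPolicy is only a rough sketch: the namespace names, and the assumption that workspaces and the GitLab agent each run in their own namespace, are placeholders you would adapt to your cluster.

apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: limit-workspace-ingress
  namespace: gitlab-workspaces                                 # assumption: namespace where workspace pods run
spec:
  podSelector: {}                                              # apply to every pod in the namespace
  policyTypes:
    - Ingress
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: gitlab-workspaces   # allow workspace-to-workspace traffic
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: gitlab-agent        # assumption: namespace of the GitLab agent that proxies traffic

Test any such policy carefully, because blocking traffic from the agent namespace can break the editor connection to the workspace.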
+ +Arbitrary user IDs + + +You can provide your own container image, which can run as any Linux user ID. + +It’s not possible for GitLab to predict the Linux user ID for a container image. +GitLab uses the Linux root group ID permission to create, update, or delete files in a container. +The container runtime used by the Kubernetes cluster must ensure all containers have a default Linux group ID of 0. + +If you have a container image that does not support arbitrary user IDs, +you cannot create, update, or delete files in a workspace. +To create a container image that supports arbitrary user IDs, +see Create a custom workspace image that supports arbitrary user IDs. + +For more information, see the +OpenShift documentation. + +Related topics + + + + GitLab workspaces demo + + + +2. Remote development + + + +Remote development + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + +Introduced in GitLab 15.4 with a flag named vscode_web_ide. Disabled by default. + +Enabled on GitLab.com in GitLab 15.7. + +Enabled on self-managed in GitLab 15.11. + + + + + + + + On self-managed GitLab, by default this feature is available. To hide the feature, an administrator can disable the feature flag named vscode_web_ide. On GitLab.com and GitLab Dedicated, this feature is available. The feature is not ready for production use. + + +You can use remote development to write and compile code hosted on GitLab. +With remote development, you can: + + + Create a secure development environment in the cloud. + Connect to that environment from your local machine through a web browser or client-based solution. + + +Web IDE as a frontend + + +You can use the Web IDE to make, commit, and push changes to a project directly from your web browser. +This way, you can update any project without having to install any dependencies or clone any repositories locally. + +The Web IDE, however, lacks a native runtime environment where you could compile code, run tests, or generate real-time feedback. +With remote development, you can use: + + + The Web IDE as a frontend + A separate machine as a backend runtime environment + + +For a complete IDE experience, connect the Web IDE to a development environment configured to run as a remote host. +You can create this environment inside or outside of GitLab. + +Workspaces + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +A workspace is a virtual sandbox environment for your code in GitLab that includes: + + + A runtime environment + Dependencies + Configuration files + + +You can create a workspace from scratch or from a template that you can also customize. + +When you configure and connect a workspace to the Web IDE, you can: + + + Edit files directly from the Web IDE and commit and push changes to GitLab. + Use the Web IDE to run tests, debug code, and view real-time feedback. 
+ + +Manage a development environment + + +Create a development environment + + +To create a development environment, run this command: + +export CERTS_DIR=""/home/ubuntu/.certbot/config/live/${DOMAIN}"" +export PROJECTS_DIR=""/home/ubuntu"" + +docker run -d \ + --name my-environment \ + -p 3443:3443 \ + -v ""${CERTS_DIR}/fullchain.pem:/gitlab-rd-web-ide/certs/fullchain.pem"" \ + -v ""${CERTS_DIR}/privkey.pem:/gitlab-rd-web-ide/certs/privkey.pem"" \ + -v ""${PROJECTS_DIR}:/projects"" \ + registry.gitlab.com/gitlab-org/remote-development/gitlab-rd-web-ide-docker:0.2-alpha \ + --log-level warn --domain ""${DOMAIN}"" --ignore-version-mismatch + + +The new development environment starts automatically. + +Stop a development environment + + +To stop a running development environment, run this command: + +docker container stop my-environment + + +Start a development environment + + +To start a stopped development environment, run this command: + +docker container start my-environment + + +The token changes every time you start the development environment. + +Remove a development environment + + +To remove a development environment: + + + +Stop the development environment. + + Run this command: + + +docker container rm my-environment + + + + + +" +where can i find documentation for azure saml sso,,"1. SAML SSO for GitLab.com groups + + + +SAML SSO for GitLab.com groups + + + +Tier: Premium, Ultimate +Offering: GitLab.com + + +History + + + + + Introduced in GitLab 11.0. + + + + + + +Users can sign in to GitLab through their SAML identity provider. + +SCIM synchronizes users with the group on GitLab.com. + + + When you add or remove a user from the SCIM app, SCIM adds or removes the user +from the GitLab group. + If the user is not already a group member, the user is added to the group as part of the sign-in process. + + +You can configure SAML SSO for the top-level group only. + +Set up your identity provider + + +The SAML standard means that you can use a wide range of identity providers with GitLab. Your identity provider might have relevant documentation. It can be generic SAML documentation or specifically targeted for GitLab. + +When setting up your identity provider, use the following provider-specific documentation +to help avoid common issues and as a guide for terminology used. + +For identity providers not listed, you can refer to the instance SAML notes on configuring an identity provider +for additional guidance on information your provider may require. + +GitLab provides the following information for guidance only. +If you have any questions on configuring the SAML app, contact your provider’s support. + +If you are having issues setting up your identity provider, see the +troubleshooting documentation. + +Azure + + +To set up SSO with Azure as your identity provider: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > SAML SSO. + Note the information on this page. + + Go to Azure and follow the instructions for configuring SSO for an application. The following GitLab settings correspond to the Azure fields. + + + + + GitLab setting + Azure field + + + + + Identifier + Identifier (Entity ID) + + + Assertion consumer service URL + Reply URL (Assertion Consumer Service URL) + + + GitLab single sign-on URL + Sign on URL + + + Identity provider single sign-on URL + Login URL + + + Certificate fingerprint + Thumbprint + + + + + You should set the following attributes: + + +Unique User Identifier (Name identifier) to user.objectID. 
+ +nameid-format to persistent. For more information, see how to manage user SAML identity. + +email to user.mail or similar. + +Additional claims to supported attributes. + + + + Make sure the identity provider is set to have provider-initiated calls +to link existing GitLab accounts. + + Optional. If you use Group Sync, customize the name of the +group claim to match the required attribute. + + + +View a demo of SCIM provisioning on Azure using SAML SSO for groups. The objectID mapping is outdated in this video. Follow the SCIM documentation instead. + +For more information, see an example configuration page. + +Google Workspace + + +To set up Google Workspace as your identity provider: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > SAML SSO. + Note the information on this page. + + Follow the instructions for setting up SSO with Google as your identity provider. The following GitLab settings correspond to the Google Workspace fields. + + + + + GitLab setting + Google Workspace field + + + + + Identifier + Entity ID + + + Assertion consumer service URL + ACS URL + + + GitLab single sign-on URL + Start URL + + + Identity provider single sign-on URL + SSO URL + + + + + Google Workspace displays a SHA256 fingerprint. To retrieve the SHA1 fingerprint +required by GitLab to configure SAML: + + Download the certificate. + + Run this command: + + +openssl x509 -noout -fingerprint -sha1 -inform pem -in ""GoogleIDPCertificate-domain.com.pem"" + + + + + Set these values: + + For Primary email: email. + For First name: first_name. + For Last name: last_name. + For Name ID format: EMAIL. + For NameID: Basic Information > Primary email. +For more information, see supported attributes. + + + Make sure the identity provider is set to have provider-initiated calls +to link existing GitLab accounts. + + +On the GitLab SAML SSO page, when you select Verify SAML Configuration, disregard +the warning that recommends setting the NameID format to persistent. + +For more information, see an example configuration page. + + +View a demo of how to configure SAML with Google Workspaces and set up Group Sync. + +Okta + + +To set up SSO with Okta as your identity provider: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > SAML SSO. + Note the information on this page. + + Follow the instructions for setting up a SAML application in Okta. + + The following GitLab settings correspond to the Okta fields. + + + + + GitLab setting + Okta field + + + + + Identifier + Audience URI + + + Assertion consumer service URL + Single sign-on URL + + + GitLab single sign-on URL + +Login page URL (under Application Login Page settings) + + + Identity provider single sign-on URL + Identity Provider Single Sign-On URL + + + + + + Under the Okta Single sign-on URL field, select the Use this for Recipient URL and Destination URL checkbox. + + Set these values: + + For Application username (NameID): Custom user.getInternalProperty(""id""). + For Name ID Format: Persistent. For more information, see manage user SAML identity. + For email: user.email or similar. + For additional Attribute Statements, see supported attributes. + + + Make sure the identity provider is set to have provider-initiated calls +to link existing GitLab accounts. + + +The Okta GitLab application available in the App Catalog only supports SCIM. Support +for SAML is proposed in issue 216173. + + +For a demo of the Okta SAML setup including SCIM, see Demo: Okta Group SAML & SCIM setup. 
+ +For more information, see an example configuration page + +OneLogin + + +OneLogin supports its own GitLab (SaaS) application. + +To set up OneLogin as your identity provider: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > SAML SSO. + Note the information on this page. + + If you use the OneLogin generic +SAML Test Connector (Advanced), +you should use the OneLogin SAML Test Connector. The following GitLab settings correspond +to the OneLogin fields: + + + + + GitLab setting + OneLogin field + + + + + Identifier + Audience + + + Assertion consumer service URL + Recipient + + + Assertion consumer service URL + ACS (Consumer) URL + + + Assertion consumer service URL (escaped version) + ACS (Consumer) URL Validator + + + GitLab single sign-on URL + Login URL + + + Identity provider single sign-on URL + SAML 2.0 Endpoint + + + + + For NameID, use OneLogin ID. For more information, see manage user SAML identity. + Configure required and supported attributes. + Make sure the identity provider is set to have provider-initiated calls +to link existing GitLab accounts. + + +Configure assertions + + + + note The attributes are case-sensitive. + + +At minimum, you must configure the following assertions: + + + +NameID. + Email. + + +Optionally, you can pass user information to GitLab as attributes in the SAML assertion. + + + The user’s email address can be an email or mail attribute. + The username can be either a username or nickname attribute. You should specify only +one of these. + + +For more information, see the attributes available for self-managed GitLab instances. + +Use metadata + + +To configure some identity providers, you need a GitLab metadata URL. +To find this URL: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > SAML SSO. + Copy the provided GitLab metadata URL. + Follow your identity provider’s documentation and paste the metadata URL when it’s requested. + + +Check your identity provider’s documentation to see if it supports the GitLab metadata URL. + +Manage the identity provider + + +After you have set up your identity provider, you can: + + + Change the identity provider. + Change email domains. + + +Change the identity provider + + +You can change to a different identity provider. During the change process, +users cannot access any of the SAML groups. To mitigate this, you can disable +SSO enforcement. + +To change identity providers: + + + +Configure the group with the new identity provider. + Optional. If the NameID is not identical, change the NameID for users. + + +Change email domains + + +To migrate users to a new email domain, tell users to: + + + +Add their new email as the primary email to their accounts and verify it. + Optional. Remove their old email from the account. + + +If the NameID is configured with the email address, change the NameID for users. + +Configure GitLab + + + +History + + + + + Ability to set a custom role as the default membership role introduced in GitLab 16.7. + + + + + + +After you set up your identity provider to work with GitLab, you must configure GitLab to use it for authentication: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > SAML SSO. + Complete the fields: + + In the Identity provider single sign-on URL field, enter the SSO URL from your identity provider. + In the Certificate fingerprint field, enter the fingerprint for the SAML token signing certificate. 
+ + + In the Default membership role field, select the role to assign to new users. +The default role is Guest. That role becomes the starting role of all users +added to the group: + + In GitLab 13.3 and +later, group Owners can set a default membership role other than Guest. + In GitLab 16.7 and later, group Owners can set a custom role +as the default membership role. + + + Select the Enable SAML authentication for this group checkbox. + Optional. Select: + + +Enforce SSO-only authentication for web activity for this group. + +Enforce SSO-only authentication for Git activity for this group. +For more information, see the SSO enforcement documentation. + + + Select Save changes. + + + + note The certificate fingerprint algorithm must be in SHA1. When configuring the identity provider (such as Google Workspace), use a secure signature algorithm. + + +If you are having issues configuring GitLab, see the troubleshooting documentation. + +User access and management + + + +History + + + + + SAML user provisioning introduced in GitLab 13.7. + + + + + + +After group SSO is configured and enabled, users can access the GitLab.com group through the identity provider’s dashboard. +If SCIM is configured, see user access on the SCIM page. + +When a user tries to sign in with Group SSO, GitLab attempts to find or create a user based on the following: + + + Find an existing user with a matching SAML identity. This would mean the user either had their account created by SCIM or they have previously signed in with the group’s SAML IdP. + If there is no conflicting user with the same email address, create a new account automatically. + If there is a conflicting user with the same email address, redirect the user to the sign-in page to: + + Create a new account with another email address. + Sign-in to their existing account to link the SAML identity. + + + + +Link SAML to your existing GitLab.com account + + + +History + + + + + +Remember me checkbox introduced in GitLab 15.7. + + + + + + +To link SAML to your existing GitLab.com account: + + + Sign in to your GitLab.com account. Reset your password +if necessary. + Locate and visit the GitLab single sign-on URL for the group you’re signing +in to. A group owner can find this on the group’s Settings > SAML SSO page. +If the sign-in URL is configured, users can connect to the GitLab app from the identity provider. + Optional. Select the Remember me checkbox to stay signed in to GitLab for 2 weeks. +You may still be asked to re-authenticate with your SAML provider more frequently. + Select Authorize. + Enter your credentials on the identity provider if prompted. + You are then redirected back to GitLab.com and should now have access to the group. +In the future, you can use SAML to sign in to GitLab.com. + + +If a user is already a member of the group, linking the SAML identity does not +change their role. + +On subsequent visits, you should be able to sign in to GitLab.com with SAML +or by visiting links directly. If the enforce SSO option is turned on, you +are then redirected to sign in through the identity provider. + +Sign in to GitLab.com with SAML + + + + Sign in to your identity provider. + From the list of apps, select the “GitLab.com” app. (The name is set by the administrator of the identity provider.) + You are then signed in to GitLab.com and redirected to the group. + + +Manage user SAML identity + + + +History + + + + + Update of SAML identities using the SAML API introduced in GitLab 15.5. 
+ + + + + + +GitLab.com uses the SAML NameID to identify users. The NameID is: + + + A required field in the SAML response. + Case sensitive. + + +The NameID must: + + + Be unique to each user. + Be a persistent value that never changes, such as a randomly generated unique user ID. + Match exactly on subsequent sign-in attempts, so it should not rely on user input +that could change between upper and lower case. + + +The NameID should not be an email address or username because: + + + Email addresses and usernames are more likely to change over time. For example, +when a person’s name changes. + Email addresses are case-insensitive, which can result in users being unable to +sign in. + + +The NameID format must be Persistent, unless you are using a field, like email, that +requires a different format. You can use any format except Transient. + +Change user NameID + + +Group owners can use the SAML API to change their group members’ NameID and update their SAML identities. + +If SCIM is configured, group owners can update the SCIM identities using the SCIM API. + +Alternatively, ask the users to reconnect their SAML account. + + + Ask relevant users to unlink their account from the group. + Ask relevant users to link their account to the new SAML app. + + + + caution After users have signed into GitLab using SSO SAML, changing the NameID value +breaks the configuration and could lock users out of the GitLab group. + + +For more information on the recommended value and format for specific identity +providers, see set up your identity provider. + +Configure enterprise user settings from SAML response + + + +History + + + + + +Introduced in GitLab 13.7. + +Changed to configure only enterprise user settings in GitLab 16.7. + + + + + + +GitLab allows setting certain user attributes based on values from the SAML response. +An existing user’s attributes are updated from the SAML response values if that +user is an enterprise user of the group. + +Supported user attributes + + + + +can_create_group - true or false to indicate whether an enterprise user can create +new top-level groups. Default is true. + +projects_limit - The total number of personal projects an enterprise user can create. +A value of 0 means the user cannot create new projects in their personal +namespace. Default is 100000. + + +Example SAML response + + +You can find SAML responses in the developer tools or console of your browser, +in base64-encoded format. Use the base64 decoding tool of your choice to +convert the information to XML. An example SAML response is shown here. + + + + user.email + + + user.nickName + + + user.firstName + + + user.lastName + + + true + + + 10 + + + + +Bypass user email confirmation with verified domains + + + +History + + + + + +Introduced in GitLab 15.4. + + + + + + +By default, users provisioned with SAML or SCIM are sent a verification email to verify their identity. Instead, you can +configure GitLab with a custom domain and GitLab +automatically confirms user accounts. Users still receive an +enterprise user welcome email. Confirmation is bypassed if both of the following are true: + + + The user is provisioned with SAML or SCIM. + The user has an email address that belongs to the verified domain. + + +Block user access + + +To rescind a user’s access to the group when only SAML SSO is configured, either: + + + Remove (in order) the user from: + + The user data store on the identity provider or the list of users on the specific app. + The GitLab.com group. 
+ + + Use Group Sync at the top-level of +your group with the default role set to minimal access +to automatically block access to all resources in the group. + + +To rescind a user’s access to the group when also using SCIM, refer to Remove access. + +Unlink accounts + + +Users can unlink SAML for a group from their profile page. This can be helpful if: + + + You no longer want a group to be able to sign you in to GitLab.com. + Your SAML NameID has changed and so GitLab can no longer find your user. + + + + caution Unlinking an account removes all roles assigned to that user in the group. +If a user re-links their account, roles need to be reassigned. + + +Groups require at least one owner. If your account is the only owner in the +group, you are not allowed to unlink the account. In that case, set up another user as a +group owner, and then you can unlink the account. + +For example, to unlink the MyOrg account: + + + On the left sidebar, select your avatar. + Select Edit profile. + On the left sidebar, select Account. + In the Service sign-in section, select Disconnect next to the connected account. + + +SSO enforcement + + + +History + + + + + +Introduced in GitLab 11.8. + +Improved in GitLab 11.11 with ongoing enforcement in the GitLab UI. + +Improved in GitLab 13.8, with an updated timeout experience. + +Improved in GitLab 13.8 with allowing group owners to not go through SSO. + +Improved in GitLab 13.11 with enforcing open SSO session to use Git if this setting is switched on. + +Improved in GitLab 14.7 to not enforce SSO checks for Git activity originating from CI/CD jobs. + +Improved in GitLab 15.5 with a flag named transparent_sso_enforcement to include transparent enforcement even when SSO enforcement is not enabled. Disabled on GitLab.com. + +Improved in GitLab 15.8 by enabling transparent SSO by default on GitLab.com. + +Generally available in GitLab 15.10. Feature flag transparent_sso_enforcement removed. + + + + + + +On GitLab.com, SSO is enforced: + + + When SAML SSO is enabled. + For users with an existing SAML identity when accessing groups and projects in the organization’s +group hierarchy. Users can view other groups and projects as well as their user settings without SSO sign in by using their GitLab.com credentials. + + +A user has a SAML identity if one or both of the following are true: + + + They have signed in to GitLab by using their GitLab group’s single sign-on URL. + They were provisioned by SCIM. + + +Users are not prompted to sign in through SSO on each visit. GitLab checks +whether a user has authenticated through SSO. If the user last signed in more +than 24 hours ago, GitLab prompts the user to sign in again through SSO. + +SSO is enforced as follows: + + + + + Project/Group visibility + Enforce SSO setting + Member with identity + Member without identity + Non-member or not signed in + + + + + Private + Off + Enforced + Not enforced + Not enforced + + + Private + On + Enforced + Enforced + Enforced + + + Public + Off + Enforced + Not enforced + Not enforced + + + Public + On + Enforced + Enforced + Not enforced + + + + +An issue exists to add a similar SSO requirement for API activity. + +SSO-only for web activity enforcement + + +When the Enforce SSO-only authentication for web activity for this group option is enabled: + + + All members must access GitLab by using their GitLab group’s single sign-on URL +to access group resources, regardless of whether they have an existing SAML +identity. 
+ SSO is enforced when users access groups and projects in the organization’s +group hierarchy. Users can view other groups and projects without SSO sign in. + Users cannot be added as new members manually. + Users with the Owner role can use the standard sign in process to make +necessary changes to top-level group settings. + For non-members or users who are not signed in: + + SSO is not enforced when they access public group resources. + SSO is enforced when they access private group resources. + + + For items in the organization’s group hierarchy, dashboard visibility is as +follows: + + SSO is enforced when viewing your To-Do List. Your +to-do items are hidden if your SSO session has expired, and an +alert is shown. + SSO is enforced when viewing your list of assigned issues. Your issues are +hidden if your SSO session has expired. +Issue 414475 proposes to change this +behavior so that issues are visible. + SSO is not enforced when viewing lists of merge requests where you are the +assignee or your review is requested. You can see merge requests even if +your SSO session has expired. + + + + +SSO enforcement for web activity has the following effects when enabled: + + + For groups, users cannot share a project in the group outside the top-level +group, even if the project is forked. + Git activity originating from CI/CD jobs do not have the SSO check enforced. + Credentials that are not tied to regular users (for example, project and group +access tokens, and deploy keys) do not have the SSO check enforced. + Users must be signed-in through SSO before they can pull images using the +Dependency Proxy. + When the Enforce SSO-only authentication for Git and Dependency Proxy +activity for this group option is enabled, any API endpoint that involves +Git activity is under SSO enforcement. For example, creating or deleting a +branch, commit, or tag. For Git activity over SSH and HTTPS, users must +have at least one active session signed-in through SSO before they can push to or +pull from a GitLab repository. + + +When SSO for web activity is enforced, non-SSO group members do not lose access +immediately. If the user: + + + Has an active session, they can continue accessing the group for up to 24 +hours until the identity provider session times out. + Is signed out, they cannot access the group after being removed from the +identity provider. + + +Related topics + + + + SAML SSO for self-managed GitLab instances + Glossary + Blog post: The ultimate guide to enabling SAML and SSO on GitLab.com + Authentication comparison between SaaS and self-managed + Passwords for users created through integrated authentication + SAML Group Sync + + +Troubleshooting + + +If you find it difficult to match the different SAML terms between GitLab and the +identity provider: + + + Check your identity provider’s documentation. Look at their example SAML +configurations for information on the terms they use. + Check the SAML SSO for self-managed GitLab instances documentation. +The self-managed GitLab instance SAML configuration file supports more options +than the GitLab.com file. You can find information on the self-managed instance +file in the: + + External OmniAuth SAML documentation. + +ruby-saml library. + + + Compare the XML response from your provider with our +example XML used for internal testing. + + +For other troubleshooting information, see the troubleshooting SAML guide. + + +2. 
SAML Group Sync + + + +SAML Group Sync + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + +Introduced for self-managed instances in GitLab 15.1. + + + + + + + + caution Adding or changing Group Sync configuration can remove users from the mapped GitLab group. +Removal happens if there is any mismatch between the group names and the list of groups in the SAML response. +Before making changes, ensure either the SAML response includes the groups attribute +and the AttributeValue value matches the SAML Group Name in GitLab, +or that all groups are removed from GitLab to disable Group Sync. + + + +For a demo of Group Sync using Azure, see Demo: SAML Group Sync. + +Configure SAML Group Links + + +SAML Group Sync only manages a group if that group has one or more SAML group links. + +Prerequisites: + + + Self-managed GitLab instances must have configured SAML Group Sync. GitLab.com +instances are already configured for SAML Group Sync, and require no extra configuration. + + +When SAML is enabled, users with the Owner role see a new menu +item in group Settings > SAML Group Links. + + + You can configure one or more SAML Group Links to map a SAML identity +provider group name to a GitLab role. + Members of the SAML identity provider group are added as members of the GitLab +group on their next SAML sign-in. + Group membership is evaluated each time a user signs in using SAML. + SAML Group Links can be configured for a top-level group or any subgroup. + If a SAML group link is created then removed, and there are: + + Other SAML group links configured, users that were in the removed group +link are automatically removed from the group during sync. + No other SAML group links configured, users remain in the group during sync. +Those users must be manually removed from the group. + + + + +To link the SAML groups: + + + In SAML Group Name, enter the value of the relevant saml:AttributeValue. The value entered here must exactly match the value sent in the SAML response. For some IdPs, this may be a group ID or object ID (Azure AD) instead of a friendly group name. + Choose the role in Access Level. + Select Save. + Repeat to add additional group links if required. + + + + +If a user is a member of multiple SAML groups mapped to the same GitLab group, +the user gets the highest role from the groups. For example, if one group +is linked as Guest and another Maintainer, a user in both groups gets the Maintainer +role. + +Users granted: + + + A higher role with Group Sync are displayed as having +direct membership of the group. + A lower or the same role with Group Sync are displayed as having +inherited membership of the group. + + +Use the API + + + +History + + + + + +Introduced in GitLab 15.3. + + + + + + +You can use the GitLab API to list, add, and delete SAML group links. + +Configure SAML Group Sync + + + + note You must include the SAML configuration block on all Sidekiq nodes in addition to Rails application nodes if you use SAML Group Sync and have multiple GitLab nodes, for example in a distributed or highly available architecture. + + + + caution To prevent users being accidentally removed from the GitLab group, follow these instructions closely before +enabling Group Sync in GitLab. + + +To configure SAML Group Sync for self-managed GitLab instances: + + + Configure the SAML OmniAuth Provider. + + Ensure your SAML identity provider sends an attribute statement with the same name as the value of the groups_attribute setting. 
See the following provider configuration example in /etc/gitlab/gitlab.rb for reference: + + +gitlab_rails['omniauth_providers'] = [ + { + name: ""saml"", + label: ""Provider name"", # optional label for login button, defaults to ""Saml"", + groups_attribute: 'Groups', + args: { + assertion_consumer_service_url: ""https://gitlab.example.com/users/auth/saml/callback"", + idp_cert_fingerprint: ""43:51:43:a1:b5:fc:8b:b7:0a:3a:a9:b1:0f:66:73:a8"", + idp_sso_target_url: ""https://login.example.com/idp"", + issuer: ""https://gitlab.example.com"", + name_identifier_format: ""urn:oasis:names:tc:SAML:2.0:nameid-format:persistent"" + } + } +] + + + + +To configure SAML Group Sync for GitLab.com instances: + + + See SAML SSO for GitLab.com groups. + Ensure your SAML identity provider sends an attribute statement named Groups or groups. + + + + note The value for Groups or groups in the SAML response may be either the group name or an ID. +For example, Azure AD sends the Azure Group Object ID instead of the name. Use the ID value when configuring SAML Group Links. + + + + + Developers + Product Managers + + + + +Other attribute names such as http://schemas.microsoft.com/ws/2008/06/identity/claims/groups +are not accepted as a source of groups. + +For more information on configuring the +required group attribute name in the SAML identity provider’s settings, see +example configurations for Azure AD and Okta. + +Microsoft Azure Active Directory integration + + + +History + + + + + +Introduced in GitLab 16.3. + + + + + + + + note Microsoft has announced that Azure Active Directory (AD) is being renamed to Entra ID. + + +Azure AD sends up to 150 groups in the groups claim. When users are members of more than 150 groups Azure AD sends a +group overage claim attribute in the SAML response. Then group memberships must be obtained using the Microsoft Graph API. + +To integrate Microsoft Azure AD, you: + + + Configure Azure AD to enable GitLab to communicate with the Microsoft Graph API. + Configure GitLab. + + +GitLab settings to Azure AD fields + + + + + + GitLab setting + Azure field + + + + + Tenant ID + Directory (tenant) ID + + + Client ID + Application (client) ID + + + Client Secret + Value (on Certificates & secrets page) + + + + +Configure Azure AD + + + + + + In the Azure Portal, go to Microsoft Entra ID > App registrations > All applications, and select your GitLab SAML application. + Under Essentials, the Application (client) ID and Directory (tenant) ID values are displayed. Copy these values, because you need them for the GitLab configuration. + In the left navigation, select Certificates & secrets. + On the Client secrets tab, select New client secret. + + In the Description text box, add a description. + In the Expires dropdown list, set the expiration date for the credentials. If the secret expires, the GitLab integration will no longer work until the credentials are updated. + To generate the credentials, select Add. + Copy the Value of the credential. This value is displayed only once, and you need it for the GitLab configuration. + + + In the left navigation, select API permissions. + Select Microsoft Graph > Application permissions. + Select the checkboxes GroupMember.Read.All and User.Read.All. + Select Add permissions to save. + Select Grant admin consent for , then on the confirmation dialog select Yes. The Status column for both permissions should change to a green check with Granted for . 
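Optionally, before entering these values in GitLab, you can sanity-check the credentials outside GitLab by requesting an application token and calling the Microsoft Graph API yourself. This is not a required or GitLab-documented step, just a quick verification sketch; the shell variables are placeholders for the values you copied above.

TENANT_ID="your-tenant-id"
CLIENT_ID="your-client-id"
CLIENT_SECRET="your-client-secret"

# Request an application access token using the client credentials flow
curl --request POST \
  --data "client_id=${CLIENT_ID}" \
  --data "client_secret=${CLIENT_SECRET}" \
  --data "grant_type=client_credentials" \
  --data "scope=https://graph.microsoft.com/.default" \
  "https://login.microsoftonline.com/${TENANT_ID}/oauth2/v2.0/token"

# Use the access_token from the response to list a user's transitive group memberships
curl --header "Authorization: Bearer <access-token>" \
  "https://graph.microsoft.com/v1.0/users/<user-object-id>/transitiveMemberOf"

If the second call returns the user's groups, the permissions and admin consent are set up correctly.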
+ + + + +Configure GitLab + + +To configure for a GitLab.com group: + + + On the left sidebar, select Search or go to and find your top-level group. + Select Settings > SAML SSO. + Configure SAML SSO for the group. + In the Microsoft Azure integration section, select the Enable Microsoft Azure integration for this group checkbox. +This section will only be visible if SAML SSO is configured and enabled for the group. + Enter the Tenant ID, Client ID, and Client secret obtained earlier when configuring Azure Active Directory in the Azure Portal. + Optional. If using Azure AD for US Government or Azure AD China, enter the appropriate Login API endpoint and Graph API endpoint. The default values work for most organizations. + Select Save changes. + + +To configure for self-managed: + + + Configure SAML SSO for the instance. + On the left sidebar, at the bottom, select Admin Area. + Select Settings > General. + In the Microsoft Azure integration section, select the Enable Microsoft Azure integration for this group checkbox. + Enter the Tenant ID, Client ID, and Client secret obtained earlier when configuring Azure Active Directory in the Azure Portal. + Optional. If using Azure AD for US Government or Azure AD China, enter the appropriate Login API endpoint and Graph API endpoint. The default values work for most organizations. + Select Save changes. + + +With this configuration, if a user signs in with SAML and Azure sends a group overage claim in the response, +GitLab initiates a Group Sync job to call the Microsoft Graph API and retrieve the user’s group membership. +Then the GitLab Group membership is updated according to SAML Group Links. + +Global SAML group memberships lock + + + +Tier: Premium, Ultimate +Offering: Self-managed, GitLab Dedicated + + +History + + + + + +Introduced in GitLab 15.10. + + + + + + +GitLab administrators can use the global SAML group memberships lock to prevent group members from inviting new members to subgroups that have their membership synchronized with SAML Group Links. + +Global group memberships lock only applies to subgroups of a top-level group where SAML Group Links synchronization is configured. No user can modify the +membership of a top-level group configured for SAML Group Links synchronization. + +When global group memberships lock is enabled: + + + Only an administrator can manage memberships of any group including access levels. + Users cannot: + + Share a project with other groups. + Invite members to a project created in a group. + + + + +To enable global group memberships lock: + + + +Configure SAML for your self-managed GitLab instance. + On the left sidebar, at the bottom, select Admin Area. + Select Settings > General. + Expand the Visibility and access controls section. + Ensure that Lock memberships to SAML Group Links synchronization is selected. + + +Automatic member removal + + +After a group sync, users who are not members of a mapped SAML group are removed from the group. +On GitLab.com, users in the top-level group are assigned the +default membership role instead of being removed. + +For example, in the following diagram: + + + Alex Garcia signs into GitLab and is removed from GitLab Group C because they don’t belong +to SAML Group C. + Sidney Jones belongs to SAML Group C, but is not added to GitLab Group C because they have +not yet signed in. 
[Diagram: SAML groups and SAML users, showing Sidney Jones, Zhang Wei, Alex Garcia, and Charlie Smith as members of SAML Groups A, B, C, and D.]

[Diagram: GitLab groups and GitLab users, showing Group A (SAML configured), Group B (SAML Group Link not configured), Group C (SAML Group Link configured), and Group D (SAML Group Link configured) with their current members.]

[Diagram: GitLab groups after Alex Garcia signs in, showing the same groups with Alex Garcia removed from Group C.]

User that belongs to many SAML groups automatically removed from GitLab group


When using Azure AD with SAML, if any user in your organization is a member of more than 150 groups and you use SAML Group Sync,
that user may lose their group memberships.
For more information, see
Microsoft Group overages.

GitLab has a Microsoft Azure Active Directory integration that enables SAML Group Sync for organizations
with users in more than 150 groups. This integration uses the Microsoft Graph API to obtain all user memberships and is
not limited to 150 groups.

Otherwise, you can work around this issue by changing the group claims to use the Groups assigned to the application option instead.
"
what is the maximum artifact upload limit?,,"1. Continuous Integration and Deployment Admin Area settings



Continuous Integration and Deployment Admin Area settings



Tier: Free, Premium, Ultimate
Offering: Self-managed

The Admin Area has the instance settings for Auto DevOps, runners, and
job artifacts.

Auto DevOps


To enable (or disable) Auto DevOps
for all projects:


 On the left sidebar, at the bottom, select Admin Area.
 Select Settings > CI/CD.
 Check (or uncheck to disable) the box that says Default to Auto DevOps pipeline for all projects.
 Optionally, set up the Auto DevOps base domain
which is used for Auto Deploy and Auto Review Apps.
 Select Save changes for the changes to take effect.


From now on, every existing project and newly created ones that don't have a
.gitlab-ci.yml use the Auto DevOps pipelines.

If you want to disable it for a specific project, you can do so in
its settings.

Enable instance runners for new projects


You can set all new projects to have instance runners available by default.


 On the left sidebar, at the bottom, select Admin Area.
 Select Settings > CI/CD.
 Expand Continuous Integration and Deployment.
 Select the Enable instance runners for new projects checkbox.


Any time a new project is created, the instance runners are available.
+ +Instance runners compute quota + + +As an administrator you can set either a global or namespace-specific +limit on the number of compute minutes you can use. + +Enable a project runner for multiple projects + + +If you have already registered a project runner +you can assign that runner to other projects. + +To enable a project runner for more than one project: + + + On the left sidebar, at the bottom, select Admin Area. + From the left sidebar, select CI/CD > Runners. + Select the runner you want to edit. + In the upper-right corner, select Edit ( ). + Under Restrict projects for this runner, search for a project. + To the left of the project, select Enable. + Repeat this process for each additional project. + + +Add a message for instance runners + + +To display details about the instance runners in all projects’ +runner settings: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Continuous Integration and Deployment. + Enter text, including Markdown if you want, in the Instance runner details field. + + +To view the rendered details: + + + On the left sidebar, select Search or go to and find your project or group. + Select Settings > CI/CD. + Expand Runners. + + + + +Maximum artifacts size + + +An administrator can set the maximum size of the +job artifacts at: + + + The instance level + The project and group level + + +For the setting on GitLab.com, see Artifacts maximum size. + +The value is in MB, and the default is 100 MB per job. An administrator can change the default value at the: + + + + Instance level: + + + On the left sidebar, at the bottom, select Admin Area. + On the left sidebar, select Settings > CI/CD > Continuous Integration and Deployment. + Change the value of Maximum artifacts size (MB). + Select Save changes for the changes to take effect. + + + + Group level (this overrides the instance setting): + + + Go to the group’s Settings > CI/CD > General Pipelines. + Change the value of Maximum artifacts size (in MB). + Select Save changes for the changes to take effect. + + + + Project level (this overrides the instance and group settings): + + + Go to the project’s Settings > CI/CD > General Pipelines. + Change the value of Maximum artifacts size (in MB). + Select Save changes for the changes to take effect. + + + + +Default artifacts expiration + + +The default expiration time of the job artifacts +can be set in the Admin Area of your GitLab instance. The syntax of duration is +described in artifacts:expire_in +and the default value is 30 days. + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Change the value of default expiration time. + Select Save changes for the changes to take effect. + + +This setting is set per job and can be overridden in +.gitlab-ci.yml. +To disable the expiration, set it to 0. The default unit is in seconds. + + + note Any changes to this setting applies to new artifacts only. The expiration time is not +be updated for artifacts created before this setting was changed. +The administrator may need to manually search for and expire previously-created +artifacts, as described in the troubleshooting documentation. + + +Keep the latest artifacts for all jobs in the latest successful pipelines + + + +History + + + + + +Introduced in GitLab 13.9. + + + + + + +When enabled (default), the artifacts of the most recent pipeline for each Git ref +(branches and tags) +are locked against deletion and kept regardless of the expiry time. 
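The per-job override mentioned in Default artifacts expiration above uses the artifacts:expire_in keyword in .gitlab-ci.yml. A minimal sketch, with an assumed job name, script, and artifact path:

build-job:
  script:
    - make build
  artifacts:
    paths:
      - dist/
    expire_in: 1 week

Jobs without an explicit expire_in fall back to the instance default described above.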
+ +When disabled, the latest artifacts for any new successful or fixed pipelines +are allowed to expire. + +This setting takes precedence over the project level setting. +If disabled at the instance level, you cannot enable this per-project. + +To disable the setting: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Continuous Integration and Deployment. + Clear the Keep the latest artifacts for all jobs in the latest successful pipelines checkbox. + Select Save changes + + + +When you disable the feature, the latest artifacts do not immediately expire. +A new pipeline must run before the latest artifacts can expire and be deleted. + + + note All application settings have a customizable cache expiry interval which can delay the settings affect. + + +Archive jobs + + +You can archive old jobs to prevent them from being re-run individually. Archived jobs +display a lock icon ( ) and This job is archived at the top of the job log. + +Future work is planned to reduce the CI/CD footprint on the system for archived jobs +by removing metadata stored in the database needed to run the job. See the CI/CD data time decay +blueprint for more details. + +To set the duration for which the jobs are considered as old and expired: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Continuous Integration and Deployment section. + Set the value of Archive jobs. + Select Save changes for the changes to take effect. + + +After that time passes, the jobs are archived in the background and no longer able to be +retried. Make it empty to never expire jobs. It has to be no less than 1 day, +for example: 15 days, 1 month, 2 years. + +For the value set for GitLab.com, see Scheduled job archiving. + +Protect CI/CD variables by default + + +To set all new CI/CD variables as +protected by default: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Select Protect CI/CD variables by default. + + +Maximum includes + + + +History + + + + + +Introduced in GitLab 16.0. + + + + + + +The maximum number of includes per pipeline can be set at the instance level. +The default is 150. + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Change the value of Maximum includes. + Select Save changes for the changes to take effect. + + +Maximum downstream pipeline trigger rate + + + +History + + + + + +Introduced in GitLab 16.10. + + + + + + +The maximum number of downstream pipelines that can be triggered per minute +(for a given project, user, and commit) can be set at the instance level. +The default is 0 (no restriction). + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Change the value of Maximum downstream pipeline trigger rate. + Select Save changes for the changes to take effect. + + +Default CI/CD configuration file + + + +History + + + + + +Introduced in GitLab 12.5. + + + + + + +The default CI/CD configuration file and path for new projects can be set in the Admin Area +of your GitLab instance (.gitlab-ci.yml if not set): + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Input the new file and path in the Default CI/CD configuration file field. + Select Save changes for the changes to take effect. + + +It is also possible to specify a custom CI/CD configuration file for a specific project. + +Set CI/CD limits + + + +History + + + + + +Introduced in GitLab 14.10. 
+ +Maximum number of active pipelines per project setting removed in GitLab 16.0. + + + + + + +You can configure some CI/CD limits +from the Admin Area: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Continuous Integration and Deployment section. + In the CI/CD limits section, you can set the following limits: + + Maximum number of jobs in a single pipeline + Total number of jobs in currently active pipelines + Maximum number of pipeline subscriptions to and from a project + Maximum number of pipeline schedules + Maximum number of DAG dependencies that a job can have + Maximum number of runners registered per group + Maximum number of runners registered per project + Maximum number of downstream pipelines in a pipeline’s hierarchy tree + + + + +Enable or disable the pipeline suggestion banner + + +By default, a banner displays in merge requests with no pipeline suggesting a +walkthrough on how to add one. + + + +To enable or disable the banner: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Select or clear the Enable pipeline suggestion banner checkbox. + Select Save changes. + + +Enable or disable the external redirect page for job artifacts + + +By default, GitLab Pages shows an external redirect page when a user tries to view +a job artifact served by GitLab Pages. This page warns about the potential for +malicious user-generated content, as described in +issue 352611. + +Self-managed administrators can disable the external redirect warning page, +so you can view job artifact pages directly: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Continuous Integration and Deployment. + Deselect Enable the external redirect page for job artifacts. + + +Required pipeline configuration + + + +Tier: Ultimate +Offering: Self-managed + + +History + + + + + +Moved from GitLab Premium to GitLab Ultimate in 15.0. + +Deprecated in GitLab 15.9. + + + + + + + + caution This feature was deprecated in GitLab 15.9 +and is planned for removal in 17.0. Use compliance pipelines +instead. This change is a breaking change. + + +You can set a CI/CD template +as a required pipeline configuration for all projects on a GitLab instance. You can +use a template from: + + + The default CI/CD templates. + + A custom template stored in an instance template repository. + + + note When you use a configuration defined in an instance template repository, +nested include: keywords +(including include:file, include:local, include:remote, and include:template) +do not work. + + + + +The project CI/CD configuration merges into the required pipeline configuration when +a pipeline runs. The merged configuration is the same as if the required pipeline configuration +added the project configuration with the include keyword. +To view a project’s full merged configuration, View full configuration +in the pipeline editor. + +To select a CI/CD template for the required pipeline configuration: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Required pipeline configuration section. + Select a CI/CD template from the dropdown list. + Select Save changes. + + +Package registry configuration + + +Maven Forwarding + + + +Tier: Premium, Ultimate +Offering: Self-managed + +GitLab administrators can disable the forwarding of Maven requests to Maven Central. 
+ +To disable forwarding Maven requests: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Package Registry section. + Clear the checkbox Forward Maven package requests to the Maven Registry if the packages are not found in the GitLab Package Registry. + Select Save changes. + + +npm Forwarding + + + +Tier: Premium, Ultimate +Offering: Self-managed + +GitLab administrators can disable the forwarding of npm requests to npmjs.com. + +To disable it: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Package Registry section. + Clear the checkbox Forward npm package requests to the npm Registry if the packages are not found in the GitLab Package Registry. + Select Save changes. + + +PyPI Forwarding + + + +Tier: Premium, Ultimate +Offering: Self-managed + +GitLab administrators can disable the forwarding of PyPI requests to pypi.org. + +To disable it: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Package Registry section. + Clear the checkbox Forward PyPI package requests to the PyPI Registry if the packages are not found in the GitLab Package Registry. + Select Save changes. + + +Package file size limits + + +GitLab administrators can adjust the maximum allowed file size for each package type. + +To set the maximum file size: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Package Registry section. + Find the package type you would like to adjust. + Enter the maximum file size, in bytes. + Select Save size limits. + + +Restrict runner registration by all users in an instance + + + +History + + + + + +Introduced in GitLab 14.1. + +Enabled on GitLab.com and self-managed in GitLab 15.5. + + + + + + +GitLab administrators can adjust who is allowed to register runners, by showing and hiding areas of the UI. + +When the registration sections are hidden in the UI, members of the project or group must contact administrators to enable runner registration in the group or project. If you plan to prevent registration, ensure users have access to the runners they need to run jobs. + +By default, all members of a project and group are able to register runners. + +To restrict all users in an instance from registering runners: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Runners. + In the Runner registration section, clear the Members of the project can register runners and +Members of the group can register runners checkboxes to remove runner registration from the UI. + Select Save changes. + + + + note After you disable runner registration by members of a project, the registration +token automatically rotates. The token is no longer valid and you must +use the new registration token for the project. + + +Restrict runner registration by all members in a group + + +Prerequisites: + + + Runner registration must be enabled for all users in the instance. + + +GitLab administrators can adjust group permissions to restrict runner registration by group members. + +To restrict runner registration by members in a specific group: + + + On the left sidebar, at the bottom, select Admin Area. + Select Overview > Groups and find your group. + Select Edit. + Clear the New group runners can be registered checkbox if you want to disable runner registration by all members in the group. If the setting is read-only, you must enable runner registration for the instance. 
+ Select Save changes. + + +Disable runner version management + + + +History + + + + + +Introduced in GitLab 15.10. + + + + + + +By default, GitLab instances periodically fetch official runner version data from GitLab.com to determine whether the runners need upgrades. + +To disable your instance fetching this data: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Runners. + In the Runner version management section, clear the Fetch GitLab Runner release version data from GitLab.com checkbox. + Select Save changes. + + +Troubleshooting + + + +413 Request Entity Too Large error + + +If the artifacts are too large, the job might fail with the following error: + +Uploading artifacts as ""archive"" to coordinator... too large archive responseStatus=413 Request Entity Too Large status=413"" at end of a build job on pipeline when trying to store artifacts to . + + +You might need to: + + + Increase the maximum artifacts size. + If you are using NGINX as a proxy server, increase the file upload size limit which is limited to 1 MB by default. +Set a higher value for client-max-body-size in the NGINX configuration file. + + + +2. Jobs artifacts administration + + + +Jobs artifacts administration + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed + +This is the administration documentation. To learn how to use job artifacts in your GitLab CI/CD pipeline, +see the job artifacts configuration documentation. + +An artifact is a list of files and directories attached to a job after it +finishes. This feature is enabled by default in all GitLab installations. + +Disabling job artifacts + + +To disable artifacts site-wide: + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['artifacts_enabled'] = false + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + artifacts: + enabled: false + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['artifacts_enabled'] = false + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + artifacts: + enabled: false + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +Storing job artifacts + + +GitLab Runner can upload an archive containing the job artifacts to GitLab. By default, +this is done when the job succeeds, but can also be done on failure, or always, with the +artifacts:when parameter. + +Most artifacts are compressed by GitLab Runner before being sent to the coordinator. The exception to this is +reports artifacts, which are compressed after uploading. + +Using local storage + + +If you’re using the Linux package or have a self-compiled installation, you +can change the location where the artifacts are stored locally. + + + note For Docker installations, you can change the path where your data is mounted. +For the Helm chart, use +object storage. 
+ + +Linux package (Omnibus)Self-compiled (source)The artifacts are stored by default in /var/opt/gitlab/gitlab-rails/shared/artifacts. + + To change the storage path, for example to /mnt/storage/artifacts, edit +/etc/gitlab/gitlab.rb and add the following line: + + +gitlab_rails['artifacts_path'] = ""/mnt/storage/artifacts"" + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + +The artifacts are stored by default in /home/git/gitlab/shared/artifacts. + + To change the storage path, for example to /mnt/storage/artifacts, edit +/home/git/gitlab/config/gitlab.yml and add or amend the following lines: + + +production: &base + artifacts: + enabled: true + path: /mnt/storage/artifacts + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +Using object storage + + +If you don’t want to use the local disk where GitLab is installed to store the +artifacts, you can use an object storage like AWS S3 instead. + +If you configure GitLab to store artifacts on object storage, you may also want to +eliminate local disk usage for job logs. +In both cases, job logs are archived and moved to object storage when the job completes. + + + caution In a multi-server setup you must use one of the options to +eliminate local disk usage for job logs, or job logs could be lost. + + +In GitLab 13.2 and later, you should use the +consolidated object storage settings. + +Migrating to object storage + + +You can migrate the job artifacts from local storage to object storage. The +processing is done in a background worker and requires no downtime. + + + +Configure the object storage. + + Migrate the artifacts: + + Linux package (Omnibus)DockerSelf-compiled (source) +sudo gitlab-rake gitlab:artifacts:migrate + +sudo docker exec -t gitlab-rake gitlab:artifacts:migrate + +sudo -u git -H bundle exec rake gitlab:artifacts:migrate RAILS_ENV=production + + + Optional. Track the progress and verify that all job artifacts migrated +successfully using the PostgreSQL console. + + + Open a PostgreSQL console: + + Linux package (Omnibus)DockerSelf-compiled (source) +sudo gitlab-psql + +sudo docker exec -it /bin/bash +gitlab-psql + +sudo -u git -H psql -d gitlabhq_production + + + + Verify that all artifacts migrated to object storage with the following +SQL query. The number of objectstg should be the same as total: + + +gitlabhq_production=# SELECT count(*) AS total, sum(case when file_store = '1' then 1 else 0 end) AS filesystem, sum(case when file_store = '2' then 1 else 0 end) AS objectstg FROM ci_job_artifacts; + +total | filesystem | objectstg +------+------------+----------- + 19 | 0 | 19 + + + + + + Verify that there are no files on disk in the artifacts directory: + + Linux package (Omnibus)DockerSelf-compiled (source) +sudo find /var/opt/gitlab/gitlab-rails/shared/artifacts -type f | grep -v tmp | wc -l + Assuming you mounted /var/opt/gitlab to /srv/gitlab: +sudo find /srv/gitlab/gitlab-rails/shared/artifacts -type f | grep -v tmp | wc -l + +sudo find /home/git/gitlab/shared/artifacts -type f | grep -v tmp | wc -l + + + + +In some cases, you need to run the orphan artifact file cleanup Rake task +to clean up orphaned artifacts. + +Migrating from object storage to local storage + + +To migrate artifacts back to local storage: + + + Run gitlab-rake gitlab:artifacts:migrate_to_local. + +Selectively disable the artifacts’ storage in gitlab.rb. + +Reconfigure GitLab. 
+ + +Expiring artifacts + + +If artifacts:expire_in is used to set +an expiry for the artifacts, they are marked for deletion right after that date passes. +Otherwise, they expire per the default artifacts expiration setting. + +Artifacts are deleted by the expire_build_artifacts_worker cron job which Sidekiq +runs every 7 minutes (*/7 * * * * in Cron syntax). + +To change the default schedule on which expired artifacts are deleted: + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb and add the following line (or uncomment it if +it already exists and is commented out), substituting your schedule in cron +syntax: + + +gitlab_rails['expire_build_artifacts_worker_cron'] = ""*/7 * * * *"" + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + cron_jobs: + expire_build_artifacts_worker: + cron: ""*/7 * * * *"" + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['expire_build_artifacts_worker_cron'] = ""*/7 * * * *"" + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + cron_jobs: + expire_build_artifacts_worker: + cron: ""*/7 * * * *"" + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +Set the maximum file size of the artifacts + + +If artifacts are enabled, you can change the maximum file size of the +artifacts through the Admin Area settings. + +Storage statistics + + +You can see the total storage used for job artifacts for groups and projects in: + + + The Admin Area + The groups and projects APIs + + +Implementation details + + +When GitLab receives an artifacts archive, an archive metadata file is also +generated by GitLab Workhorse. This metadata file describes all the entries +that are located in the artifacts archive itself. +The metadata file is in a binary format, with additional Gzip compression. + +GitLab doesn’t extract the artifacts archive to save space, memory, and disk +I/O. It instead inspects the metadata file which contains all the relevant +information. This is especially important when there is a lot of artifacts, or +an archive is a very large file. + +When selecting a specific file, GitLab Workhorse extracts it +from the archive and the download begins. This implementation saves space, +memory and disk I/O. + + +" +what is the artifact upload limit for self-managed?,,"1. Continuous Integration and Deployment Admin Area settings + + + +Continuous Integration and Deployment Admin Area settings + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed + +The Admin Area has the instance settings for Auto DevOps, runners, and +job artifacts. + +Auto DevOps + + +To enable (or disable) Auto DevOps +for all projects: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Check (or uncheck to disable) the box that says Default to Auto DevOps pipeline for all projects. + Optionally, set up the Auto DevOps base domain +which is used for Auto Deploy and Auto Review Apps. 
+ Select Save changes for the changes to take effect. + + +From now on, every existing project and newly created ones that don’t have a +.gitlab-ci.yml use the Auto DevOps pipelines. + +If you want to disable it for a specific project, you can do so in +its settings. + +Enable instance runners for new projects + + +You can set all new projects to have instance runners available by default. + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Continuous Integration and Deployment. + Select the Enable instance runners for new projects checkbox. + + +Any time a new project is created, the instance runners are available. + +Instance runners compute quota + + +As an administrator you can set either a global or namespace-specific +limit on the number of compute minutes you can use. + +Enable a project runner for multiple projects + + +If you have already registered a project runner +you can assign that runner to other projects. + +To enable a project runner for more than one project: + + + On the left sidebar, at the bottom, select Admin Area. + From the left sidebar, select CI/CD > Runners. + Select the runner you want to edit. + In the upper-right corner, select Edit ( ). + Under Restrict projects for this runner, search for a project. + To the left of the project, select Enable. + Repeat this process for each additional project. + + +Add a message for instance runners + + +To display details about the instance runners in all projects’ +runner settings: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Continuous Integration and Deployment. + Enter text, including Markdown if you want, in the Instance runner details field. + + +To view the rendered details: + + + On the left sidebar, select Search or go to and find your project or group. + Select Settings > CI/CD. + Expand Runners. + + + + +Maximum artifacts size + + +An administrator can set the maximum size of the +job artifacts at: + + + The instance level + The project and group level + + +For the setting on GitLab.com, see Artifacts maximum size. + +The value is in MB, and the default is 100 MB per job. An administrator can change the default value at the: + + + + Instance level: + + + On the left sidebar, at the bottom, select Admin Area. + On the left sidebar, select Settings > CI/CD > Continuous Integration and Deployment. + Change the value of Maximum artifacts size (MB). + Select Save changes for the changes to take effect. + + + + Group level (this overrides the instance setting): + + + Go to the group’s Settings > CI/CD > General Pipelines. + Change the value of Maximum artifacts size (in MB). + Select Save changes for the changes to take effect. + + + + Project level (this overrides the instance and group settings): + + + Go to the project’s Settings > CI/CD > General Pipelines. + Change the value of Maximum artifacts size (in MB). + Select Save changes for the changes to take effect. + + + + +Default artifacts expiration + + +The default expiration time of the job artifacts +can be set in the Admin Area of your GitLab instance. The syntax of duration is +described in artifacts:expire_in +and the default value is 30 days. + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Change the value of default expiration time. + Select Save changes for the changes to take effect. + + +This setting is set per job and can be overridden in +.gitlab-ci.yml. +To disable the expiration, set it to 0. 
The default unit is in seconds. + + + note Any changes to this setting applies to new artifacts only. The expiration time is not +be updated for artifacts created before this setting was changed. +The administrator may need to manually search for and expire previously-created +artifacts, as described in the troubleshooting documentation. + + +Keep the latest artifacts for all jobs in the latest successful pipelines + + + +History + + + + + +Introduced in GitLab 13.9. + + + + + + +When enabled (default), the artifacts of the most recent pipeline for each Git ref +(branches and tags) +are locked against deletion and kept regardless of the expiry time. + +When disabled, the latest artifacts for any new successful or fixed pipelines +are allowed to expire. + +This setting takes precedence over the project level setting. +If disabled at the instance level, you cannot enable this per-project. + +To disable the setting: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Continuous Integration and Deployment. + Clear the Keep the latest artifacts for all jobs in the latest successful pipelines checkbox. + Select Save changes + + + +When you disable the feature, the latest artifacts do not immediately expire. +A new pipeline must run before the latest artifacts can expire and be deleted. + + + note All application settings have a customizable cache expiry interval which can delay the settings affect. + + +Archive jobs + + +You can archive old jobs to prevent them from being re-run individually. Archived jobs +display a lock icon ( ) and This job is archived at the top of the job log. + +Future work is planned to reduce the CI/CD footprint on the system for archived jobs +by removing metadata stored in the database needed to run the job. See the CI/CD data time decay +blueprint for more details. + +To set the duration for which the jobs are considered as old and expired: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Continuous Integration and Deployment section. + Set the value of Archive jobs. + Select Save changes for the changes to take effect. + + +After that time passes, the jobs are archived in the background and no longer able to be +retried. Make it empty to never expire jobs. It has to be no less than 1 day, +for example: 15 days, 1 month, 2 years. + +For the value set for GitLab.com, see Scheduled job archiving. + +Protect CI/CD variables by default + + +To set all new CI/CD variables as +protected by default: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Select Protect CI/CD variables by default. + + +Maximum includes + + + +History + + + + + +Introduced in GitLab 16.0. + + + + + + +The maximum number of includes per pipeline can be set at the instance level. +The default is 150. + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Change the value of Maximum includes. + Select Save changes for the changes to take effect. + + +Maximum downstream pipeline trigger rate + + + +History + + + + + +Introduced in GitLab 16.10. + + + + + + +The maximum number of downstream pipelines that can be triggered per minute +(for a given project, user, and commit) can be set at the instance level. +The default is 0 (no restriction). + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Change the value of Maximum downstream pipeline trigger rate. 
+ Select Save changes for the changes to take effect. + + +Default CI/CD configuration file + + + +History + + + + + +Introduced in GitLab 12.5. + + + + + + +The default CI/CD configuration file and path for new projects can be set in the Admin Area +of your GitLab instance (.gitlab-ci.yml if not set): + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Input the new file and path in the Default CI/CD configuration file field. + Select Save changes for the changes to take effect. + + +It is also possible to specify a custom CI/CD configuration file for a specific project. + +Set CI/CD limits + + + +History + + + + + +Introduced in GitLab 14.10. + +Maximum number of active pipelines per project setting removed in GitLab 16.0. + + + + + + +You can configure some CI/CD limits +from the Admin Area: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Continuous Integration and Deployment section. + In the CI/CD limits section, you can set the following limits: + + Maximum number of jobs in a single pipeline + Total number of jobs in currently active pipelines + Maximum number of pipeline subscriptions to and from a project + Maximum number of pipeline schedules + Maximum number of DAG dependencies that a job can have + Maximum number of runners registered per group + Maximum number of runners registered per project + Maximum number of downstream pipelines in a pipeline’s hierarchy tree + + + + +Enable or disable the pipeline suggestion banner + + +By default, a banner displays in merge requests with no pipeline suggesting a +walkthrough on how to add one. + + + +To enable or disable the banner: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Select or clear the Enable pipeline suggestion banner checkbox. + Select Save changes. + + +Enable or disable the external redirect page for job artifacts + + +By default, GitLab Pages shows an external redirect page when a user tries to view +a job artifact served by GitLab Pages. This page warns about the potential for +malicious user-generated content, as described in +issue 352611. + +Self-managed administrators can disable the external redirect warning page, +so you can view job artifact pages directly: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Continuous Integration and Deployment. + Deselect Enable the external redirect page for job artifacts. + + +Required pipeline configuration + + + +Tier: Ultimate +Offering: Self-managed + + +History + + + + + +Moved from GitLab Premium to GitLab Ultimate in 15.0. + +Deprecated in GitLab 15.9. + + + + + + + + caution This feature was deprecated in GitLab 15.9 +and is planned for removal in 17.0. Use compliance pipelines +instead. This change is a breaking change. + + +You can set a CI/CD template +as a required pipeline configuration for all projects on a GitLab instance. You can +use a template from: + + + The default CI/CD templates. + + A custom template stored in an instance template repository. + + + note When you use a configuration defined in an instance template repository, +nested include: keywords +(including include:file, include:local, include:remote, and include:template) +do not work. + + + + +The project CI/CD configuration merges into the required pipeline configuration when +a pipeline runs. 
The merged configuration is the same as if the required pipeline configuration +added the project configuration with the include keyword. +To view a project’s full merged configuration, View full configuration +in the pipeline editor. + +To select a CI/CD template for the required pipeline configuration: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Required pipeline configuration section. + Select a CI/CD template from the dropdown list. + Select Save changes. + + +Package registry configuration + + +Maven Forwarding + + + +Tier: Premium, Ultimate +Offering: Self-managed + +GitLab administrators can disable the forwarding of Maven requests to Maven Central. + +To disable forwarding Maven requests: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Package Registry section. + Clear the checkbox Forward Maven package requests to the Maven Registry if the packages are not found in the GitLab Package Registry. + Select Save changes. + + +npm Forwarding + + + +Tier: Premium, Ultimate +Offering: Self-managed + +GitLab administrators can disable the forwarding of npm requests to npmjs.com. + +To disable it: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Package Registry section. + Clear the checkbox Forward npm package requests to the npm Registry if the packages are not found in the GitLab Package Registry. + Select Save changes. + + +PyPI Forwarding + + + +Tier: Premium, Ultimate +Offering: Self-managed + +GitLab administrators can disable the forwarding of PyPI requests to pypi.org. + +To disable it: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Package Registry section. + Clear the checkbox Forward PyPI package requests to the PyPI Registry if the packages are not found in the GitLab Package Registry. + Select Save changes. + + +Package file size limits + + +GitLab administrators can adjust the maximum allowed file size for each package type. + +To set the maximum file size: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Package Registry section. + Find the package type you would like to adjust. + Enter the maximum file size, in bytes. + Select Save size limits. + + +Restrict runner registration by all users in an instance + + + +History + + + + + +Introduced in GitLab 14.1. + +Enabled on GitLab.com and self-managed in GitLab 15.5. + + + + + + +GitLab administrators can adjust who is allowed to register runners, by showing and hiding areas of the UI. + +When the registration sections are hidden in the UI, members of the project or group must contact administrators to enable runner registration in the group or project. If you plan to prevent registration, ensure users have access to the runners they need to run jobs. + +By default, all members of a project and group are able to register runners. + +To restrict all users in an instance from registering runners: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Runners. + In the Runner registration section, clear the Members of the project can register runners and +Members of the group can register runners checkboxes to remove runner registration from the UI. + Select Save changes. + + + + note After you disable runner registration by members of a project, the registration +token automatically rotates. 
The token is no longer valid and you must +use the new registration token for the project. + + +Restrict runner registration by all members in a group + + +Prerequisites: + + + Runner registration must be enabled for all users in the instance. + + +GitLab administrators can adjust group permissions to restrict runner registration by group members. + +To restrict runner registration by members in a specific group: + + + On the left sidebar, at the bottom, select Admin Area. + Select Overview > Groups and find your group. + Select Edit. + Clear the New group runners can be registered checkbox if you want to disable runner registration by all members in the group. If the setting is read-only, you must enable runner registration for the instance. + Select Save changes. + + +Disable runner version management + + + +History + + + + + +Introduced in GitLab 15.10. + + + + + + +By default, GitLab instances periodically fetch official runner version data from GitLab.com to determine whether the runners need upgrades. + +To disable your instance fetching this data: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Runners. + In the Runner version management section, clear the Fetch GitLab Runner release version data from GitLab.com checkbox. + Select Save changes. + + +Troubleshooting + + + +413 Request Entity Too Large error + + +If the artifacts are too large, the job might fail with the following error: + +Uploading artifacts as ""archive"" to coordinator... too large archive responseStatus=413 Request Entity Too Large status=413"" at end of a build job on pipeline when trying to store artifacts to . + + +You might need to: + + + Increase the maximum artifacts size. + If you are using NGINX as a proxy server, increase the file upload size limit which is limited to 1 MB by default. +Set a higher value for client-max-body-size in the NGINX configuration file. + + + +2. Jobs artifacts administration + + + +Jobs artifacts administration + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed + +This is the administration documentation. To learn how to use job artifacts in your GitLab CI/CD pipeline, +see the job artifacts configuration documentation. + +An artifact is a list of files and directories attached to a job after it +finishes. This feature is enabled by default in all GitLab installations. 
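
As an illustration (not from the original page), a job's artifact archive can be downloaded with the Jobs API; the host, project ID, job ID, and token below are placeholders:

# Downloads the artifacts archive attached to a single job as artifacts.zip.
curl --location --header 'PRIVATE-TOKEN: <your-token>' --output artifacts.zip \
  'https://gitlab.example.com/api/v4/projects/<project-id>/jobs/<job-id>/artifacts'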
+ +Disabling job artifacts + + +To disable artifacts site-wide: + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['artifacts_enabled'] = false + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + artifacts: + enabled: false + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['artifacts_enabled'] = false + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + artifacts: + enabled: false + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +Storing job artifacts + + +GitLab Runner can upload an archive containing the job artifacts to GitLab. By default, +this is done when the job succeeds, but can also be done on failure, or always, with the +artifacts:when parameter. + +Most artifacts are compressed by GitLab Runner before being sent to the coordinator. The exception to this is +reports artifacts, which are compressed after uploading. + +Using local storage + + +If you’re using the Linux package or have a self-compiled installation, you +can change the location where the artifacts are stored locally. + + + note For Docker installations, you can change the path where your data is mounted. +For the Helm chart, use +object storage. + + +Linux package (Omnibus)Self-compiled (source)The artifacts are stored by default in /var/opt/gitlab/gitlab-rails/shared/artifacts. + + To change the storage path, for example to /mnt/storage/artifacts, edit +/etc/gitlab/gitlab.rb and add the following line: + + +gitlab_rails['artifacts_path'] = ""/mnt/storage/artifacts"" + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + +The artifacts are stored by default in /home/git/gitlab/shared/artifacts. + + To change the storage path, for example to /mnt/storage/artifacts, edit +/home/git/gitlab/config/gitlab.yml and add or amend the following lines: + + +production: &base + artifacts: + enabled: true + path: /mnt/storage/artifacts + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +Using object storage + + +If you don’t want to use the local disk where GitLab is installed to store the +artifacts, you can use an object storage like AWS S3 instead. + +If you configure GitLab to store artifacts on object storage, you may also want to +eliminate local disk usage for job logs. +In both cases, job logs are archived and moved to object storage when the job completes. + + + caution In a multi-server setup you must use one of the options to +eliminate local disk usage for job logs, or job logs could be lost. + + +In GitLab 13.2 and later, you should use the +consolidated object storage settings. + +Migrating to object storage + + +You can migrate the job artifacts from local storage to object storage. The +processing is done in a background worker and requires no downtime. + + + +Configure the object storage. 
+ + Migrate the artifacts: + + Linux package (Omnibus)DockerSelf-compiled (source) +sudo gitlab-rake gitlab:artifacts:migrate + +sudo docker exec -t gitlab-rake gitlab:artifacts:migrate + +sudo -u git -H bundle exec rake gitlab:artifacts:migrate RAILS_ENV=production + + + Optional. Track the progress and verify that all job artifacts migrated +successfully using the PostgreSQL console. + + + Open a PostgreSQL console: + + Linux package (Omnibus)DockerSelf-compiled (source) +sudo gitlab-psql + +sudo docker exec -it /bin/bash +gitlab-psql + +sudo -u git -H psql -d gitlabhq_production + + + + Verify that all artifacts migrated to object storage with the following +SQL query. The number of objectstg should be the same as total: + + +gitlabhq_production=# SELECT count(*) AS total, sum(case when file_store = '1' then 1 else 0 end) AS filesystem, sum(case when file_store = '2' then 1 else 0 end) AS objectstg FROM ci_job_artifacts; + +total | filesystem | objectstg +------+------------+----------- + 19 | 0 | 19 + + + + + + Verify that there are no files on disk in the artifacts directory: + + Linux package (Omnibus)DockerSelf-compiled (source) +sudo find /var/opt/gitlab/gitlab-rails/shared/artifacts -type f | grep -v tmp | wc -l + Assuming you mounted /var/opt/gitlab to /srv/gitlab: +sudo find /srv/gitlab/gitlab-rails/shared/artifacts -type f | grep -v tmp | wc -l + +sudo find /home/git/gitlab/shared/artifacts -type f | grep -v tmp | wc -l + + + + +In some cases, you need to run the orphan artifact file cleanup Rake task +to clean up orphaned artifacts. + +Migrating from object storage to local storage + + +To migrate artifacts back to local storage: + + + Run gitlab-rake gitlab:artifacts:migrate_to_local. + +Selectively disable the artifacts’ storage in gitlab.rb. + +Reconfigure GitLab. + + +Expiring artifacts + + +If artifacts:expire_in is used to set +an expiry for the artifacts, they are marked for deletion right after that date passes. +Otherwise, they expire per the default artifacts expiration setting. + +Artifacts are deleted by the expire_build_artifacts_worker cron job which Sidekiq +runs every 7 minutes (*/7 * * * * in Cron syntax). 
+ +To change the default schedule on which expired artifacts are deleted: + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb and add the following line (or uncomment it if +it already exists and is commented out), substituting your schedule in cron +syntax: + + +gitlab_rails['expire_build_artifacts_worker_cron'] = ""*/7 * * * *"" + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + cron_jobs: + expire_build_artifacts_worker: + cron: ""*/7 * * * *"" + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['expire_build_artifacts_worker_cron'] = ""*/7 * * * *"" + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + cron_jobs: + expire_build_artifacts_worker: + cron: ""*/7 * * * *"" + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +Set the maximum file size of the artifacts + + +If artifacts are enabled, you can change the maximum file size of the +artifacts through the Admin Area settings. + +Storage statistics + + +You can see the total storage used for job artifacts for groups and projects in: + + + The Admin Area + The groups and projects APIs + + +Implementation details + + +When GitLab receives an artifacts archive, an archive metadata file is also +generated by GitLab Workhorse. This metadata file describes all the entries +that are located in the artifacts archive itself. +The metadata file is in a binary format, with additional Gzip compression. + +GitLab doesn’t extract the artifacts archive to save space, memory, and disk +I/O. It instead inspects the metadata file which contains all the relevant +information. This is especially important when there is a lot of artifacts, or +an archive is a very large file. + +When selecting a specific file, GitLab Workhorse extracts it +from the archive and the download begins. This implementation saves space, +memory and disk I/O. + + +" +what is the job maximum artifact size?,,"1. Continuous Integration and Deployment Admin Area settings + + + +Continuous Integration and Deployment Admin Area settings + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed + +The Admin Area has the instance settings for Auto DevOps, runners, and +job artifacts. + +Auto DevOps + + +To enable (or disable) Auto DevOps +for all projects: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Check (or uncheck to disable) the box that says Default to Auto DevOps pipeline for all projects. + Optionally, set up the Auto DevOps base domain +which is used for Auto Deploy and Auto Review Apps. + Select Save changes for the changes to take effect. + + +From now on, every existing project and newly created ones that don’t have a +.gitlab-ci.yml use the Auto DevOps pipelines. + +If you want to disable it for a specific project, you can do so in +its settings. + +Enable instance runners for new projects + + +You can set all new projects to have instance runners available by default. 
+ + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Continuous Integration and Deployment. + Select the Enable instance runners for new projects checkbox. + + +Any time a new project is created, the instance runners are available. + +Instance runners compute quota + + +As an administrator you can set either a global or namespace-specific +limit on the number of compute minutes you can use. + +Enable a project runner for multiple projects + + +If you have already registered a project runner +you can assign that runner to other projects. + +To enable a project runner for more than one project: + + + On the left sidebar, at the bottom, select Admin Area. + From the left sidebar, select CI/CD > Runners. + Select the runner you want to edit. + In the upper-right corner, select Edit ( ). + Under Restrict projects for this runner, search for a project. + To the left of the project, select Enable. + Repeat this process for each additional project. + + +Add a message for instance runners + + +To display details about the instance runners in all projects’ +runner settings: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Continuous Integration and Deployment. + Enter text, including Markdown if you want, in the Instance runner details field. + + +To view the rendered details: + + + On the left sidebar, select Search or go to and find your project or group. + Select Settings > CI/CD. + Expand Runners. + + + + +Maximum artifacts size + + +An administrator can set the maximum size of the +job artifacts at: + + + The instance level + The project and group level + + +For the setting on GitLab.com, see Artifacts maximum size. + +The value is in MB, and the default is 100 MB per job. An administrator can change the default value at the: + + + + Instance level: + + + On the left sidebar, at the bottom, select Admin Area. + On the left sidebar, select Settings > CI/CD > Continuous Integration and Deployment. + Change the value of Maximum artifacts size (MB). + Select Save changes for the changes to take effect. + + + + Group level (this overrides the instance setting): + + + Go to the group’s Settings > CI/CD > General Pipelines. + Change the value of Maximum artifacts size (in MB). + Select Save changes for the changes to take effect. + + + + Project level (this overrides the instance and group settings): + + + Go to the project’s Settings > CI/CD > General Pipelines. + Change the value of Maximum artifacts size (in MB). + Select Save changes for the changes to take effect. + + + + +Default artifacts expiration + + +The default expiration time of the job artifacts +can be set in the Admin Area of your GitLab instance. The syntax of duration is +described in artifacts:expire_in +and the default value is 30 days. + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Change the value of default expiration time. + Select Save changes for the changes to take effect. + + +This setting is set per job and can be overridden in +.gitlab-ci.yml. +To disable the expiration, set it to 0. The default unit is in seconds. + + + note Any changes to this setting applies to new artifacts only. The expiration time is not +be updated for artifacts created before this setting was changed. +The administrator may need to manually search for and expire previously-created +artifacts, as described in the troubleshooting documentation. 
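
Both of the settings above (Maximum artifacts size and the default artifacts expiration) can usually also be changed through the application settings REST API. This sketch is not part of the original page; max_artifacts_size and default_artifacts_expire_in are the assumed attribute names, gitlab.example.com is a placeholder host, and <admin-token> stands for an administrator personal access token:

# Sets the instance-wide maximum artifacts size to 200 MB and the default expiry to 30 days.
curl --request PUT --header 'PRIVATE-TOKEN: <admin-token>' \
  'https://gitlab.example.com/api/v4/application/settings?max_artifacts_size=200&default_artifacts_expire_in=30%20days'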
+ + +Keep the latest artifacts for all jobs in the latest successful pipelines + + + +History + + + + + +Introduced in GitLab 13.9. + + + + + + +When enabled (default), the artifacts of the most recent pipeline for each Git ref +(branches and tags) +are locked against deletion and kept regardless of the expiry time. + +When disabled, the latest artifacts for any new successful or fixed pipelines +are allowed to expire. + +This setting takes precedence over the project level setting. +If disabled at the instance level, you cannot enable this per-project. + +To disable the setting: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Continuous Integration and Deployment. + Clear the Keep the latest artifacts for all jobs in the latest successful pipelines checkbox. + Select Save changes + + + +When you disable the feature, the latest artifacts do not immediately expire. +A new pipeline must run before the latest artifacts can expire and be deleted. + + + note All application settings have a customizable cache expiry interval which can delay the settings affect. + + +Archive jobs + + +You can archive old jobs to prevent them from being re-run individually. Archived jobs +display a lock icon ( ) and This job is archived at the top of the job log. + +Future work is planned to reduce the CI/CD footprint on the system for archived jobs +by removing metadata stored in the database needed to run the job. See the CI/CD data time decay +blueprint for more details. + +To set the duration for which the jobs are considered as old and expired: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Continuous Integration and Deployment section. + Set the value of Archive jobs. + Select Save changes for the changes to take effect. + + +After that time passes, the jobs are archived in the background and no longer able to be +retried. Make it empty to never expire jobs. It has to be no less than 1 day, +for example: 15 days, 1 month, 2 years. + +For the value set for GitLab.com, see Scheduled job archiving. + +Protect CI/CD variables by default + + +To set all new CI/CD variables as +protected by default: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Select Protect CI/CD variables by default. + + +Maximum includes + + + +History + + + + + +Introduced in GitLab 16.0. + + + + + + +The maximum number of includes per pipeline can be set at the instance level. +The default is 150. + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Change the value of Maximum includes. + Select Save changes for the changes to take effect. + + +Maximum downstream pipeline trigger rate + + + +History + + + + + +Introduced in GitLab 16.10. + + + + + + +The maximum number of downstream pipelines that can be triggered per minute +(for a given project, user, and commit) can be set at the instance level. +The default is 0 (no restriction). + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Change the value of Maximum downstream pipeline trigger rate. + Select Save changes for the changes to take effect. + + +Default CI/CD configuration file + + + +History + + + + + +Introduced in GitLab 12.5. + + + + + + +The default CI/CD configuration file and path for new projects can be set in the Admin Area +of your GitLab instance (.gitlab-ci.yml if not set): + + + On the left sidebar, at the bottom, select Admin Area. 
+ Select Settings > CI/CD. + Input the new file and path in the Default CI/CD configuration file field. + Select Save changes for the changes to take effect. + + +It is also possible to specify a custom CI/CD configuration file for a specific project. + +Set CI/CD limits + + + +History + + + + + +Introduced in GitLab 14.10. + +Maximum number of active pipelines per project setting removed in GitLab 16.0. + + + + + + +You can configure some CI/CD limits +from the Admin Area: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Continuous Integration and Deployment section. + In the CI/CD limits section, you can set the following limits: + + Maximum number of jobs in a single pipeline + Total number of jobs in currently active pipelines + Maximum number of pipeline subscriptions to and from a project + Maximum number of pipeline schedules + Maximum number of DAG dependencies that a job can have + Maximum number of runners registered per group + Maximum number of runners registered per project + Maximum number of downstream pipelines in a pipeline’s hierarchy tree + + + + +Enable or disable the pipeline suggestion banner + + +By default, a banner displays in merge requests with no pipeline suggesting a +walkthrough on how to add one. + + + +To enable or disable the banner: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Select or clear the Enable pipeline suggestion banner checkbox. + Select Save changes. + + +Enable or disable the external redirect page for job artifacts + + +By default, GitLab Pages shows an external redirect page when a user tries to view +a job artifact served by GitLab Pages. This page warns about the potential for +malicious user-generated content, as described in +issue 352611. + +Self-managed administrators can disable the external redirect warning page, +so you can view job artifact pages directly: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Continuous Integration and Deployment. + Deselect Enable the external redirect page for job artifacts. + + +Required pipeline configuration + + + +Tier: Ultimate +Offering: Self-managed + + +History + + + + + +Moved from GitLab Premium to GitLab Ultimate in 15.0. + +Deprecated in GitLab 15.9. + + + + + + + + caution This feature was deprecated in GitLab 15.9 +and is planned for removal in 17.0. Use compliance pipelines +instead. This change is a breaking change. + + +You can set a CI/CD template +as a required pipeline configuration for all projects on a GitLab instance. You can +use a template from: + + + The default CI/CD templates. + + A custom template stored in an instance template repository. + + + note When you use a configuration defined in an instance template repository, +nested include: keywords +(including include:file, include:local, include:remote, and include:template) +do not work. + + + + +The project CI/CD configuration merges into the required pipeline configuration when +a pipeline runs. The merged configuration is the same as if the required pipeline configuration +added the project configuration with the include keyword. +To view a project’s full merged configuration, View full configuration +in the pipeline editor. + +To select a CI/CD template for the required pipeline configuration: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Required pipeline configuration section. 
+ Select a CI/CD template from the dropdown list. + Select Save changes. + + +Package registry configuration + + +Maven Forwarding + + + +Tier: Premium, Ultimate +Offering: Self-managed + +GitLab administrators can disable the forwarding of Maven requests to Maven Central. + +To disable forwarding Maven requests: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Package Registry section. + Clear the checkbox Forward Maven package requests to the Maven Registry if the packages are not found in the GitLab Package Registry. + Select Save changes. + + +npm Forwarding + + + +Tier: Premium, Ultimate +Offering: Self-managed + +GitLab administrators can disable the forwarding of npm requests to npmjs.com. + +To disable it: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Package Registry section. + Clear the checkbox Forward npm package requests to the npm Registry if the packages are not found in the GitLab Package Registry. + Select Save changes. + + +PyPI Forwarding + + + +Tier: Premium, Ultimate +Offering: Self-managed + +GitLab administrators can disable the forwarding of PyPI requests to pypi.org. + +To disable it: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Package Registry section. + Clear the checkbox Forward PyPI package requests to the PyPI Registry if the packages are not found in the GitLab Package Registry. + Select Save changes. + + +Package file size limits + + +GitLab administrators can adjust the maximum allowed file size for each package type. + +To set the maximum file size: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand the Package Registry section. + Find the package type you would like to adjust. + Enter the maximum file size, in bytes. + Select Save size limits. + + +Restrict runner registration by all users in an instance + + + +History + + + + + +Introduced in GitLab 14.1. + +Enabled on GitLab.com and self-managed in GitLab 15.5. + + + + + + +GitLab administrators can adjust who is allowed to register runners, by showing and hiding areas of the UI. + +When the registration sections are hidden in the UI, members of the project or group must contact administrators to enable runner registration in the group or project. If you plan to prevent registration, ensure users have access to the runners they need to run jobs. + +By default, all members of a project and group are able to register runners. + +To restrict all users in an instance from registering runners: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Runners. + In the Runner registration section, clear the Members of the project can register runners and +Members of the group can register runners checkboxes to remove runner registration from the UI. + Select Save changes. + + + + note After you disable runner registration by members of a project, the registration +token automatically rotates. The token is no longer valid and you must +use the new registration token for the project. + + +Restrict runner registration by all members in a group + + +Prerequisites: + + + Runner registration must be enabled for all users in the instance. + + +GitLab administrators can adjust group permissions to restrict runner registration by group members. + +To restrict runner registration by members in a specific group: + + + On the left sidebar, at the bottom, select Admin Area. 
+ Select Overview > Groups and find your group. + Select Edit. + Clear the New group runners can be registered checkbox if you want to disable runner registration by all members in the group. If the setting is read-only, you must enable runner registration for the instance. + Select Save changes. + + +Disable runner version management + + + +History + + + + + +Introduced in GitLab 15.10. + + + + + + +By default, GitLab instances periodically fetch official runner version data from GitLab.com to determine whether the runners need upgrades. + +To disable your instance fetching this data: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Runners. + In the Runner version management section, clear the Fetch GitLab Runner release version data from GitLab.com checkbox. + Select Save changes. + + +Troubleshooting + + + +413 Request Entity Too Large error + + +If the artifacts are too large, the job might fail with the following error: + +Uploading artifacts as ""archive"" to coordinator... too large archive responseStatus=413 Request Entity Too Large status=413"" at end of a build job on pipeline when trying to store artifacts to . + + +You might need to: + + + Increase the maximum artifacts size. + If you are using NGINX as a proxy server, increase the file upload size limit which is limited to 1 MB by default. +Set a higher value for client-max-body-size in the NGINX configuration file. + + + +2. Jobs artifacts administration + + + +Jobs artifacts administration + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed + +This is the administration documentation. To learn how to use job artifacts in your GitLab CI/CD pipeline, +see the job artifacts configuration documentation. + +An artifact is a list of files and directories attached to a job after it +finishes. This feature is enabled by default in all GitLab installations. + +Disabling job artifacts + + +To disable artifacts site-wide: + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['artifacts_enabled'] = false + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + artifacts: + enabled: false + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['artifacts_enabled'] = false + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + artifacts: + enabled: false + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +Storing job artifacts + + +GitLab Runner can upload an archive containing the job artifacts to GitLab. By default, +this is done when the job succeeds, but can also be done on failure, or always, with the +artifacts:when parameter. + +Most artifacts are compressed by GitLab Runner before being sent to the coordinator. The exception to this is +reports artifacts, which are compressed after uploading. 
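As a minimal sketch of the artifacts:when parameter mentioned above (the job name, paths, and expiry are illustrative, not part of the original example set), a job can be configured to upload its artifacts even when it fails:

```yaml
test:
  script:
    - bundle exec rspec
  artifacts:
    when: always        # upload on success and on failure (the default is on_success)
    paths:
      - rspec-results/
    expire_in: 1 week
```

When the job completes, the runner compresses the listed paths and sends the archive to GitLab, where it is stored according to the settings described in the following sections.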
+ +Using local storage + + +If you’re using the Linux package or have a self-compiled installation, you +can change the location where the artifacts are stored locally. + + + note For Docker installations, you can change the path where your data is mounted. +For the Helm chart, use +object storage. + + +Linux package (Omnibus)Self-compiled (source)The artifacts are stored by default in /var/opt/gitlab/gitlab-rails/shared/artifacts. + + To change the storage path, for example to /mnt/storage/artifacts, edit +/etc/gitlab/gitlab.rb and add the following line: + + +gitlab_rails['artifacts_path'] = ""/mnt/storage/artifacts"" + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + +The artifacts are stored by default in /home/git/gitlab/shared/artifacts. + + To change the storage path, for example to /mnt/storage/artifacts, edit +/home/git/gitlab/config/gitlab.yml and add or amend the following lines: + + +production: &base + artifacts: + enabled: true + path: /mnt/storage/artifacts + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +Using object storage + + +If you don’t want to use the local disk where GitLab is installed to store the +artifacts, you can use an object storage like AWS S3 instead. + +If you configure GitLab to store artifacts on object storage, you may also want to +eliminate local disk usage for job logs. +In both cases, job logs are archived and moved to object storage when the job completes. + + + caution In a multi-server setup you must use one of the options to +eliminate local disk usage for job logs, or job logs could be lost. + + +In GitLab 13.2 and later, you should use the +consolidated object storage settings. + +Migrating to object storage + + +You can migrate the job artifacts from local storage to object storage. The +processing is done in a background worker and requires no downtime. + + + +Configure the object storage. + + Migrate the artifacts: + + Linux package (Omnibus)DockerSelf-compiled (source) +sudo gitlab-rake gitlab:artifacts:migrate + +sudo docker exec -t gitlab-rake gitlab:artifacts:migrate + +sudo -u git -H bundle exec rake gitlab:artifacts:migrate RAILS_ENV=production + + + Optional. Track the progress and verify that all job artifacts migrated +successfully using the PostgreSQL console. + + + Open a PostgreSQL console: + + Linux package (Omnibus)DockerSelf-compiled (source) +sudo gitlab-psql + +sudo docker exec -it /bin/bash +gitlab-psql + +sudo -u git -H psql -d gitlabhq_production + + + + Verify that all artifacts migrated to object storage with the following +SQL query. 
The number of objectstg should be the same as total: + + +gitlabhq_production=# SELECT count(*) AS total, sum(case when file_store = '1' then 1 else 0 end) AS filesystem, sum(case when file_store = '2' then 1 else 0 end) AS objectstg FROM ci_job_artifacts; + +total | filesystem | objectstg +------+------------+----------- + 19 | 0 | 19 + + + + + + Verify that there are no files on disk in the artifacts directory: + + Linux package (Omnibus)DockerSelf-compiled (source) +sudo find /var/opt/gitlab/gitlab-rails/shared/artifacts -type f | grep -v tmp | wc -l + Assuming you mounted /var/opt/gitlab to /srv/gitlab: +sudo find /srv/gitlab/gitlab-rails/shared/artifacts -type f | grep -v tmp | wc -l + +sudo find /home/git/gitlab/shared/artifacts -type f | grep -v tmp | wc -l + + + + +In some cases, you need to run the orphan artifact file cleanup Rake task +to clean up orphaned artifacts. + +Migrating from object storage to local storage + + +To migrate artifacts back to local storage: + + + Run gitlab-rake gitlab:artifacts:migrate_to_local. + +Selectively disable the artifacts’ storage in gitlab.rb. + +Reconfigure GitLab. + + +Expiring artifacts + + +If artifacts:expire_in is used to set +an expiry for the artifacts, they are marked for deletion right after that date passes. +Otherwise, they expire per the default artifacts expiration setting. + +Artifacts are deleted by the expire_build_artifacts_worker cron job which Sidekiq +runs every 7 minutes (*/7 * * * * in Cron syntax). + +To change the default schedule on which expired artifacts are deleted: + +Linux package (Omnibus)Helm chart (Kubernetes)DockerSelf-compiled (source) + + Edit /etc/gitlab/gitlab.rb and add the following line (or uncomment it if +it already exists and is commented out), substituting your schedule in cron +syntax: + + +gitlab_rails['expire_build_artifacts_worker_cron'] = ""*/7 * * * *"" + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + + Export the Helm values: + + +helm get values gitlab > gitlab_values.yaml + + + + Edit gitlab_values.yaml: + + +global: + appConfig: + cron_jobs: + expire_build_artifacts_worker: + cron: ""*/7 * * * *"" + + + + Save the file and apply the new values: + + +helm upgrade -f gitlab_values.yaml gitlab gitlab/gitlab + + + + + Edit docker-compose.yml: + + +version: ""3.6"" +services: + gitlab: + environment: + GITLAB_OMNIBUS_CONFIG: | + gitlab_rails['expire_build_artifacts_worker_cron'] = ""*/7 * * * *"" + + + + Save the file and restart GitLab: + + +docker compose up -d + + + + + Edit /home/git/gitlab/config/gitlab.yml: + + +production: &base + cron_jobs: + expire_build_artifacts_worker: + cron: ""*/7 * * * *"" + + + + Save the file and restart GitLab: + + +# For systems running systemd +sudo systemctl restart gitlab.target + +# For systems running SysV init +sudo service gitlab restart + + + + +Set the maximum file size of the artifacts + + +If artifacts are enabled, you can change the maximum file size of the +artifacts through the Admin Area settings. + +Storage statistics + + +You can see the total storage used for job artifacts for groups and projects in: + + + The Admin Area + The groups and projects APIs + + +Implementation details + + +When GitLab receives an artifacts archive, an archive metadata file is also +generated by GitLab Workhorse. This metadata file describes all the entries +that are located in the artifacts archive itself. +The metadata file is in a binary format, with additional Gzip compression. 
+ +GitLab doesn’t extract the artifacts archive to save space, memory, and disk +I/O. It instead inspects the metadata file which contains all the relevant +information. This is especially important when there is a lot of artifacts, or +an archive is a very large file. + +When selecting a specific file, GitLab Workhorse extracts it +from the archive and the download begins. This implementation saves space, +memory and disk I/O. + + +" +is it possible to add stages to pipelines?,,"1. CI/CD YAML syntax reference + + + +CI/CD YAML syntax reference + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +This document lists the configuration options for the GitLab .gitlab-ci.yml file. +This file is where you define the CI/CD jobs that make up your pipeline. + + + If you are already familiar with basic CI/CD concepts, try creating +your own .gitlab-ci.yml file by following a tutorial that demonstrates a simple +or complex pipeline. + For a collection of examples, see GitLab CI/CD examples. + To view a large .gitlab-ci.yml file used in an enterprise, see the +.gitlab-ci.yml file for gitlab. + + +When you are editing your .gitlab-ci.yml file, you can validate it with the +CI Lint tool. + +If you are editing content on this page, follow the instructions for documenting keywords. + +Keywords + + +A GitLab CI/CD pipeline configuration includes: + + + + Global keywords that configure pipeline behavior: + + + + + Keyword + Description + + + + + default + Custom default values for job keywords. + + + include + Import configuration from other YAML files. + + + stages + The names and order of the pipeline stages. + + + variables + Define CI/CD variables for all job in the pipeline. + + + workflow + Control what types of pipeline run. + + + + + + Header keywords + + + + + Keyword + Description + + + + + spec + Define specifications for external configuration files. + + + + + + Jobs configured with job keywords: + + + + + Keyword + Description + + + + + after_script + Override a set of commands that are executed after job. + + + allow_failure + Allow job to fail. A failed job does not cause the pipeline to fail. + + + artifacts + List of files and directories to attach to a job on success. + + + before_script + Override a set of commands that are executed before job. + + + cache + List of files that should be cached between subsequent runs. + + + coverage + Code coverage settings for a given job. + + + dast_configuration + Use configuration from DAST profiles on a job level. + + + dependencies + Restrict which artifacts are passed to a specific job by providing a list of jobs to fetch artifacts from. + + + environment + Name of an environment to which the job deploys. + + + extends + Configuration entries that this job inherits from. + + + identity + Authenticate with third party services using identity federation. + + + image + Use Docker images. + + + inherit + Select which global defaults all jobs inherit. + + + interruptible + Defines if a job can be canceled when made redundant by a newer run. + + + needs + Execute jobs earlier than the stage ordering. + + + pages + Upload the result of a job to use with GitLab Pages. + + + parallel + How many instances of a job should be run in parallel. + + + release + Instructs the runner to generate a release object. + + + resource_group + Limit job concurrency. + + + retry + When and how many times a job can be auto-retried in case of a failure. 
+ + + rules + List of conditions to evaluate and determine selected attributes of a job, and whether or not it’s created. + + + script + Shell script that is executed by a runner. + + + secrets + The CI/CD secrets the job needs. + + + services + Use Docker services images. + + + stage + Defines a job stage. + + + tags + List of tags that are used to select a runner. + + + timeout + Define a custom job-level timeout that takes precedence over the project-wide setting. + + + trigger + Defines a downstream pipeline trigger. + + + variables + Define job variables on a job level. + + + when + When to run job. + + + + + + +Global keywords + + +Some keywords are not defined in a job. These keywords control pipeline behavior +or import additional pipeline configuration. + + +default + + + +History + + + + + Support for id_tokens introduced in GitLab 16.4. + + + + + + +You can set global defaults for some keywords. Each default keyword is copied to every job +that doesn’t already have it defined. If the job already has a keyword defined, that default +is not used. + +Keyword type: Global keyword. + +Possible inputs: These keywords can have custom defaults: + + + after_script + artifacts + before_script + cache + hooks + id_tokens + image + interruptible + retry + services + tags + +timeout, though due to issue 213634 +this keyword has no effect. + + +Example of default: + +default: + image: ruby:3.0 + retry: 2 + +rspec: + script: bundle exec rspec + +rspec 2.7: + image: ruby:2.7 + script: bundle exec rspec + + +In this example: + + + +image: ruby:3.0 and retry: 2 are the default keywords for all jobs in the pipeline. + The rspec job does not have image or retry defined, so it uses the defaults of +image: ruby:3.0 and retry: 2. + The rspec 2.7 job does not have retry defined, but it does have image explicitly defined. +It uses the default retry: 2, but ignores the default image and uses the image: ruby:2.7 +defined in the job. + + +Additional details: + + + Control inheritance of default keywords in jobs with inherit:default. + + + +include + + + +History + + + + + +Moved to GitLab Free in 11.4. + + + + + + +Use include to include external YAML files in your CI/CD configuration. +You can split one long .gitlab-ci.yml file into multiple files to increase readability, +or reduce duplication of the same configuration in multiple places. + +You can also store template files in a central repository and include them in projects. + +The include files are: + + + Merged with those in the .gitlab-ci.yml file. + Always evaluated first and then merged with the content of the .gitlab-ci.yml file, +regardless of the position of the include keyword. + + +The time limit to resolve all files is 30 seconds. + +Keyword type: Global keyword. + +Possible inputs: The include subkeys: + + + include:component + include:local + include:project + include:remote + include:template + + +And optionally: + + + include:inputs + include:rules + + +Additional details: + + + Only certain CI/CD variables can be used +with include keywords. + Use merging to customize and override included CI/CD configurations with local + You can override included configuration by having the same job name or global keyword +in the .gitlab-ci.yml file. The two configurations are merged together, and the +configuration in the .gitlab-ci.yml file takes precedence over the included configuration. + If you rerun a: + + Job, the include files are not fetched again. All jobs in a pipeline use the configuration +fetched when the pipeline was created. 
Any changes to the source include files +do not affect job reruns. + Pipeline, the include files are fetched again. If they changed after the last +pipeline run, the new pipeline uses the changed configuration. + + + You can have up to 150 includes per pipeline by default, including nested. Additionally: + + In GitLab 16.0 and later self-managed users can +change the maximum includes value. + In GitLab 15.10 and later you can have up to 150 includes. +In nested includes, the same file can be included multiple times, but duplicated includes +count towards the limit. + From GitLab 14.9 to GitLab 15.9, you can have up to 100 includes. +The same file can be included multiple times in nested includes, but duplicates are ignored. + In GitLab 14.9 and earlier you can have up to 100 includes, but the same file can not +be included multiple times. + + + + +Related topics: + + + +Use variables with include. + +Use rules with include. + + + +include:component + + +Use include:component to add a CI/CD component to the +pipeline configuration. + +Keyword type: Global keyword. + +Possible inputs: The full address of the CI/CD component, formatted as +//@. + +Example of include:component: + +include: + - component: gitlab.example.com/my-org/security-components/secret-detection@1.0 + + +Related topics: + + + +Use a CI/CD component. + + + +include:local + + +Use include:local to include a file that is in the same repository and branch as the configuration file containing the include keyword. +Use include:local instead of symbolic links. + +Keyword type: Global keyword. + +Possible inputs: + +A full path relative to the root directory (/): + + + The YAML file must have the extension .yml or .yaml. + You can use * and ** wildcards in the file path. + You can use certain CI/CD variables. + + +Example of include:local: + +include: + - local: '/templates/.gitlab-ci-template.yml' + + +You can also use shorter syntax to define the path: + +include: '.gitlab-ci-production.yml' + + +Additional details: + + + The .gitlab-ci.yml file and the local file must be on the same branch. + You can’t include local files through Git submodules paths. + All nested includes are executed in the scope of the project containing the configuration file with the include keyword, not the project running the pipeline. +You can use local, project, remote, or template includes. + + + +include:project + + + +History + + + + + Including multiple files from the same project introduced in GitLab 13.6. Feature flag removed in GitLab 13.8. + + + + + + +To include files from another private project on the same GitLab instance, +use include:project and include:file. + +Keyword type: Global keyword. + +Possible inputs: + + + +include:project: The full GitLab project path. + +include:file A full file path, or array of file paths, relative to the root directory (/). +The YAML files must have the .yml or .yaml extension. + +include:ref: Optional. The ref to retrieve the file from. Defaults to the HEAD of the project +when not specified. + You can use certain CI/CD variables. 
+ + +Example of include:project: + +include: + - project: 'my-group/my-project' + file: '/templates/.gitlab-ci-template.yml' + - project: 'my-group/my-subgroup/my-project-2' + file: + - '/templates/.builds.yml' + - '/templates/.tests.yml' + + +You can also specify a ref: + +include: + - project: 'my-group/my-project' + ref: main # Git branch + file: '/templates/.gitlab-ci-template.yml' + - project: 'my-group/my-project' + ref: v1.0.0 # Git Tag + file: '/templates/.gitlab-ci-template.yml' + - project: 'my-group/my-project' + ref: 787123b47f14b552955ca2786bc9542ae66fee5b # Git SHA + file: '/templates/.gitlab-ci-template.yml' + + +Additional details: + + + All nested includes are executed in the scope of the project containing the configuration file with the nested include keyword. +You can use local (relative to the project containing the configuration file with the include keyword), project, remote, or template includes. + When the pipeline starts, the .gitlab-ci.yml file configuration included by all methods is evaluated. +The configuration is a snapshot in time and persists in the database. GitLab does not reflect any changes to +the referenced .gitlab-ci.yml file configuration until the next pipeline starts. + When you include a YAML file from another private project, the user running the pipeline +must be a member of both projects and have the appropriate permissions to run pipelines. +A not found or access denied error may be displayed if the user does not have access to any of the included files. + Be careful when including another project’s CI/CD configuration file. No pipelines or notifications trigger when CI/CD configuration files change. +From a security perspective, this is similar to pulling a third-party dependency. For the ref, consider: + + Using a specific SHA hash, which should be the most stable option. Use the +full 40-character SHA hash to ensure the desired commit is referenced, because +using a short SHA hash for the ref might be ambiguous. + Applying both protected branch and protected tag rules to +the ref in the other project. Protected tags and branches are more likely to pass through change management before changing. + + + + + +include:remote + + +Use include:remote with a full URL to include a file from a different location. + +Keyword type: Global keyword. + +Possible inputs: + +A public URL accessible by an HTTP/HTTPS GET request: + + + Authentication with the remote URL is not supported. + The YAML file must have the extension .yml or .yaml. + You can use certain CI/CD variables. + + +Example of include:remote: + +include: + - remote: 'https://gitlab.com/example-project/-/raw/main/.gitlab-ci.yml' + + +Additional details: + + + All nested includes are executed without context as a public user, +so you can only include public projects or templates. No variables are available in the include section of nested includes. + Be careful when including another project’s CI/CD configuration file. No pipelines or notifications trigger +when the other project’s files change. From a security perspective, this is similar to +pulling a third-party dependency. If you link to another GitLab project you own, consider the use of both +protected branches and protected tags +to enforce change management rules. + + + +include:template + + +Use include:template to include .gitlab-ci.yml templates. + +Keyword type: Global keyword. + +Possible inputs: + +A CI/CD template: + + + All templates can be viewed in lib/gitlab/ci/templates. 
+Not all templates are designed to be used with include:template, so check template +comments before using one. + You can use certain CI/CD variables. + + +Example of include:template: + +# File sourced from the GitLab template collection +include: + - template: Auto-DevOps.gitlab-ci.yml + + +Multiple include:template files: + +include: + - template: Android-Fastlane.gitlab-ci.yml + - template: Auto-DevOps.gitlab-ci.yml + + +Additional details: + + + All nested includes are executed without context as a public user, +so you can only include public projects or templates. No variables are available in the include section of nested includes. + + + +include:inputs + + + +History + + + + + +Introduced in GitLab 15.11 as a Beta feature. + + + + + + +Use include:inputs to set the values for input parameters when the included configuration +uses spec:inputs and is added to the pipeline. + +Keyword type: Global keyword. + +Possible inputs: A string, numeric value, or boolean. + +Example of include:inputs: + +include: + - local: 'custom_configuration.yml' + inputs: + website: ""My website"" + + +In this example: + + + The configuration contained in custom_configuration.yml is added to the pipeline, +with a website input set to a value of My website for the included configuration. + + +Additional details: + + + If the included configuration file uses spec:inputs:type, +the input value must match the defined type. + If the included configuration file uses spec:inputs:options, +the input value must match one of the listed options. + + +Related topics: + + + +Set input values when using include. + + + +stages + + + +History + + + + + Support for nested array of strings introduced in GitLab 16.9. + + + + + + +Use stages to define stages that contain groups of jobs. Use stage +in a job to configure the job to run in a specific stage. + +If stages is not defined in the .gitlab-ci.yml file, the default pipeline stages are: + + + .pre + build + test + deploy + .post + + +The order of the items in stages defines the execution order for jobs: + + + Jobs in the same stage run in parallel. + Jobs in the next stage run after the jobs from the previous stage complete successfully. + + +If a pipeline contains only jobs in the .pre or .post stages, it does not run. +There must be at least one other job in a different stage. .pre and .post stages +can be used in required pipeline configuration +to define compliance jobs that must run before or after project pipeline jobs. + +Keyword type: Global keyword. + +Example of stages: + +stages: + - build + - test + - deploy + + +In this example: + + + All jobs in build execute in parallel. + If all jobs in build succeed, the test jobs execute in parallel. + If all jobs in test succeed, the deploy jobs execute in parallel. + If all jobs in deploy succeed, the pipeline is marked as passed. + + +If any job fails, the pipeline is marked as failed and jobs in later stages do not +start. Jobs in the current stage are not stopped and continue to run. + +Additional details: + + + If a job does not specify a stage, the job is assigned the test stage. + If a stage is defined but no jobs use it, the stage is not visible in the pipeline, +which can help compliance pipeline configurations: + + Stages can be defined in the compliance configuration but remain hidden if not used. + The defined stages become visible when developers use them in job definitions. + + + + +Related topics: + + + To make a job start earlier and ignore the stage order, use the needs keyword. 
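To illustrate that related topic, the following is a minimal sketch (job names are illustrative) where needs lets a deploy-stage job start as soon as its build dependency finishes, instead of waiting for the whole test stage to complete:

```yaml
stages:
  - build
  - test
  - deploy

build-docs:
  stage: build
  script: echo "Build the docs"

test-app:
  stage: test
  script: echo "Run the test suite"

deploy-docs:
  stage: deploy
  needs: [build-docs]   # starts right after build-docs, ignoring the stage order
  script: echo "Publish the docs"
```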
+ + + +workflow + + + +History + + + + + +Introduced in GitLab 12.5 + + + + + + +Use workflow to control pipeline behavior. + +You can use some predefined CI/CD variables in +workflow configuration, but not variables that are only defined when jobs start. + +Related topics: + + + workflow: rules examples + Switch between branch pipelines and merge request pipelines + + + +workflow:auto_cancel:on_new_commit + + + +History + + + + + +Introduced in GitLab 16.8 with a flag named ci_workflow_auto_cancel_on_new_commit. Disabled by default. + +Enabled on GitLab.com and self-managed in GitLab 16.9. + +Generally available in GitLab 16.10. Feature flag ci_workflow_auto_cancel_on_new_commit removed. + + + + + + +Use workflow:auto_cancel:on_new_commit to configure the behavior of +the auto-cancel redundant pipelines feature. + +Possible inputs: + + + +conservative: Cancel the pipeline, but only if no jobs with interruptible: false have started yet. Default when not defined. + +interruptible: Cancel only jobs with interruptible: true. + +none: Do not auto-cancel any jobs. + + +Example of workflow:auto_cancel:on_new_commit: + +workflow: + auto_cancel: + on_new_commit: interruptible + +job1: + interruptible: true + script: sleep 60 + +job2: + interruptible: false # Default when not defined. + script: sleep 60 + + +In this example: + + + When a new commit is pushed to a branch, GitLab creates a new pipeline and job1 and job2 start. + If a new commit is pushed to the branch before the jobs complete, only job1 is canceled. + + + +workflow:auto_cancel:on_job_failure + + + +History + + + + + +Introduced in GitLab 16.10 with a flag named auto_cancel_pipeline_on_job_failure. Disabled by default. + + + + + + + + On self-managed GitLab, by default this feature is not available. +To enable the feature, an administrator can enable the feature flag named auto_cancel_pipeline_on_job_failure. +On GitLab.com and GitLab Dedicated, this feature is not available. + + +Use workflow:auto_cancel:on_job_failure to configure which jobs should be cancelled as soon as one job fails. + +Possible inputs: + + + +all: Cancel the pipeline and all running jobs as soon as one job fails. + +none: Do not auto-cancel any jobs. + + +Example of workflow:auto_cancel:on_job_failure: + +stages: [stage_a, stage_b] + +workflow: + auto_cancel: + on_job_failure: all + +job1: + stage: stage_a + script: sleep 60 + +job2: + stage: stage_a + script: + - sleep 30 + - exit 1 + +job3: + stage: stage_b + script: + - sleep 30 + + +In this example, if job2 fails, job1 is cancelled if it is still running and job3 does not start. + +Related topics: + + + Auto-cancel the parent pipeline from a downstream pipeline + + + +workflow:name + + + +History + + + + + +Introduced in GitLab 15.5 with a flag named pipeline_name. Disabled by default. + +Enabled on GitLab.com and self-managed in GitLab 15.7. + +Generally available in GitLab 15.8. Feature flag pipeline_name removed. + + + + + + +You can use name in workflow: to define a name for pipelines. + +All pipelines are assigned the defined name. Any leading or trailing spaces in the name are removed. + +Possible inputs: + + + A string. + +CI/CD variables. + A combination of both. + + +Examples of workflow:name: + +A simple pipeline name with a predefined variable: + +workflow: + name: 'Pipeline for branch: $CI_COMMIT_BRANCH' + + +A configuration with different pipeline names depending on the pipeline conditions: + +variables: + PROJECT1_PIPELINE_NAME: 'Default pipeline name' # A default is not required. 
+ +workflow: + name: '$PROJECT1_PIPELINE_NAME' + rules: + - if: '$CI_PIPELINE_SOURCE == ""merge_request_event""' + variables: + PROJECT1_PIPELINE_NAME: 'MR pipeline: $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME' + - if: '$CI_MERGE_REQUEST_LABELS =~ /pipeline:run-in-ruby3/' + variables: + PROJECT1_PIPELINE_NAME: 'Ruby 3 pipeline' + - when: always # Other pipelines can run, but use the default name + + +Additional details: + + + If the name is an empty string, the pipeline is not assigned a name. A name consisting +of only CI/CD variables could evaluate to an empty string if all the variables are also empty. + +workflow:rules:variables become global variables available in all jobs, +including trigger jobs which forward variables to downstream pipelines by default. +If the downstream pipeline uses the same variable, the variable is overwritten +by the upstream variable value. Be sure to either: + + Use a unique variable name in every project’s pipeline configuration, like PROJECT1_PIPELINE_NAME. + Use inherit:variables in the trigger job and list the +exact variables you want to forward to the downstream pipeline. + + + + + +workflow:rules + + +The rules keyword in workflow is similar to rules defined in jobs, +but controls whether or not a whole pipeline is created. + +When no rules evaluate to true, the pipeline does not run. + +Possible inputs: You can use some of the same keywords as job-level rules: + + + +rules: if. + +rules: changes. + +rules: exists. + +when, can only be always or never when used with workflow. + +variables. + + +Example of workflow:rules: + +workflow: + rules: + - if: $CI_COMMIT_TITLE =~ /-draft$/ + when: never + - if: $CI_PIPELINE_SOURCE == ""merge_request_event"" + - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + + +In this example, pipelines run if the commit title (first line of the commit message) does not end with -draft +and the pipeline is for either: + + + A merge request + The default branch. + + +Additional details: + + + If your rules match both branch pipelines (other than the default branch) and merge request pipelines, +duplicate pipelines can occur. + + +Related topics: + + + You can use the workflow:rules templates to import +a preconfigured workflow: rules entry. + +Common if clauses for workflow:rules. + +Use rules to run merge request pipelines. + + + +workflow:rules:variables + + + +History + + + + + +Introduced in GitLab 13.11. + +Feature flag removed in GitLab 14.1. + + + + + + +You can use variables in workflow:rules to define variables for +specific pipeline conditions. + +When the condition matches, the variable is created and can be used by all jobs +in the pipeline. If the variable is already defined at the global level, the workflow +variable takes precedence and overrides the global variable. + +Keyword type: Global keyword. + +Possible inputs: Variable name and value pairs: + + + The name can use only numbers, letters, and underscores (_). + The value must be a string. + + +Example of workflow:rules:variables: + +variables: + DEPLOY_VARIABLE: ""default-deploy"" + +workflow: + rules: + - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH + variables: + DEPLOY_VARIABLE: ""deploy-production"" # Override globally-defined DEPLOY_VARIABLE + - if: $CI_COMMIT_REF_NAME =~ /feature/ + variables: + IS_A_FEATURE: ""true"" # Define a new variable. 
+ - when: always # Run the pipeline in other cases + +job1: + variables: + DEPLOY_VARIABLE: ""job1-default-deploy"" + rules: + - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH + variables: # Override DEPLOY_VARIABLE defined + DEPLOY_VARIABLE: ""job1-deploy-production"" # at the job level. + - when: on_success # Run the job in other cases + script: + - echo ""Run script with $DEPLOY_VARIABLE as an argument"" + - echo ""Run another script if $IS_A_FEATURE exists"" + +job2: + script: + - echo ""Run script with $DEPLOY_VARIABLE as an argument"" + - echo ""Run another script if $IS_A_FEATURE exists"" + + +When the branch is the default branch: + + + job1’s DEPLOY_VARIABLE is job1-deploy-production. + job2’s DEPLOY_VARIABLE is deploy-production. + + +When the branch is feature: + + + job1’s DEPLOY_VARIABLE is job1-default-deploy, and IS_A_FEATURE is true. + job2’s DEPLOY_VARIABLE is default-deploy, and IS_A_FEATURE is true. + + +When the branch is something else: + + + job1’s DEPLOY_VARIABLE is job1-default-deploy. + job2’s DEPLOY_VARIABLE is default-deploy. + + +Additional details: + + + +workflow:rules:variables become global variables available in all jobs, +including trigger jobs which forward variables to downstream pipelines by default. +If the downstream pipeline uses the same variable, the variable is overwritten +by the upstream variable value. Be sure to either: + + Use unique variable names in every project’s pipeline configuration, like PROJECT1_VARIABLE_NAME. + Use inherit:variables in the trigger job and list the +exact variables you want to forward to the downstream pipeline. + + + + + +workflow:rules:auto_cancel + + + +History + + + + + +Introduced in GitLab 16.8 with a flag named ci_workflow_auto_cancel_on_new_commit. Disabled by default. + +Enabled on GitLab.com and self-managed in GitLab 16.9. + +Generally available in GitLab 16.10. Feature flag ci_workflow_auto_cancel_on_new_commit removed. + + + + + + +Use workflow:rules:auto_cancel to configure the behavior of +the workflow:auto_cancel:on_new_commit feature. + +Possible inputs: + + + +on_new_commit: workflow:auto_cancel:on_new_commit + + + +Example of workflow:rules:auto_cancel: + +workflow: + auto_cancel: + on_new_commit: interruptible + rules: + - if: $CI_COMMIT_REF_PROTECTED == 'true' + auto_cancel: + on_new_commit: none + - when: always # Run the pipeline in other cases + +test-job1: + script: sleep 10 + interruptible: false + +test-job2: + script: sleep 10 + interruptible: true + + +In this example, workflow:auto_cancel:on_new_commit +is set to interruptible for all jobs by default. But if a pipeline runs for a protected branch, +the rule overrides the default with on_new_commit: none. For example, if a pipeline +is running for: + + + A non-protected branch and a new commit is pushed, test-job1 continues to run and test-job2 is canceled. + A protected branch and a new commit is pushed, both test-job1 and test-job2 continue to run. + + +Header keywords + + +Some keywords must be defined in a header section of a YAML configuration file. +The header must be at the top of the file, separated from the rest of the configuration +with ---. + + +spec + + + +History + + + + + +Introduced in GitLab 15.11 as a Beta feature. + + + + + + +Add a spec section to the header of a YAML file to configure the behavior of a pipeline +when a configuration is added to the pipeline with the include keyword. 
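As a minimal sketch of how the two halves fit together (the file path, input names, and values are illustrative), the included file declares its inputs in a spec header separated from the rest of the configuration by ---, and the including configuration supplies values with include:inputs:

```yaml
# templates/scan.yml (illustrative path)
spec:
  inputs:
    environment:
    job-stage:
      default: test
---
scan-website:
  stage: $[[ inputs.job-stage ]]
  script: ./scan-website $[[ inputs.environment ]]
```

```yaml
# .gitlab-ci.yml
include:
  - local: templates/scan.yml
    inputs:
      environment: staging
```

Because job-stage defines a default, only environment must be supplied. The inputs are interpolated when the configuration is fetched during pipeline creation, before the included configuration is merged with the rest of the .gitlab-ci.yml file.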
+ + +spec:inputs + + +You can use spec:inputs to define input parameters for the CI/CD configuration you intend to add +to a pipeline with include. Use include:inputs to define the values to use when the pipeline runs. + +Use the inputs to customize the behavior of the configuration when included in CI/CD configuration. + +Use the interpolation format $[[ input.input-id ]] to reference the values outside of the header section. +Inputs are evaluated and interpolated when the configuration is fetched during pipeline creation, but before the +configuration is merged with the contents of the .gitlab-ci.yml file. + +Keyword type: Header keyword. specs must be declared at the top of the configuration file, +in a header section. + +Possible inputs: A hash of strings representing the expected inputs. + +Example of spec:inputs: + +spec: + inputs: + environment: + job-stage: +--- + +scan-website: + stage: $[[ inputs.job-stage ]] + script: ./scan-website $[[ inputs.environment ]] + + +Additional details: + + + Inputs are mandatory unless you use spec:inputs:default +to set a default value. + Inputs expect strings unless you use spec:inputs:type to set a +different input type. + A string containing an interpolation block must not exceed 1 MB. + The string inside an interpolation block must not exceed 1 KB. + + +Related topics: + + + +Define input parameters with spec:inputs. + + + +spec:inputs:default + + + +History + + + + + +Introduced in GitLab 15.11 as a Beta feature. + + + + + + +Inputs are mandatory when included, unless you set a default value with spec:inputs:default. + +Use default: '' to have no default value. + +Keyword type: Header keyword. specs must be declared at the top of the configuration file, +in a header section. + +Possible inputs: A string representing the default value, or ''. + +Example of spec:inputs:default: + +spec: + inputs: + website: + user: + default: 'test-user' + flags: + default: '' +--- + +# The pipeline configuration would follow... + + +In this example: + + + +website is mandatory and must be defined. + +user is optional. If not defined, the value is test-user. + +flags is optional. If not defined, it has no value. + + +Additional details: + + + The pipeline fails with a validation error when the input: + + Uses both default and options, but the default value +is not one of the listed options. + Uses both default and regex, but the default value does not match the regular expression. + Value does not match the type. + + + + + +spec:inputs:description + + + +History + + + + + +Introduced in GitLab 16.5. + + + + + + +Use description to give a description to a specific input. The description does +not affect the behavior of the input and is only used to help users of the file +understand the input. + +Keyword type: Header keyword. specs must be declared at the top of the configuration file, +in a header section. + +Possible inputs: A string representing the description. + +Example of spec:inputs:description: + +spec: + inputs: + flags: + description: 'Sample description of the `flags` input details.' +--- + +# The pipeline configuration would follow... + + + +spec:inputs:options + + + +History + + + + + +Introduced in GitLab 16.6. + + + + + + +Inputs can use options to specify a list of allowed values for an input. +The limit is 50 options per input. + +Keyword type: Header keyword. specs must be declared at the top of the configuration file, +in a header section. + +Possible inputs: An array of input options. 
+ +Example of spec:inputs:options: + +spec: + inputs: + environment: + options: + - development + - staging + - production +--- + +# The pipeline configuration would follow... + + +In this example: + + + +environment is mandatory and must be defined with one of the values in the list. + + +Additional details: + + + The pipeline fails with a validation error when: + + The input uses both options and default, but the default value +is not one of the listed options. + Any of the input options do not match the type, which can +be either string or number, but not boolean when using options. + + + + + +spec:inputs:regex + + + +History + + + + + +Introduced in GitLab 16.5. + + + + + + +Use spec:inputs:regex to specify a regular expression that the input must match. + +Keyword type: Header keyword. specs must be declared at the top of the configuration file, +in a header section. + +Possible inputs: Must be a regular expression that starts and ends with the / character. + +Example of spec:inputs:regex: + +spec: + inputs: + version: + regex: /^v\d\.\d+(\.\d+)$/ +--- + +# The pipeline configuration would follow... + + +In this example, inputs of v1.0 or v1.2.3 match the regular expression and pass validation. +An input of v1.A.B does not match the regular expression and fails validation. + +Additional details: + + + +inputs:regex can only be used with a type of string, +not number or boolean. + + + +spec:inputs:type + + +By default, inputs expect strings. Use spec:inputs:type to set a different required +type for inputs. + +Keyword type: Header keyword. specs must be declared at the top of the configuration file, +in a header section. + +Possible inputs: Can be one of: + + + +string, to accept string inputs (default when not defined). + +number, to only accept numeric inputs. + +boolean, to only accept true or false inputs. + + +Example of spec:inputs:type: + +spec: + inputs: + job_name: + website: + type: string + port: + type: number + available: + type: boolean +--- + +# The pipeline configuration would follow... + + +Job keywords + + +The following topics explain how to use keywords to configure CI/CD pipelines. + + +after_script + + +Use after_script to define an array of commands that run after a job’s script section, including failed jobs with failure type of script_failure. +after_script commands do not run after other failure types. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: An array including: + + + Single line commands. + Long commands split over multiple lines. + +YAML anchors. + + +CI/CD variables are supported. + +Example of after_script: + +job: + script: + - echo ""An example script section."" + after_script: + - echo ""Execute this command after the `script` section completes."" + + +Additional details: + +Scripts you specify in after_script execute in a new shell, separate from any +before_script or script commands. As a result, they: + + + Have the current working directory set back to the default (according to the variables which define how the runner processes Git requests). + Don’t have access to changes done by commands defined in the before_script or script, +including: + + Command aliases and variables exported in script scripts. + Changes outside of the working tree (depending on the runner executor), like +software installed by a before_script or script script. + + + Have a separate timeout. 
For GitLab Runner 16.4 and later, this defaults to 5 minutes, and can be configured with the +RUNNER_AFTER_SCRIPT_TIMEOUT variable. +In GitLab 16.3 and earlier, the timeout is hard-coded to 5 minutes. + Don’t affect the job’s exit code. If the script section succeeds and the +after_script times out or fails, the job exits with code 0 (Job Succeeded). + + +If a job times out or is cancelled, the after_script commands do not execute. +An issue exists to add support for executing after_script commands for timed-out or cancelled jobs. + +Related topics: + + + +Use after_script with default +to define a default array of commands that should run after all jobs. + You can ignore non-zero exit codes. + +Use color codes with after_script +to make job logs easier to review. + +Create custom collapsible sections +to simplify job log output. + + + +allow_failure + + +Use allow_failure to determine whether a pipeline should continue running when a job fails. + + + To let the pipeline continue running subsequent jobs, use allow_failure: true. + To stop the pipeline from running subsequent jobs, use allow_failure: false. + + +When jobs are allowed to fail (allow_failure: true) an orange warning ( ) +indicates that a job failed. However, the pipeline is successful and the associated commit +is marked as passed with no warnings. + +This same warning is displayed when: + + + All other jobs in the stage are successful. + All other jobs in the pipeline are successful. + + +The default value for allow_failure is: + + + +true for manual jobs. + +false for jobs that use when: manual inside rules. + +false in all other cases. + + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +true or false. + + +Example of allow_failure: + +job1: + stage: test + script: + - execute_script_1 + +job2: + stage: test + script: + - execute_script_2 + allow_failure: true + +job3: + stage: deploy + script: + - deploy_to_staging + environment: staging + + +In this example, job1 and job2 run in parallel: + + + If job1 fails, jobs in the deploy stage do not start. + If job2 fails, jobs in the deploy stage can still start. + + +Additional details: + + + You can use allow_failure as a subkey of rules. + If allow_failure: true is set, the job is always considered successful, and later jobs with when: on_failure don’t start if this job fails. + You can use allow_failure: false with a manual job to create a blocking manual job. +A blocked pipeline does not run any jobs in later stages until the manual job +is started and completes successfully. + + + +allow_failure:exit_codes + + + +History + + + + + +Introduced in GitLab 13.8. + +Feature flag removed in GitLab 13.9. + + + + + + +Use allow_failure:exit_codes to control when a job should be +allowed to fail. The job is allow_failure: true for any of the listed exit codes, +and allow_failure false for any other exit code. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + A single exit code. + An array of exit codes. + + +Example of allow_failure: + +test_job_1: + script: + - echo ""Run a script that results in exit code 1. This job fails."" + - exit 1 + allow_failure: + exit_codes: 137 + +test_job_2: + script: + - echo ""Run a script that results in exit code 137. This job is allowed to fail."" + - exit 137 + allow_failure: + exit_codes: + - 137 + - 255 + + + +artifacts + + +Use artifacts to specify which files to save as job artifacts. 
+Job artifacts are a list of files and directories that are +attached to the job when it succeeds, fails, or always. + +The artifacts are sent to GitLab after the job finishes. They are +available for download in the GitLab UI if the size is smaller than the +maximum artifact size. + +By default, jobs in later stages automatically download all the artifacts created +by jobs in earlier stages. You can control artifact download behavior in jobs with +dependencies. + +When using the needs keyword, jobs can only download +artifacts from the jobs defined in the needs configuration. + +Job artifacts are only collected for successful jobs by default, and +artifacts are restored after caches. + +Read more about artifacts. + + +artifacts:paths + + +Paths are relative to the project directory ($CI_PROJECT_DIR) and can’t directly +link outside it. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + An array of file paths, relative to the project directory. + You can use Wildcards that use glob +patterns and: + + In GitLab Runner 13.0 and later, +doublestar.Glob. + In GitLab Runner 12.10 and earlier, filepath.Match. + + + + +CI/CD variables are supported. + +Example of artifacts:paths: + +job: + artifacts: + paths: + - binaries/ + - .config + + +This example creates an artifact with .config and all the files in the binaries directory. + +Additional details: + + + If not used with artifacts:name, the artifacts file +is named artifacts, which becomes artifacts.zip when downloaded. + + +Related topics: + + + To restrict which jobs a specific job fetches artifacts from, see dependencies. + +Create job artifacts. + + + +artifacts:exclude + + + +History + + + + + +Introduced in GitLab 13.1 + Requires GitLab Runner 13.1 + + + + + + +Use artifacts:exclude to prevent files from being added to an artifacts archive. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + An array of file paths, relative to the project directory. + You can use Wildcards that use glob or +doublestar.PathMatch patterns. + + +Example of artifacts:exclude: + +artifacts: + paths: + - binaries/ + exclude: + - binaries/**/*.o + + +This example stores all files in binaries/, but not *.o files located in +subdirectories of binaries/. + +Additional details: + + + +artifacts:exclude paths are not searched recursively. + Files matched by artifacts:untracked can be excluded using +artifacts:exclude too. + + +Related topics: + + + +Exclude files from job artifacts. + + + +artifacts:expire_in + + + +History + + + + + +Introduced in GitLab 13.0 behind a disabled feature flag, the latest job artifacts are kept regardless of expiry time. + +Made default behavior in GitLab 13.4. + +Introduced in GitLab 13.8, keeping latest job artifacts can be disabled at the project level. + +Introduced in GitLab 13.9, keeping latest job artifacts can be disabled instance-wide. + + + + + + +Use expire_in to specify how long job artifacts are stored before +they expire and are deleted. The expire_in setting does not affect: + + + Artifacts from the latest job, unless keeping the latest job artifacts is disabled +at the project level. +or instance-wide. + + +After their expiry, artifacts are deleted hourly by default (using a cron job), and are not +accessible anymore. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: The expiry time. 
If no unit is provided, the time is in seconds. +Valid values include: + + + '42' + 42 seconds + 3 mins 4 sec + 2 hrs 20 min + 2h20min + 6 mos 1 day + 47 yrs 6 mos and 4d + 3 weeks and 2 days + never + + +Example of artifacts:expire_in: + +job: + artifacts: + expire_in: 1 week + + +Additional details: + + + The expiration time period begins when the artifact is uploaded and stored on GitLab. +If the expiry time is not defined, it defaults to the instance wide setting. + To override the expiration date and protect artifacts from being automatically deleted: + + Select Keep on the job page. + +In GitLab 13.3 and later, set the value of +expire_in to never. + + + If the expiry time is too short, jobs in later stages of a long pipeline might try to fetch +expired artifacts from earlier jobs. If the artifacts are expired, jobs that try to fetch +them fail with a could not retrieve the needed artifacts error. +Set the expiry time to be longer, or use dependencies in later jobs +to ensure they don’t try to fetch expired artifacts. + + + +artifacts:expose_as + + + +History + + + + + +Introduced in GitLab 12.5. + + + + + + +Use the artifacts:expose_as keyword to +expose job artifacts in the merge request UI. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + The name to display in the merge request UI for the artifacts download link. +Must be combined with artifacts:paths. + + +Example of artifacts:expose_as: + +test: + script: [""echo 'test' > file.txt""] + artifacts: + expose_as: 'artifact 1' + paths: ['file.txt'] + + +Additional details: + + + Artifacts are saved, but do not display in the UI if the artifacts:paths values: + + Use CI/CD variables. + Define a directory, but do not end with /. For example, directory/ works with artifacts:expose_as, +but directory does not. + Start with ./. For example, file works with artifacts:expose_as, but ./file does not. + + + A maximum of 10 job artifacts per merge request can be exposed. + Glob patterns are unsupported. + If a directory is specified and there is more than one file in the directory, +the link is to the job artifacts browser. + If GitLab Pages is enabled, GitLab automatically +renders the artifacts when the artifacts is a single file with one of these extensions: + + +.html or .htm + + .txt + .json + .xml + .log + + + + +Related topics: + + + +Expose job artifacts in the merge request UI. + + + +artifacts:name + + +Use the artifacts:name keyword to define the name of the created artifacts +archive. You can specify a unique name for every archive. + +If not defined, the default name is artifacts, which becomes artifacts.zip when downloaded. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + The name of the artifacts archive. CI/CD variables are supported. +Must be combined with artifacts:paths. + + +Example of artifacts:name: + +To create an archive with a name of the current job: + +job: + artifacts: + name: ""job1-artifacts-file"" + paths: + - binaries/ + + +Related topics: + + + +Use CI/CD variables to define the artifacts name. + + + +artifacts:public + + + +History + + + + + +Introduced in GitLab 13.8 with a flag named non_public_artifacts, disabled by default. + +Updated in GitLab 15.10. Artifacts created with artifacts:public before 15.10 are not guaranteed to remain private after this update. + +Generally available in GitLab 16.7. Feature flag non_public_artifacts removed. 
+ + + + + + +Use artifacts:public to determine whether the job artifacts should be +publicly available. + +When artifacts:public is true (default), the artifacts in +public pipelines are available for download by anonymous, guest, and reporter users. + +To deny read access to artifacts in public +pipelines for anonymous, guest, and reporter users, set artifacts:public to false: + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + +true (default if not defined) or false. + + +Example of artifacts:public: + +job: + artifacts: + public: false + + + +artifacts:reports + + +Use artifacts:reports to collect artifacts generated by +included templates in jobs. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + See list of available artifacts reports types. + + +Example of artifacts:reports: + +rspec: + stage: test + script: + - bundle install + - rspec --format RspecJunitFormatter --out rspec.xml + artifacts: + reports: + junit: rspec.xml + + +Additional details: + + + Combining reports in parent pipelines using artifacts from child pipelines is +not supported. Track progress on adding support in this issue. + To be able to browse the report output files, include the artifacts:paths keyword. This uploads and stores the artifact twice. + Artifacts created for artifacts: reports are always uploaded, regardless of the job results (success or failure). +You can use artifacts:expire_in to set an expiration +date for the artifacts. + + + +artifacts:untracked + + +Use artifacts:untracked to add all Git untracked files as artifacts (along +with the paths defined in artifacts:paths). artifacts:untracked ignores configuration +in the repository’s .gitignore, so matching artifacts in .gitignore are included. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + +true or false (default if not defined). + + +Example of artifacts:untracked: + +Save all Git untracked files: + +job: + artifacts: + untracked: true + + +Related topics: + + + +Add untracked files to artifacts. + + + +artifacts:when + + +Use artifacts:when to upload artifacts on job failure or despite the +failure. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + +on_success (default): Upload artifacts only when the job succeeds. + +on_failure: Upload artifacts only when the job fails. + +always: Always upload artifacts (except when jobs time out). For example, when +uploading artifacts +required to troubleshoot failing tests. + + +Example of artifacts:when: + +job: + artifacts: + when: on_failure + + +Additional details: + + + The artifacts created for artifacts:reports are always uploaded, +regardless of the job results (success or failure). artifacts:when does not change this behavior. + + + +before_script + + +Use before_script to define an array of commands that should run before each job’s +script commands, but after artifacts are restored. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: An array including: + + + Single line commands. + Long commands split over multiple lines. + +YAML anchors. + + +CI/CD variables are supported. 
+ +Example of before_script: + +job: + before_script: + - echo ""Execute this command before any 'script:' commands."" + script: + - echo ""This command executes after the job's 'before_script' commands."" + + +Additional details: + + + Scripts you specify in before_script are concatenated with any scripts you specify +in the main script. The combined scripts execute together in a single shell. + Using before_script at the top level, but not in the default section, is deprecated. + + +Related topics: + + + +Use before_script with default +to define a default array of commands that should run before the script commands in all jobs. + You can ignore non-zero exit codes. + +Use color codes with before_script +to make job logs easier to review. + +Create custom collapsible sections +to simplify job log output. + + + +cache + + + +History + + + + + +Introduced in GitLab 15.0, caches are not shared between protected and unprotected branches. + + + + + + +Use cache to specify a list of files and directories to +cache between jobs. You can only use paths that are in the local working copy. + +Caches are: + + + Shared between pipelines and jobs. + By default, not shared between protected and unprotected branches. + Restored before artifacts. + Limited to a maximum of four different caches. + + +You can disable caching for specific jobs, +for example to override: + + + A default cache defined with default. + The configuration for a job added with include. + + +For more information about caches, see Caching in GitLab CI/CD. + + +cache:paths + + +Use the cache:paths keyword to choose which files or directories to cache. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + An array of paths relative to the project directory ($CI_PROJECT_DIR). +You can use wildcards that use glob +patterns: + + In GitLab Runner 13.0 and later, +doublestar.Glob. + In GitLab Runner 12.10 and earlier, +filepath.Match. + + + + +Example of cache:paths: + +Cache all files in binaries that end in .apk and the .config file: + +rspec: + script: + - echo ""This job uses a cache."" + cache: + key: binaries-cache + paths: + - binaries/*.apk + - .config + + +Additional details: + + + The cache:paths keyword includes files even if they are untracked or in your .gitignore file. + + +Related topics: + + + See the common cache use cases for more +cache:paths examples. + + + +cache:key + + +Use the cache:key keyword to give each cache a unique identifying key. All jobs +that use the same cache key use the same cache, including in different pipelines. + +If not set, the default key is default. All jobs with the cache keyword but +no cache:key share the default cache. + +Must be used with cache: paths, or nothing is cached. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + A string. + A predefined CI/CD variable. + A combination of both. + + +Example of cache:key: + +cache-job: + script: + - echo ""This job uses a cache."" + cache: + key: binaries-cache-$CI_COMMIT_REF_SLUG + paths: + - binaries/ + + +Additional details: + + + If you use Windows Batch to run your shell scripts you must replace +$ with %. For example: key: %CI_COMMIT_REF_SLUG% + + + The cache:key value can’t contain: + + + The / character, or the equivalent URI-encoded %2F. + Only the . character (any number), or the equivalent URI-encoded %2E. 
+ + + The cache is shared between jobs, so if you’re using different +paths for different jobs, you should also set a different cache:key. +Otherwise cache content can be overwritten. + + +Related topics: + + + You can specify a fallback cache key +to use if the specified cache:key is not found. + You can use multiple cache keys in a single job. + See the common cache use cases for more +cache:key examples. + + + +cache:key:files + + + +History + + + + + +Introduced in GitLab 12.5. + + + + + + +Use the cache:key:files keyword to generate a new key when one or two specific files +change. cache:key:files lets you reuse some caches, and rebuild them less often, +which speeds up subsequent pipeline runs. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + An array of one or two file paths. + + +CI/CD variables are not supported. + +Example of cache:key:files: + +cache-job: + script: + - echo ""This job uses a cache."" + cache: + key: + files: + - Gemfile.lock + - package.json + paths: + - vendor/ruby + - node_modules + + +This example creates a cache for Ruby and Node.js dependencies. The cache +is tied to the current versions of the Gemfile.lock and package.json files. When one of +these files changes, a new cache key is computed and a new cache is created. Any future +job runs that use the same Gemfile.lock and package.json with cache:key:files +use the new cache, instead of rebuilding the dependencies. + +Additional details: + + + The cache key is a SHA computed from the most recent commits +that changed each listed file. +If neither file is changed in any commits, the fallback key is default. + + + +cache:key:prefix + + + +History + + + + + +Introduced in GitLab 12.5. + + + + + + +Use cache:key:prefix to combine a prefix with the SHA computed for cache:key:files. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + A string + A predefined variables + + A combination of both. + + +Example of cache:key:prefix: + +rspec: + script: + - echo ""This rspec job uses a cache."" + cache: + key: + files: + - Gemfile.lock + prefix: $CI_JOB_NAME + paths: + - vendor/ruby + + +For example, adding a prefix of $CI_JOB_NAME causes the key to look like rspec-feef9576d21ee9b6a32e30c5c79d0a0ceb68d1e5. +If a branch changes Gemfile.lock, that branch has a new SHA checksum for cache:key:files. +A new cache key is generated, and a new cache is created for that key. If Gemfile.lock +is not found, the prefix is added to default, so the key in the example would be rspec-default. + +Additional details: + + + If no file in cache:key:files is changed in any commits, the prefix is added to the default key. + + + +cache:untracked + + +Use untracked: true to cache all files that are untracked in your Git repository. +Untracked files include files that are: + + + Ignored due to .gitignore configuration. + Created, but not added to the checkout with git add. + + +Caching untracked files can create unexpectedly large caches if the job downloads: + + + Dependencies, like gems or node modules, which are usually untracked. + +Artifacts from a different job. Files extracted from the artifacts are untracked by default. + + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + +true or false (default). 
+ + +Example of cache:untracked: + +rspec: + script: test + cache: + untracked: true + + +Additional details: + + + + You can combine cache:untracked with cache:paths to cache all untracked files, as well as files in the configured paths. +Use cache:paths to cache any specific files, including tracked files, or files that are outside of the working directory, +and use cache: untracked to also cache all untracked files. For example: + + +rspec: + script: test + cache: + untracked: true + paths: + - binaries/ + + + In this example, the job caches all untracked files in the repository, as well as all the files in binaries/. +If there are untracked files in binaries/, they are covered by both keywords. + + + + +cache:unprotect + + + +History + + + + + +Introduced in GitLab 15.8. + + + + + + +Use cache:unprotect to set a cache to be shared between protected +and unprotected branches. + + + caution When set to true, users without access to protected branches can read and write to +cache keys used by protected branches. + + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + +true or false (default). + + +Example of cache:unprotect: + +rspec: + script: test + cache: + unprotect: true + + + +cache:when + + + +History + + + + + +Introduced in GitLab 13.5 and GitLab Runner v13.5.0. + + + + + + +Use cache:when to define when to save the cache, based on the status of the job. + +Must be used with cache: paths, or nothing is cached. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + +on_success (default): Save the cache only when the job succeeds. + +on_failure: Save the cache only when the job fails. + +always: Always save the cache. + + +Example of cache:when: + +rspec: + script: rspec + cache: + paths: + - rspec/ + when: 'always' + + +This example stores the cache whether or not the job fails or succeeds. + + +cache:policy + + +To change the upload and download behavior of a cache, use the cache:policy keyword. +By default, the job downloads the cache when the job starts, and uploads changes +to the cache when the job ends. This caching style is the pull-push policy (default). + +To set a job to only download the cache when the job starts, but never upload changes +when the job finishes, use cache:policy:pull. + +To set a job to only upload a cache when the job finishes, but never download the +cache when the job starts, use cache:policy:push. + +Use the pull policy when you have many jobs executing in parallel that use the same cache. +This policy speeds up job execution and reduces load on the cache server. You can +use a job with the push policy to build the cache. + +Must be used with cache: paths, or nothing is cached. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + pull + push + +pull-push (default) + +CI/CD variables. + + +Example of cache:policy: + +prepare-dependencies-job: + stage: build + cache: + key: gems + paths: + - vendor/bundle + policy: push + script: + - echo ""This job only downloads dependencies and builds the cache."" + - echo ""Downloading dependencies..."" + +faster-test-job: + stage: test + cache: + key: gems + paths: + - vendor/bundle + policy: pull + script: + - echo ""This job script uses the cache, but does not update it."" + - echo ""Running tests..."" + + +Related topics: + + + You can use a variable to control a job’s cache policy. 
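As a sketch of that related topic (the CACHE_POLICY variable name and the job are illustrative, not taken from the official examples), a job could read its cache policy from a CI/CD variable, which is one of the documented possible inputs for cache:policy:

cache-policy-from-variable:
  stage: build
  variables:
    CACHE_POLICY: pull-push
  cache:
    key: gems
    paths:
      - vendor/bundle
    policy: $CACHE_POLICY
  script:
    - echo ""This job uses the cache policy set in the CACHE_POLICY variable.""

Overriding CACHE_POLICY for a pipeline that should only read the cache (for example, setting it to pull) changes the behavior without editing the job definition.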
+ + + +cache:fallback_keys + + +Use cache:fallback_keys to specify a list of keys to try to restore cache from +if there is no cache found for the cache:key. Caches are retrieved in the order specified +in the fallback_keys section. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + An array of cache keys + + +Example of cache:fallback_keys: + +rspec: + script: rspec + cache: + key: gems-$CI_COMMIT_REF_SLUG + paths: + - rspec/ + fallback_keys: + - gems + when: 'always' + + + +coverage + + +Use coverage with a custom regular expression to configure how code coverage +is extracted from the job output. The coverage is shown in the UI if at least one +line in the job output matches the regular expression. + +To extract the code coverage value from the match, GitLab uses +this smaller regular expression: \d+(?:\.\d+)?. + +Possible inputs: + + + An RE2 regular expression. Must start and end with /. Must match the coverage number. +May match surrounding text as well, so you don’t need to use a regular expression character group +to capture the exact number. +Because it uses RE2 syntax, all groups must be non-capturing. + + +Example of coverage: + +job1: + script: rspec + coverage: '/Code coverage: \d+(?:\.\d+)?/' + + +In this example: + + + GitLab checks the job log for a match with the regular expression. A line +like Code coverage: 67.89% of lines covered would match. + GitLab then checks the matched fragment to find a match to \d+(?:\.\d+)?. +The sample matching line above gives a code coverage of 67.89. + + +Additional details: + + + You can find parse examples in Code Coverage. + If there is more than one matched line in the job output, the last line is used +(the first result of reverse search). + If there are multiple matches in a single line, the last match is searched +for the coverage number. + If there are multiple coverage numbers found in the matched fragment, the first number is used. + Leading zeros are removed. + Coverage output from child pipelines +is not recorded or displayed. Check the related issue +for more details. + + + +dast_configuration + + + +Tier: Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + +Introduced in GitLab 14.1. + + + + + + +Use the dast_configuration keyword to specify a site profile and scanner profile to be used in a +CI/CD configuration. Both profiles must first have been created in the project. The job’s stage must +be dast. + +Keyword type: Job keyword. You can use only as part of a job. + +Possible inputs: One each of site_profile and scanner_profile. + + + Use site_profile to specify the site profile to be used in the job. + Use scanner_profile to specify the scanner profile to be used in the job. + + +Example of dast_configuration: + +stages: + - build + - dast + +include: + - template: DAST.gitlab-ci.yml + +dast: + dast_configuration: + site_profile: ""Example Co"" + scanner_profile: ""Quick Passive Test"" + + +In this example, the dast job extends the dast configuration added with the include keyword +to select a specific site profile and scanner profile. + +Additional details: + + + Settings contained in either a site profile or scanner profile take precedence over those +contained in the DAST template. + + +Related topics: + + + +Site profile. + +Scanner profile. + + + +dependencies + + +Use the dependencies keyword to define a list of specific jobs to fetch artifacts +from. The specified jobs must all be in earlier stages. 
You can also set a job to download no artifacts at all. + +When dependencies is not defined in a job, all jobs in earlier stages are considered dependent +and the job fetches all artifacts from those jobs. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + The names of jobs to fetch artifacts from. + An empty array ([]), to configure the job to not download any artifacts. + + +Example of dependencies: + +build osx: + stage: build + script: make build:osx + artifacts: + paths: + - binaries/ + +build linux: + stage: build + script: make build:linux + artifacts: + paths: + - binaries/ + +test osx: + stage: test + script: make test:osx + dependencies: + - build osx + +test linux: + stage: test + script: make test:linux + dependencies: + - build linux + +deploy: + stage: deploy + script: make deploy + environment: production + + +In this example, two jobs have artifacts: build osx and build linux. When test osx is executed, +the artifacts from build osx are downloaded and extracted in the context of the build. +The same thing happens for test linux and artifacts from build linux. + +The deploy job downloads artifacts from all previous jobs because of +the stage precedence. + +Additional details: + + + The job status does not matter. If a job fails or it’s a manual job that isn’t triggered, no error occurs. + If the artifacts of a dependent job are expired or +deleted, then the job fails. + To fetch artifacts from a job in the same stage, you must use needs:artifacts. +You should not combine dependencies with needs in the same job. + + + +environment + + +Use environment to define the environment that a job deploys to. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: The name of the environment the job deploys to, in one of these +formats: + + + Plain text, including letters, digits, spaces, and these characters: -, _, /, $, {, }. + CI/CD variables, including predefined, project, group, instance, or variables defined in the +.gitlab-ci.yml file. You can’t use variables defined in a script section. + + +Example of environment: + +deploy to production: + stage: deploy + script: git push production HEAD:main + environment: production + + +Additional details: + + + If you specify an environment and no environment with that name exists, an environment is +created. + + + +environment:name + + +Set a name for an environment. + +Common environment names are qa, staging, and production, but you can use any name. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: The name of the environment the job deploys to, in one of these +formats: + + + Plain text, including letters, digits, spaces, and these characters: -, _, /, $, {, }. + +CI/CD variables, +including predefined, project, group, instance, or variables defined in the +.gitlab-ci.yml file. You can’t use variables defined in a script section. + + +Example of environment:name: + +deploy to production: + stage: deploy + script: git push production HEAD:main + environment: + name: production + + + +environment:url + + +Set a URL for an environment. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: A single URL, in one of these formats: + + + Plain text, like https://prod.example.com. + +CI/CD variables, +including predefined, project, group, instance, or variables defined in the +.gitlab-ci.yml file. You can’t use variables defined in a script section. 
+ + +Example of environment:url: + +deploy to production: + stage: deploy + script: git push production HEAD:main + environment: + name: production + url: https://prod.example.com + + +Additional details: + + + After the job completes, you can access the URL by selecting a button in the merge request, +environment, or deployment pages. + + + +environment:on_stop + + +Closing (stopping) environments can be achieved with the on_stop keyword +defined under environment. It declares a different job that runs to close the +environment. + +Keyword type: Job keyword. You can use it only as part of a job. + +Additional details: + + + See environment:action for more details and an example. + + + +environment:action + + +Use the action keyword to specify how the job interacts with the environment. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: One of the following keywords: + + + + + Value + Description + + + + + start + Default value. Indicates that the job starts the environment. The deployment is created after the job starts. + + + prepare + Indicates that the job is only preparing the environment. It does not trigger deployments. Read more about preparing environments. + + + stop + Indicates that the job stops an environment. Read more about stopping an environment. + + + verify + Indicates that the job is only verifying the environment. It does not trigger deployments. Read more about verifying environments. + + + access + Indicates that the job is only accessing the environment. It does not trigger deployments. Read more about accessing environments. + + + + +Example of environment:action: + +stop_review_app: + stage: deploy + variables: + GIT_STRATEGY: none + script: make delete-app + when: manual + environment: + name: review/$CI_COMMIT_REF_SLUG + action: stop + + + +environment:auto_stop_in + + + +History + + + + + CI/CD variable support introduced in GitLab 15.4. + + + + + + +The auto_stop_in keyword specifies the lifetime of the environment. When an environment expires, GitLab +automatically stops it. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: A period of time written in natural language. For example, +these are all equivalent: + + + 168 hours + 7 days + one week + never + + +CI/CD variables are supported. + +Example of environment:auto_stop_in: + +review_app: + script: deploy-review-app + environment: + name: review/$CI_COMMIT_REF_SLUG + auto_stop_in: 1 day + + +When the environment for review_app is created, the environment’s lifetime is set to 1 day. +Every time the review app is deployed, that lifetime is also reset to 1 day. + +Related topics: + + + +Environments auto-stop documentation. + + + +environment:kubernetes + + + +History + + + + + +Introduced in GitLab 12.6. + + + + + + +Use the kubernetes keyword to configure deployments to a +Kubernetes cluster that is associated with your project. + +Keyword type: Job keyword. You can use it only as part of a job. + +Example of environment:kubernetes: + +deploy: + stage: deploy + script: make deploy-app + environment: + name: production + kubernetes: + namespace: production + + +This configuration sets up the deploy job to deploy to the production +environment, using the production +Kubernetes namespace. + +Additional details: + + + Kubernetes configuration is not supported for Kubernetes clusters +managed by GitLab. + + +Related topics: + + + +Available settings for kubernetes. 
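The environment:on_stop and environment:action keywords above are typically used as a pair: the deploy job names its stop job in on_stop, and that stop job sets action: stop. A minimal sketch of the pairing (the job names and make targets are illustrative):

deploy_review_app:
  stage: deploy
  script: make deploy-app
  environment:
    name: review/$CI_COMMIT_REF_SLUG
    on_stop: stop_review_app

stop_review_app:
  stage: deploy
  script: make delete-app
  when: manual
  environment:
    name: review/$CI_COMMIT_REF_SLUG
    action: stop

With this pairing, stopping the review/$CI_COMMIT_REF_SLUG environment runs stop_review_app to remove the deployment.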
+ + + +environment:deployment_tier + + + +History + + + + + +Introduced in GitLab 13.10. + + + + + + +Use the deployment_tier keyword to specify the tier of the deployment environment. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: One of the following: + + + production + staging + testing + development + other + + +Example of environment:deployment_tier: + +deploy: + script: echo + environment: + name: customer-portal + deployment_tier: production + + +Additional details: + + + Environments created from this job definition are assigned a tier based on this value. + Existing environments don’t have their tier updated if this value is added later. Existing environments must have their tier updated via the Environments API. + + +Related topics: + + + +Deployment tier of environments. + + +Dynamic environments + + +Use CI/CD variables to dynamically name environments. + +For example: + +deploy as review app: + stage: deploy + script: make deploy + environment: + name: review/$CI_COMMIT_REF_SLUG + url: https://$CI_ENVIRONMENT_SLUG.example.com/ + + +The deploy as review app job is marked as a deployment to dynamically +create the review/$CI_COMMIT_REF_SLUG environment. $CI_COMMIT_REF_SLUG +is a CI/CD variable set by the runner. The +$CI_ENVIRONMENT_SLUG variable is based on the environment name, but suitable +for inclusion in URLs. If the deploy as review app job runs in a branch named +pow, this environment would be accessible with a URL like https://review-pow.example.com/. + +The common use case is to create dynamic environments for branches and use them +as Review Apps. You can see an example that uses Review Apps at +https://gitlab.com/gitlab-examples/review-apps-nginx/. + + +extends + + +Use extends to reuse configuration sections. It’s an alternative to YAML anchors +and is a little more flexible and readable. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + The name of another job in the pipeline. + A list (array) of names of other jobs in the pipeline. + + +Example of extends: + +.tests: + script: rake test + stage: test + only: + refs: + - branches + +rspec: + extends: .tests + script: rake rspec + only: + variables: + - $RSPEC + + +In this example, the rspec job uses the configuration from the .tests template job. +When creating the pipeline, GitLab: + + + Performs a reverse deep merge based on the keys. + Merges the .tests content with the rspec job. + Doesn’t merge the values of the keys. + + +The result is this rspec job: + +rspec: + script: rake rspec + stage: test + only: + refs: + - branches + variables: + - $RSPEC + + +Additional details: + + + In GitLab 12.0 and later, you can use multiple parents for extends. + The extends keyword supports up to eleven levels of inheritance, but you should +avoid using more than three levels. + In the example above, .tests is a hidden job, +but you can extend configuration from regular jobs as well. + + +Related topics: + + + +Reuse configuration sections by using extends. + Use extends to reuse configuration from included configuration files. + + + +hooks + + + +History + + + + + +Introduced in GitLab 15.6 with a flag named ci_hooks_pre_get_sources_script. Disabled by default. + +Generally available in GitLab 15.10. Feature flag ci_hooks_pre_get_sources_script removed. + + + + + + +Use hooks to specify lists of commands to execute on the runner +at certain stages of job execution, like before retrieving the Git repository. + +Keyword type: Job keyword. 
You can use it only as part of a job or in the +default section. + +Possible inputs: + + + A hash of hooks and their commands. Available hooks: pre_get_sources_script. + + + +hooks:pre_get_sources_script + + + +History + + + + + +Introduced in GitLab 15.6 with a flag named ci_hooks_pre_get_sources_script. Disabled by default. + +Generally available in GitLab 15.10. Feature flag ci_hooks_pre_get_sources_script removed. + + + + + + +Use hooks:pre_get_sources_script to specify a list of commands to execute on the runner +before cloning the Git repository and any submodules. +You can use it for example to: + + + Adjust the Git configuration. + Export tracing variables. + + +Possible inputs: An array including: + + + Single line commands. + Long commands split over multiple lines. + +YAML anchors. + + +CI/CD variables are supported. + +Example of hooks:pre_get_sources_script: + +job1: + hooks: + pre_get_sources_script: + - echo 'hello job1 pre_get_sources_script' + script: echo 'hello job1 script' + + +Related topics: + + + GitLab Runner configuration + + + +identity + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com +Status: Beta + + +History + + + + + +Introduced in GitLab 16.9 with a flag named google_cloud_support_feature_flag. This feature is in Beta. + + + + + + + + On GitLab.com, this feature is available for a subset of users. On GitLab Dedicated, this feature is not available. + + +This feature is in Beta. +To join the list of users testing this feature, join the waitlist. + +Use identity to authenticate with third party services using identity federation. + +Keyword type: Job keyword. You can use it only as part of a job or in the default: section. + +Possible inputs: An identifier. Supported providers: + + + +google_cloud: Google Cloud. Must be configured with the Google Cloud IAM integration. + + +Example of identity: + +job_with_workload_identity: + identity: google_cloud + script: + - gcloud compute instances list + + +Related topics: + + + +Workload Identity Federation. + +Google Cloud IAM integration. + + + +id_tokens + + + +History + + + + + +Introduced in GitLab 15.7. + + + + + + +Use id_tokens to create JSON web tokens (JWT) to authenticate with third party services. All +JWTs created this way support OIDC authentication. The required aud sub-keyword is used to configure the aud claim for the JWT. + +Possible inputs: + + + Token names with their aud claims. aud supports: + + A single string. + An array of strings. + +CI/CD variables. + + + + +Example of id_tokens: + +job_with_id_tokens: + id_tokens: + ID_TOKEN_1: + aud: https://vault.example.com + ID_TOKEN_2: + aud: + - https://gcp.com + - https://aws.com + SIGSTORE_ID_TOKEN: + aud: sigstore + script: + - command_to_authenticate_with_vault $ID_TOKEN_1 + - command_to_authenticate_with_aws $ID_TOKEN_2 + - command_to_authenticate_with_gcp $ID_TOKEN_2 + + +Related topics: + + + +Keyless signing with Sigstore. + + + +image + + +Use image to specify a Docker image that the job runs in. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: The name of the image, including the registry path if needed, in one of these formats: + + + + (Same as using with the latest tag) + : + @ + + +CI/CD variables are supported. 
+ +Example of image: + +default: + image: ruby:3.0 + +rspec: + script: bundle exec rspec + +rspec 2.7: + image: registry.example.com/my-group/my-project/ruby:2.7 + script: bundle exec rspec + + +In this example, the ruby:3.0 image is the default for all jobs in the pipeline. +The rspec 2.7 job does not use the default, because it overrides the default with +a job-specific image section. + +Related topics: + + + +Run your CI/CD jobs in Docker containers. + + + +image:name + + +The name of the Docker image that the job runs in. Similar to image used by itself. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: The name of the image, including the registry path if needed, in one of these formats: + + + + (Same as using with the latest tag) + : + @ + + +CI/CD variables are supported. + +Example of image:name: + +test-job: + image: + name: ""registry.example.com/my/image:latest"" + script: echo ""Hello world"" + + +Related topics: + + + +Run your CI/CD jobs in Docker containers. + + + +image:entrypoint + + +Command or script to execute as the container’s entry point. + +When the Docker container is created, the entrypoint is translated to the Docker --entrypoint option. +The syntax is similar to the Dockerfile ENTRYPOINT directive, +where each shell token is a separate string in the array. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + A string. + + +Example of image:entrypoint: + +test-job: + image: + name: super/sql:experimental + entrypoint: [""""] + script: echo ""Hello world"" + + +Related topics: + + + +Override the entrypoint of an image. + + + +image:docker + + + +History + + + + + +Introduced in GitLab 16.7. Requires GitLab Runner 16.7 or later. + +user input option introduced in GitLab 16.8. + + + + + + +Use image:docker to pass options to the Docker executor of a GitLab Runner. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + +A hash of options for the Docker executor, which can include: + + + +platform: Selects the architecture of the image to pull. When not specified, +the default is the same platform as the host runner. + +user: Specify the username or UID to use when running the container. + + +Example of image:docker: + +arm-sql-job: + script: echo ""Run sql tests"" + image: + name: super/sql:experimental + docker: + platform: arm64/v8 + user: dave + + +Additional details: + + + +image:docker:platform maps to the docker pull --platform option. + +image:docker:user maps to the docker run --user option. + + + +image:pull_policy + + + +History + + + + + +Introduced in GitLab 15.1 with a flag named ci_docker_image_pull_policy. Disabled by default. + +Enabled on GitLab.com and self-managed in GitLab 15.2. + +Generally available in GitLab 15.4. Feature flag ci_docker_image_pull_policy removed. + Requires GitLab Runner 15.1 or later. + + + + + + +The pull policy that the runner uses to fetch the Docker image. + +Keyword type: Job keyword. You can use it only as part of a job or in the default section. + +Possible inputs: + + + A single pull policy, or multiple pull policies in an array. +Can be always, if-not-present, or never. 
+ + +Examples of image:pull_policy: + +job1: + script: echo ""A single pull policy."" + image: + name: ruby:3.0 + pull_policy: if-not-present + +job2: + script: echo ""Multiple pull policies."" + image: + name: ruby:3.0 + pull_policy: [always, if-not-present] + + +Additional details: + + + If the runner does not support the defined pull policy, the job fails with an error similar to: +ERROR: Job failed (system failure): the configured PullPolicies ([always]) are not allowed by AllowedPullPolicies ([never]). + + +Related topics: + + + +Run your CI/CD jobs in Docker containers. + +Configure how runners pull images. + +Set multiple pull policies. + + + +inherit + + + +History + + + + + +Introduced in GitLab 12.9. + + + + + + +Use inherit to control inheritance of default keywords and variables. + + +inherit:default + + +Use inherit:default to control the inheritance of default keywords. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +true (default) or false to enable or disable the inheritance of all default keywords. + A list of specific default keywords to inherit. + + +Example of inherit:default: + +default: + retry: 2 + image: ruby:3.0 + interruptible: true + +job1: + script: echo ""This job does not inherit any default keywords."" + inherit: + default: false + +job2: + script: echo ""This job inherits only the two listed default keywords. It does not inherit 'interruptible'."" + inherit: + default: + - retry + - image + + +Additional details: + + + You can also list default keywords to inherit on one line: default: [keyword1, keyword2] + + + + +inherit:variables + + +Use inherit:variables to control the inheritance of global variables keywords. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +true (default) or false to enable or disable the inheritance of all global variables. + A list of specific variables to inherit. + + +Example of inherit:variables: + +variables: + VARIABLE1: ""This is variable 1"" + VARIABLE2: ""This is variable 2"" + VARIABLE3: ""This is variable 3"" + +job1: + script: echo ""This job does not inherit any global variables."" + inherit: + variables: false + +job2: + script: echo ""This job inherits only the two listed global variables. It does not inherit 'VARIABLE3'."" + inherit: + variables: + - VARIABLE1 + - VARIABLE2 + + +Additional details: + + + You can also list global variables to inherit on one line: variables: [VARIABLE1, VARIABLE2] + + + + +interruptible + + + +History + + + + + +Introduced in GitLab 12.3. + Support for trigger jobs introduced in GitLab 16.8. + + + + + + +Use interruptible to configure the auto-cancel redundant pipelines +feature to cancel a job before it completes if a new pipeline on the same ref starts for a newer commit. If the feature +is disabled, the keyword has no effect. + +Running jobs are only cancelled when the jobs are configured with interruptible: true and: + + + No jobs configured with interruptible: false have started at any time. +After a job with interruptible: false starts, the entire pipeline is no longer +considered interruptible. + + If the pipeline triggered a downstream pipeline, but no job with interruptible: false +in the downstream pipeline has started yet, the downstream pipeline is also cancelled. + + + The new pipeline is for a commit with new changes. The Auto-cancel redundant pipelines +feature has no effect if you select Run pipeline in the UI to run a pipeline for the same commit. 
+ + +A job that has not started yet is always considered interruptible: true, regardless of the job’s configuration. +The interruptible configuration is only considered after the job starts. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + +true or false (default). + + +Example of interruptible: + +stages: + - stage1 + - stage2 + - stage3 + +step-1: + stage: stage1 + script: + - echo ""Can be canceled."" + interruptible: true + +step-2: + stage: stage2 + script: + - echo ""Can not be canceled."" + +step-3: + stage: stage3 + script: + - echo ""Because step-2 can not be canceled, this step can never be canceled, even though it's set as interruptible."" + interruptible: true + + +In this example, a new pipeline causes a running pipeline to be: + + + Canceled, if only step-1 is running or pending. + Not canceled, after step-2 starts. + + +Additional details: + + + Only set interruptible: true if the job can be safely canceled after it has started, +like a build job. Deployment jobs usually shouldn’t be cancelled, to prevent partial deployments. + You can add an optional manual job with interruptible: false in the first stage of +a pipeline to allow users to manually prevent a pipeline from being automatically +cancelled. After a user starts the job, the pipeline cannot be canceled by the +Auto-cancel redundant pipelines feature. + When using interruptible with a trigger job: + + The triggered downstream pipeline is never affected by the trigger job’s interruptible configuration. + If workflow:auto_cancel is set to conservative, +the trigger job’s interruptible configuration has no effect. + If workflow:auto_cancel is set to interruptible, +a trigger job with interruptible: true can be automatically cancelled. + + + + + +needs + + + +History + + + + + +Introduced in GitLab 12.2. + In GitLab 12.3, maximum number of jobs in needs array raised from five to 50. + +Introduced in GitLab 12.8, needs: [] lets jobs start immediately. + +Introduced in GitLab 14.2, you can refer to jobs in the same stage as the job you are configuring. + + + + + + +Use needs to execute jobs out-of-order. Relationships between jobs +that use needs can be visualized as a directed acyclic graph. + +You can ignore stage ordering and run some jobs without waiting for others to complete. +Jobs in multiple stages can run concurrently. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + An array of jobs. + An empty array ([]), to set the job to start as soon as the pipeline is created. + + +Example of needs: + +linux:build: + stage: build + script: echo ""Building linux..."" + +mac:build: + stage: build + script: echo ""Building mac..."" + +lint: + stage: test + needs: [] + script: echo ""Linting..."" + +linux:rspec: + stage: test + needs: [""linux:build""] + script: echo ""Running rspec on linux..."" + +mac:rspec: + stage: test + needs: [""mac:build""] + script: echo ""Running rspec on mac..."" + +production: + stage: deploy + script: echo ""Running production..."" + environment: production + + +This example creates four paths of execution: + + + Linter: The lint job runs immediately without waiting for the build stage +to complete because it has no needs (needs: []). + Linux path: The linux:rspec job runs as soon as the linux:build +job finishes, without waiting for mac:build to finish. + macOS path: The mac:rspec jobs runs as soon as the mac:build +job finishes, without waiting for linux:build to finish. 
+ The production job runs as soon as all previous jobs finish: +linux:build, linux:rspec, mac:build, mac:rspec. + + +Additional details: + + + The maximum number of jobs that a single job can have in the needs array is limited: + + For GitLab.com, the limit is 50. For more information, see +issue 350398. + For self-managed instances, the default limit is 50. This limit can be changed. + + + If needs refers to a job that uses the parallel keyword, +it depends on all jobs created in parallel, not just one job. It also downloads +artifacts from all the parallel jobs by default. If the artifacts have the same +name, they overwrite each other and only the last one downloaded is saved. + + To have needs refer to a subset of parallelized jobs (and not all of the parallelized jobs), +use the needs:parallel:matrix keyword. + + + In GitLab 14.1 and later you +can refer to jobs in the same stage as the job you are configuring. This feature is +enabled on GitLab.com and ready for production use. On self-managed GitLab 14.2 and later +this feature is available by default. + In GitLab 14.0 and earlier, you can only refer to jobs in earlier stages. Stages must be +explicitly defined for all jobs that use the needs keyword, or are referenced +in a job’s needs section. + If needs refers to a job that might not be added to +a pipeline because of only, except, or rules, the pipeline might fail to create. Use the needs:optional keyword to resolve a failed pipeline creation. + If a pipeline has jobs with needs: [] and jobs in the .pre stage, they will +all start as soon as the pipeline is created. Jobs with needs: [] start immediately, +and jobs in the .pre stage also start immediately. + + + +needs:artifacts + + + +History + + + + + +Introduced in GitLab 12.6. + + + + + + +When a job uses needs, it no longer downloads all artifacts from previous stages +by default, because jobs with needs can start before earlier stages complete. With +needs you can only download artifacts from the jobs listed in the needs configuration. + +Use artifacts: true (default) or artifacts: false to control when artifacts are +downloaded in jobs that use needs. + +Keyword type: Job keyword. You can use it only as part of a job. Must be used with needs:job. + +Possible inputs: + + + +true (default) or false. + + +Example of needs:artifacts: + +test-job1: + stage: test + needs: + - job: build_job1 + artifacts: true + +test-job2: + stage: test + needs: + - job: build_job2 + artifacts: false + +test-job3: + needs: + - job: build_job1 + artifacts: true + - job: build_job2 + - build_job3 + + +In this example: + + + The test-job1 job downloads the build_job1 artifacts + The test-job2 job does not download the build_job2 artifacts. + The test-job3 job downloads the artifacts from all three build_jobs, because +artifacts is true, or defaults to true, for all three needed jobs. + + +Additional details: + + + You should not combine needs with dependencies in the same job. + + + +needs:project + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + +Introduced in GitLab 12.7. + + + + + + +Use needs:project to download artifacts from up to five jobs in other pipelines. +The artifacts are downloaded from the latest successful specified job for the specified ref. +To specify multiple jobs, add each as separate array items under the needs keyword. + +If there is a pipeline running for the ref, a job with needs:project +does not wait for the pipeline to complete. 
Instead, the artifacts are downloaded +from the latest successful run of the specified job. + +needs:project must be used with job, ref, and artifacts. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +needs:project: A full project path, including namespace and group. + +job: The job to download artifacts from. + +ref: The ref to download artifacts from. + +artifacts: Must be true to download artifacts. + + +Examples of needs:project: + +build_job: + stage: build + script: + - ls -lhR + needs: + - project: namespace/group/project-name + job: build-1 + ref: main + artifacts: true + - project: namespace/group/project-name-2 + job: build-2 + ref: main + artifacts: true + + +In this example, build_job downloads the artifacts from the latest successful build-1 and build-2 jobs +on the main branches in the group/project-name and group/project-name-2 projects. + +In GitLab 13.3 and later, you can use CI/CD variables +in needs:project, for example: + +build_job: + stage: build + script: + - ls -lhR + needs: + - project: $CI_PROJECT_PATH + job: $DEPENDENCY_JOB_NAME + ref: $ARTIFACTS_DOWNLOAD_REF + artifacts: true + + +Additional details: + + + To download artifacts from a different pipeline in the current project, set project +to be the same as the current project, but use a different ref than the current pipeline. +Concurrent pipelines running on the same ref could override the artifacts. + The user running the pipeline must have at least the Reporter role for the group or project, +or the group/project must have public visibility. + You can’t use needs:project in the same job as trigger. + When using needs:project to download artifacts from another pipeline, the job does not wait for +the needed job to complete. Directed acyclic graph +behavior is limited to jobs in the same pipeline. Make sure that the needed job in the other +pipeline completes before the job that needs it tries to download the artifacts. + You can’t download artifacts from jobs that run in parallel. + Support for CI/CD variables in project, job, and ref was +introduced in GitLab 13.3. +Feature flag removed in GitLab 13.4. + + +Related topics: + + + To download artifacts between parent-child pipelines, +use needs:pipeline:job. + + + +needs:pipeline:job + + + +History + + + + + +Introduced in GitLab 13.7. + + + + + + +A child pipeline can download artifacts from a job in +its parent pipeline or another child pipeline in the same parent-child pipeline hierarchy. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +needs:pipeline: A pipeline ID. Must be a pipeline present in the same parent-child pipeline hierarchy. + +job: The job to download artifacts from. + + +Example of needs:pipeline:job: + + + + Parent pipeline (.gitlab-ci.yml): + + +create-artifact: + stage: build + script: echo ""sample artifact"" > artifact.txt + artifacts: + paths: [artifact.txt] + +child-pipeline: + stage: test + trigger: + include: child.yml + strategy: depend + variables: + PARENT_PIPELINE_ID: $CI_PIPELINE_ID + + + + Child pipeline (child.yml): + + +use-artifact: + script: cat artifact.txt + needs: + - pipeline: $PARENT_PIPELINE_ID + job: create-artifact + + + + +In this example, the create-artifact job in the parent pipeline creates some artifacts. +The child-pipeline job triggers a child pipeline, and passes the CI_PIPELINE_ID +variable to the child pipeline as a new PARENT_PIPELINE_ID variable. 
The child pipeline +can use that variable in needs:pipeline to download artifacts from the parent pipeline. + +Additional details: + + + The pipeline attribute does not accept the current pipeline ID ($CI_PIPELINE_ID). +To download artifacts from a job in the current pipeline, use needs:artifacts. + + + +needs:optional + + + +History + + + + + +Introduced in GitLab 13.10. + +Feature flag removed in GitLab 14.0. + + + + + + +To need a job that sometimes does not exist in the pipeline, add optional: true +to the needs configuration. If not defined, optional: false is the default. + +Jobs that use rules, only, or except and that are added with include +might not always be added to a pipeline. GitLab checks the needs relationships before starting a pipeline: + + + If the needs entry has optional: true and the needed job is present in the pipeline, +the job waits for it to complete before starting. + If the needed job is not present, the job can start when all other needs requirements are met. + If the needs section contains only optional jobs, and none are added to the pipeline, +the job starts immediately (the same as an empty needs entry: needs: []). + If a needed job has optional: false, but it was not added to the pipeline, the +pipeline fails to start with an error similar to: 'job1' job needs 'job2' job, but it was not added to the pipeline. + + +Keyword type: Job keyword. You can use it only as part of a job. + +Example of needs:optional: + +build-job: + stage: build + +test-job1: + stage: test + +test-job2: + stage: test + rules: + - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + +deploy-job: + stage: deploy + needs: + - job: test-job2 + optional: true + - job: test-job1 + environment: production + +review-job: + stage: deploy + needs: + - job: test-job2 + optional: true + environment: review + + +In this example: + + + +build-job, test-job1, and test-job2 start in stage order. + When the branch is the default branch, test-job2 is added to the pipeline, so: + + +deploy-job waits for both test-job1 and test-job2 to complete. + +review-job waits for test-job2 to complete. + + + When the branch is not the default branch, test-job2 is not added to the pipeline, so: + + +deploy-job waits for only test-job1 to complete, and does not wait for the missing test-job2. + +review-job has no other needed jobs and starts immediately (at the same time as build-job), +like needs: []. + + + + + +needs:pipeline + + +You can mirror the pipeline status from an upstream pipeline to a job by +using the needs:pipeline keyword. The latest pipeline status from the default branch is +replicated to the job. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + A full project path, including namespace and group. If the +project is in the same group or namespace, you can omit them from the project +keyword. For example: project: group/project-name or project: project-name. + + +Example of needs:pipeline: + +upstream_status: + stage: test + needs: + pipeline: other/project + + +Additional details: + + + If you add the job keyword to needs:pipeline, the job no longer mirrors the +pipeline status. The behavior changes to needs:pipeline:job. + + + +needs:parallel:matrix + + + +History + + + + + +Introduced in GitLab 16.3. + + + + + + +Jobs can use parallel:matrix to run a job multiple times in parallel in a single pipeline, +but with different variable values for each instance of the job. + +Use needs:parallel:matrix to execute jobs out-of-order depending on parallelized jobs. 
+ +Keyword type: Job keyword. You can use it only as part of a job. Must be used with needs:job. + +Possible inputs: An array of hashes of variables: + + + The variables and values must be selected from the variables and values defined in the parallel:matrix job. + + +Example of needs:parallel:matrix: + +linux:build: + stage: build + script: echo ""Building linux..."" + parallel: + matrix: + - PROVIDER: aws + STACK: + - monitoring + - app1 + - app2 + +linux:rspec: + stage: test + needs: + - job: linux:build + parallel: + matrix: + - PROVIDER: aws + STACK: app1 + script: echo ""Running rspec on linux..."" + + +The above example generates the following jobs: + +linux:build: [aws, monitoring] +linux:build: [aws, app1] +linux:build: [aws, app2] +linux:rspec + + +The linux:rspec job runs as soon as the linux:build: [aws, app1] job finishes. + +Related topics: + + + +Specify a parallelized job using needs with multiple parallelized jobs. + + +Additional details: + + + + The order of the matrix variables in needs:parallel:matrix must match the order +of the matrix variables in the needed job. For example, reversing the order of +the variables in the linux:rspec job in the earlier example above would be invalid: + + +linux:rspec: + stage: test + needs: + - job: linux:build + parallel: + matrix: + - STACK: app1 # The variable order does not match `linux:build` and is invalid. + PROVIDER: aws + script: echo ""Running rspec on linux..."" + + + + + +pages + + +Use pages to define a GitLab Pages job that +uploads static content to GitLab. The content is then published as a website. + +You must: + + + Define artifacts with a path to the content directory, which is +public by default. + Use publish if want to use a different content directory. + + +Keyword type: Job name. + +Example of pages: + +pages: + stage: deploy + script: + - mv my-html-content public + artifacts: + paths: + - public + rules: + - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + environment: production + + +This example renames the my-html-content/ directory to public/. +This directory is exported as an artifact and published with GitLab Pages. + + +pages:publish + + + +History + + + + + +Introduced in GitLab 16.1. + + + + + + +Use publish to configure the content directory of a pages job. + +Keyword type: Job keyword. You can use it only as part of a pages job. + +Possible inputs: A path to a directory containing the Pages content. + +Example of publish: + +pages: + stage: deploy + script: + - npx @11ty/eleventy --input=path/to/eleventy/root --output=dist + artifacts: + paths: + - dist + publish: dist + rules: + - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + environment: production + + +This example uses Eleventy to generate a static website and +output the generated HTML files into a the dist/ directory. This directory is exported +as an artifact and published with GitLab Pages. + + +pages:pages.path_prefix + + + +Tier: Premium, Ultimate +Offering: Self-managed +Status: Experiment + + +History + + + + + +Introduced in GitLab 16.7 as an Experiment with a flag named pages_multiple_versions_setting, disabled by default. + + + + + + + + On self-managed GitLab, by default this feature is not available. To make it available, +an administrator can enable the feature flag named +pages_multiple_versions_setting. On GitLab.com and GitLab Dedicated, this feature is not available. This feature is not ready for production use. + + +Use pages.path_prefix to configure a path prefix for multiple deployments of GitLab Pages. 
+ +Keyword type: Job keyword. You can use it only as part of a pages job. + +Possible inputs: + + + A string with valid URL characters. + +CI/CD variables. + A combination of both. + + +Example of pages.path_prefix: + +pages: + stage: deploy + script: + - echo ""Pages accessible through ${CI_PAGES_URL}/${CI_COMMIT_BRANCH}"" + pages: + path_prefix: ""$CI_COMMIT_BRANCH"" + artifacts: + paths: + - public + + +In this example, a different pages deployment is created for each branch. + + +parallel + + + +History + + + + + +Introduced in GitLab 15.9, the maximum value for parallel is increased from 50 to 200. + + + + + + +Use parallel to run a job multiple times in parallel in a single pipeline. + +Multiple runners must exist, or a single runner must be configured to run multiple jobs concurrently. + +Parallel jobs are named sequentially from job_name 1/N to job_name N/N. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + A numeric value from 1 to 200. + + +Example of parallel: + +test: + script: rspec + parallel: 5 + + +This example creates 5 jobs that run in parallel, named test 1/5 to test 5/5. + +Additional details: + + + Every parallel job has a CI_NODE_INDEX and CI_NODE_TOTAL +predefined CI/CD variable set. + A pipeline with jobs that use parallel might: + + Create more jobs running in parallel than available runners. Excess jobs are queued +and marked pending while waiting for an available runner. + Create too many jobs, and the pipeline fails with a job_activity_limit_exceeded error. +The maximum number of jobs that can exist in active pipelines is limited at the instance-level. + + + + +Related topics: + + + +Parallelize large jobs. + + + +parallel:matrix + + + +History + + + + + +Introduced in GitLab 13.3. + The job naming style was improved in GitLab 13.4. + +Introduced in GitLab 15.9, the maximum number of permutations is increased from 50 to 200. + + + + + + +Use parallel:matrix to run a job multiple times in parallel in a single pipeline, +but with different variable values for each instance of the job. + +Multiple runners must exist, or a single runner must be configured to run multiple jobs concurrently. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: An array of hashes of variables: + + + The variable names can use only numbers, letters, and underscores (_). + The values must be either a string, or an array of strings. + The number of permutations cannot exceed 200. + + +Example of parallel:matrix: + +deploystacks: + stage: deploy + script: + - bin/deploy + parallel: + matrix: + - PROVIDER: aws + STACK: + - monitoring + - app1 + - app2 + - PROVIDER: ovh + STACK: [monitoring, backup, app] + - PROVIDER: [gcp, vultr] + STACK: [data, processing] + environment: $PROVIDER/$STACK + + +The example generates 10 parallel deploystacks jobs, each with different values +for PROVIDER and STACK: + +deploystacks: [aws, monitoring] +deploystacks: [aws, app1] +deploystacks: [aws, app2] +deploystacks: [ovh, monitoring] +deploystacks: [ovh, backup] +deploystacks: [ovh, app] +deploystacks: [gcp, data] +deploystacks: [gcp, processing] +deploystacks: [vultr, data] +deploystacks: [vultr, processing] + + +Additional details: + + + +parallel:matrix jobs add the variable values to the job names to differentiate +the jobs from each other, but large values can cause names to exceed limits: + + Job names must be 255 characters or fewer. + When using needs, job names must be 128 characters or fewer. 
+ + + + +Related topics: + + + +Run a one-dimensional matrix of parallel jobs. + +Run a matrix of triggered parallel jobs. + +Select different runner tags for each parallel matrix job. + + +Additional details: + + + + You cannot create multiple matrix configurations with the same variable values but different variable names. +Job names are generated from the variable values, not the variable names, so matrix entries +with identical values generate identical job names that overwrite each other. + + For example, this test configuration would try to create two series of identical jobs, +but the OS2 versions overwrite the OS versions: + + +test: + parallel: + matrix: + - OS: [ubuntu] + PROVIDER: [aws, gcp] + - OS2: [ubuntu] + PROVIDER: [aws, gcp] + + + + + +release + + + +History + + + + + +Introduced in GitLab 13.2. + + + + + + +Use release to create a release. + +The release job must have access to the release-cli, +which must be in the $PATH. + +If you use the Docker executor, +you can use this image from the GitLab container registry: registry.gitlab.com/gitlab-org/release-cli:latest + +If you use the Shell executor or similar, +install release-cli on the server where the runner is registered. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: The release subkeys: + + + tag_name + +tag_message (optional) + +name (optional) + description + +ref (optional) + +milestones (optional) + +released_at (optional) + +assets:links (optional) + + +Example of release keyword: + +release_job: + stage: release + image: registry.gitlab.com/gitlab-org/release-cli:latest + rules: + - if: $CI_COMMIT_TAG # Run this job when a tag is created manually + script: + - echo ""Running the release job."" + release: + tag_name: $CI_COMMIT_TAG + name: 'Release $CI_COMMIT_TAG' + description: 'Release created using the release-cli.' + + +This example creates a release: + + + When you push a Git tag. + When you add a Git tag in the UI at Code > Tags. + + +Additional details: + + + + All release jobs, except trigger jobs, must include the script keyword. A release +job can use the output from script commands. If you don’t need the script, you can use a placeholder: + + +script: + - echo ""release job"" + + + An issue exists to remove this requirement. + + The release section executes after the script keyword and before the after_script. + A release is created only if the job’s main script succeeds. + If the release already exists, it is not updated and the job with the release keyword fails. + + +Related topics: + + + +CI/CD example of the release keyword. + +Create multiple releases in a single pipeline. + +Use a custom SSL CA certificate authority. + + + +release:tag_name + + +Required. The Git tag for the release. + +If the tag does not exist in the project yet, it is created at the same time as the release. +New tags use the SHA associated with the pipeline. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + A tag name. + + +CI/CD variables are supported. + +Example of release:tag_name: + +To create a release when a new tag is added to the project: + + + Use the $CI_COMMIT_TAG CI/CD variable as the tag_name. + Use rules:if to configure the job to run only for new tags. 
+ + +job: + script: echo ""Running the release job for the new tag."" + release: + tag_name: $CI_COMMIT_TAG + description: 'Release description' + rules: + - if: $CI_COMMIT_TAG + + +To create a release and a new tag at the same time, your rules +should not configure the job to run only for new tags. A semantic versioning example: + +job: + script: echo ""Running the release job and creating a new tag."" + release: + tag_name: ${MAJOR}_${MINOR}_${REVISION} + description: 'Release description' + rules: + - if: $CI_PIPELINE_SOURCE == ""schedule"" + + + +release:tag_message + + + +History + + + + + +Introduced in GitLab 15.3. Supported by release-cli v0.12.0 or later. + + + + + + +If the tag does not exist, the newly created tag is annotated with the message specified by tag_message. +If omitted, a lightweight tag is created. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + A text string. + + +Example of release:tag_message: + + release_job: + stage: release + release: + tag_name: $CI_COMMIT_TAG + description: 'Release description' + tag_message: 'Annotated tag message' + + + +release:name + + +The release name. If omitted, it is populated with the value of release: tag_name. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + A text string. + + +Example of release:name: + + release_job: + stage: release + release: + name: 'Release $CI_COMMIT_TAG' + + + +release:description + + +The long description of the release. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + A string with the long description. + The path to a file that contains the description. Introduced in GitLab 13.7. + + The file location must be relative to the project directory ($CI_PROJECT_DIR). + If the file is a symbolic link, it must be in the $CI_PROJECT_DIR. + The ./path/to/file and filename can’t contain spaces. + + + + +Example of release:description: + +job: + release: + tag_name: ${MAJOR}_${MINOR}_${REVISION} + description: './path/to/CHANGELOG.md' + + +Additional details: + + + The description is evaluated by the shell that runs release-cli. +You can use CI/CD variables to define the description, but some shells +use different syntax +to reference variables. Similarly, some shells might require special characters +to be escaped. For example, backticks (`) might need to be escaped with a backslash (\). + + + +release:ref + + +The ref for the release, if the release: tag_name doesn’t exist yet. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + A commit SHA, another tag name, or a branch name. + + + +release:milestones + + +The title of each milestone the release is associated with. + + +release:released_at + + +The date and time when the release is ready. + +Possible inputs: + + + A date enclosed in quotes and expressed in ISO 8601 format. + + +Example of release:released_at: + +released_at: '2021-03-15T08:00:00Z' + + +Additional details: + + + If it is not defined, the current date and time is used. + + + +release:assets:links + + + +History + + + + + +Introduced in GitLab 13.12. + + + + + + +Use release:assets:links to include asset links in the release. + +Requires release-cli version v0.4.0 or later. 
+ +Example of release:assets:links: + +assets: + links: + - name: 'asset1' + url: 'https://example.com/assets/1' + - name: 'asset2' + url: 'https://example.com/assets/2' + filepath: '/pretty/url/1' # optional + link_type: 'other' # optional + + + +resource_group + + + +History + + + + + +Introduced in GitLab 12.7. + + + + + + +Use resource_group to create a resource group that +ensures a job is mutually exclusive across different pipelines for the same project. + +For example, if multiple jobs that belong to the same resource group are queued simultaneously, +only one of the jobs starts. The other jobs wait until the resource_group is free. + +Resource groups behave similar to semaphores in other programming languages. + +You can choose a process mode to strategically control the job concurrency for your deployment preferences. The default process mode is unordered. To change the process mode of a resource group, use the API to send a request to edit an existing resource group. + +You can define multiple resource groups per environment. For example, +when deploying to physical devices, you might have multiple physical devices. Each device +can be deployed to, but only one deployment can occur per device at any given time. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + Only letters, digits, -, _, /, $, {, }, ., and spaces. +It can’t start or end with /. CI/CD variables are supported. + + +Example of resource_group: + +deploy-to-production: + script: deploy + resource_group: production + + +In this example, two deploy-to-production jobs in two separate pipelines can never run at the same time. As a result, +you can ensure that concurrent deployments never happen to the production environment. + +Related topics: + + + +Pipeline-level concurrency control with cross-project/parent-child pipelines. + + + +retry + + +Use retry to configure how many times a job is retried if it fails. +If not defined, defaults to 0 and jobs do not retry. + +When a job fails, the job is processed up to two more times, until it succeeds or +reaches the maximum number of retries. + +By default, all failure types cause the job to be retried. Use retry:when or retry:exit_codes +to select which failures to retry on. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + +0 (default), 1, or 2. + + +Example of retry: + +test: + script: rspec + retry: 2 + +test_advanced: + script: + - echo ""Run a script that results in exit code 137."" + - exit 137 + retry: + max: 2 + when: runner_system_failure + exit_codes: 137 + + +test_advanced will be retried up to 2 times if the exit code is 137 or if it had +a runner system failure. + + +retry:when + + +Use retry:when with retry:max to retry jobs for only specific failure cases. +retry:max is the maximum number of retries, like retry, and can be +0, 1, or 2. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + A single failure type, or an array of one or more failure types: + + + + + + +always: Retry on any failure (default). + +unknown_failure: Retry when the failure reason is unknown. + +script_failure: Retry when: + + The script failed. + The runner failed to pull the Docker image. For docker, docker+machine, kubernetes executors. + + + +api_failure: Retry on API failure. + +stuck_or_timeout_failure: Retry when the job got stuck or timed out. 
+ +runner_system_failure: Retry if there is a runner system failure (for example, job setup failed). + +runner_unsupported: Retry if the runner is unsupported. + +stale_schedule: Retry if a delayed job could not be executed. + +job_execution_timeout: Retry if the script exceeded the maximum execution time set for the job. + +archived_failure: Retry if the job is archived and can’t be run. + +unmet_prerequisites: Retry if the job failed to complete prerequisite tasks. + +scheduler_failure: Retry if the scheduler failed to assign the job to a runner. + +data_integrity_failure: Retry if there is an unknown job problem. + + +Example of retry:when (single failure type): + +test: + script: rspec + retry: + max: 2 + when: runner_system_failure + + +If there is a failure other than a runner system failure, the job is not retried. + +Example of retry:when (array of failure types): + +test: + script: rspec + retry: + max: 2 + when: + - runner_system_failure + - stuck_or_timeout_failure + + + +retry:exit_codes + + + +History + + + + + +Introduced in GitLab 16.10 with a flag named ci_retry_on_exit_codes. Disabled by default. + + + + + + + + On self-managed GitLab, by default this feature is not available. To make it available, +an administrator can enable the feature flag named ci_retry_on_exit_codes. + + +Use retry:exit_codes with retry:max to retry jobs for only specific failure cases. +retry:max is the maximum number of retries, like retry, and can be +0, 1, or 2. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + A single exit code. + An array of exit codes. + + +Example of retry:exit_codes: + +test_job_1: + script: + - echo ""Run a script that results in exit code 1. This job isn't retried."" + - exit 1 + retry: + max: 2 + exit_codes: 137 + +test_job_2: + script: + - echo ""Run a script that results in exit code 137. This job will be retried."" + - exit 137 + retry: + max: 1 + exit_codes: + - 255 + - 137 + + +Related topics: + +You can specify the number of retry attempts for certain stages of job execution +using variables. + + +rules + + + +History + + + + + +Introduced in GitLab 12.3. + + + + + + +Use rules to include or exclude jobs in pipelines. + +Rules are evaluated when the pipeline is created, and evaluated in order +until the first match. When a match is found, the job is either included or excluded from the pipeline, +depending on the configuration. + +You cannot use dotenv variables created in job scripts in rules, because rules are evaluated before any jobs run. + +rules replaces only/except and they can’t be used together +in the same job. If you configure one job to use both keywords, the GitLab returns +a key may not be used with rules error. + +rules accepts an array of rules defined with: + + + if + changes + exists + allow_failure + variables + when + + +You can combine multiple keywords together for complex rules. + +The job is added to the pipeline: + + + If an if, changes, or exists rule matches and also has when: on_success (default), +when: delayed, or when: always. + If a rule is reached that is only when: on_success, when: delayed, or when: always. + + +The job is not added to the pipeline: + + + If no rules match. + If a rule matches and has when: never. + + +You can use !reference tags to reuse rules configuration +in different jobs. + + +rules:if + + +Use rules:if clauses to specify when to add a job to a pipeline: + + + If an if statement is true, add the job to the pipeline. 
+ If an if statement is true, but it’s combined with when: never, do not add the job to the pipeline. + If no if statements are true, do not add the job to the pipeline. + + +if clauses are evaluated based on the values of CI/CD variables +or predefined CI/CD variables, with +some exceptions. + +Keyword type: Job-specific and pipeline-specific. You can use it as part of a job +to configure the job behavior, or with workflow to configure the pipeline behavior. + +Possible inputs: + + + A CI/CD variable expression. + + +Example of rules:if: + +job: + script: echo ""Hello, Rules!"" + rules: + - if: $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME =~ /^feature/ && $CI_MERGE_REQUEST_TARGET_BRANCH_NAME != $CI_DEFAULT_BRANCH + when: never + - if: $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME =~ /^feature/ + when: manual + allow_failure: true + - if: $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME + + +Additional details: + + + If a rule matches and has no when defined, the rule uses the when +defined for the job, which defaults to on_success if not defined. + In GitLab 14.5 and earlier, you can define when once per rule, or once at the job-level, +which applies to all rules. You can’t mix when at the job-level with when in rules. + In GitLab 14.6 and later, you can mix when at the job-level with when in rules. +when configuration in rules takes precedence over when at the job-level. + Unlike variables in script +sections, variables in rules expressions are always formatted as $VARIABLE. + + You can use rules:if with include to conditionally include other configuration files. + + + CI/CD variables on the right side of =~ and !~ expressions are evaluated as regular expressions. + + +Related topics: + + + +Common if expressions for rules. + +Avoid duplicate pipelines. + +Use rules to run merge request pipelines. + + + +rules:changes + + +Use rules:changes to specify when to add a job to a pipeline by checking for changes +to specific files. + + + caution You should use rules: changes only with branch pipelines or merge request pipelines. +You can use rules: changes with other pipeline types, but rules: changes always +evaluates to true for new branch pipelines or when there is no Git push event. Pipelines like tag pipelines, +scheduled pipelines, and manual pipelines, all do not +have a Git push event associated with them. In these cases, use rules: changes: compare_to +to specify the branch to compare against. + + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + +An array including any number of: + + + Paths to files. In GitLab 13.6 and later, file paths can include variables. +A file path array can also be in rules:changes:paths. + Wildcard paths for: + + Single directories, for example path/to/directory/*. + A directory and all its subdirectories, for example path/to/directory/**/*. + + + Wildcard glob paths for all files +with the same extension or multiple extensions, for example *.md or path/to/directory/*.{rb,py,sh}. + Wildcard paths to files in the root directory, or all directories, wrapped in double quotes. +For example ""*.json"" or ""**/*.json"". + + +Example of rules:changes: + +docker build: + script: docker build -t my-image:$CI_COMMIT_REF_SLUG . + rules: + - if: $CI_PIPELINE_SOURCE == ""merge_request_event"" + changes: + - Dockerfile + when: manual + allow_failure: true + + + + If the pipeline is a merge request pipeline, check Dockerfile for changes. 
+ If Dockerfile has changed, add the job to the pipeline as a manual job, and the pipeline +continues running even if the job is not triggered (allow_failure: true). + A maximum of 50 patterns or file paths can be defined per rules:changes section. + If Dockerfile has not changed, do not add job to any pipeline (same as when: never). + +rules:changes:paths is the same as rules:changes without +any subkeys. + + +Additional details: + + + +rules: changes works the same way as only: changes and except: changes. + Glob patterns are interpreted with Ruby’s File.fnmatch +with the flags +File::FNM_PATHNAME | File::FNM_DOTMATCH | File::FNM_EXTGLOB. + You can use when: never to implement a rule similar to except:changes. + +changes resolves to true if any of the matching files are changed (an OR operation). + + +Related topics: + + + +Jobs or pipelines can run unexpectedly when using rules: changes. + + + +rules:changes:paths + + + +History + + + + + +Introduced in GitLab 15.2. + + + + + + +Use rules:changes to specify that a job only be added to a pipeline when specific +files are changed, and use rules:changes:paths to specify the files. + +rules:changes:paths is the same as using rules:changes without +any subkeys. All additional details and related topics are the same. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + An array of file paths. File paths can include variables. + + +Example of rules:changes:paths: + +docker-build-1: + script: docker build -t my-image:$CI_COMMIT_REF_SLUG . + rules: + - if: $CI_PIPELINE_SOURCE == ""merge_request_event"" + changes: + - Dockerfile + +docker-build-2: + script: docker build -t my-image:$CI_COMMIT_REF_SLUG . + rules: + - if: $CI_PIPELINE_SOURCE == ""merge_request_event"" + changes: + paths: + - Dockerfile + + +In this example, both jobs have the same behavior. + + +rules:changes:compare_to + + + +History + + + + + +Introduced in GitLab 15.3 with a flag named ci_rules_changes_compare. Enabled by default. + +Generally available in GitLab 15.5. Feature flag ci_rules_changes_compare removed. + + + + + + +Use rules:changes:compare_to to specify which ref to compare against for changes to the files +listed under rules:changes:paths. + +Keyword type: Job keyword. You can use it only as part of a job, and it must be combined with rules:changes:paths. + +Possible inputs: + + + A branch name, like main, branch1, or refs/heads/branch1. + A tag name, like tag1 or refs/tags/tag1. + A commit SHA, like 2fg31ga14b. + + +Example of rules:changes:compare_to: + +docker build: + script: docker build -t my-image:$CI_COMMIT_REF_SLUG . + rules: + - if: $CI_PIPELINE_SOURCE == ""merge_request_event"" + changes: + paths: + - Dockerfile + compare_to: 'refs/heads/branch1' + + +In this example, the docker build job is only included when the Dockerfile has changed +relative to refs/heads/branch1 and the pipeline source is a merge request event. + + +rules:exists + + + +History + + + + + +Introduced in GitLab 12.4. + CI/CD variable support introduced in GitLab 15.6. + + + + + + +Use exists to run a job when certain files exist in the repository. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + An array of file paths. Paths are relative to the project directory ($CI_PROJECT_DIR) and can’t directly link outside it. File paths can use glob patterns and CI/CD variables. + + +Example of rules:exists: + +job: + script: docker build -t my-image:$CI_COMMIT_REF_SLUG . 
+ rules: + - exists: + - Dockerfile + + +job runs if a Dockerfile exists anywhere in the repository. + +Additional details: + + + Glob patterns are interpreted with Ruby’s File.fnmatch +with the flags +File::FNM_PATHNAME | File::FNM_DOTMATCH | File::FNM_EXTGLOB. + For performance reasons, GitLab performs a maximum of 10,000 checks against +exists patterns or file paths. After the 10,000th check, rules with patterned +globs always match. In other words, the exists rule always assumes a match in +projects with more than 10,000 files, or if there are fewer than 10,000 files but +the exists rules are checked more than 10,000 times. + A maximum of 50 patterns or file paths can be defined per rules:exists section. + +exists resolves to true if any of the listed files are found (an OR operation). + + + +rules:allow_failure + + + +History + + + + + +Introduced in GitLab 12.8. + + + + + + +Use allow_failure: true in rules to allow a job to fail +without stopping the pipeline. + +You can also use allow_failure: true with a manual job. The pipeline continues +running without waiting for the result of the manual job. allow_failure: false +combined with when: manual in rules causes the pipeline to wait for the manual +job to run before continuing. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +true or false. Defaults to false if not defined. + + +Example of rules:allow_failure: + +job: + script: echo ""Hello, Rules!"" + rules: + - if: $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH + when: manual + allow_failure: true + + +If the rule matches, then the job is a manual job with allow_failure: true. + +Additional details: + + + The rule-level rules:allow_failure overrides the job-level allow_failure, +and only applies when the specific rule triggers the job. + + + +rules:needs + + + +History + + + + + +Introduced in GitLab 16.0 with a flag named introduce_rules_with_needs. Disabled by default. + +Generally available in GitLab 16.2. Feature flag introduce_rules_with_needs removed. + + + + + + +Use needs in rules to update a job’s needs for specific conditions. When a condition matches a rule, the job’s needs configuration is completely replaced with the needs in the rule. + +Keyword type: Job-specific. You can use it only as part of a job. + +Possible inputs: + + + An array of job names as strings. + A hash with a job name, optionally with additional attributes. + An empty array ([]), to set the job needs to none when the specific condition is met. + + +Example of rules:needs: + +build-dev: + stage: build + rules: + - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH + script: echo ""Feature branch, so building dev version..."" + +build-prod: + stage: build + rules: + - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + script: echo ""Default branch, so building prod version..."" + +specs: + stage: test + needs: ['build-dev'] + rules: + - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH + needs: ['build-prod'] + - when: on_success # Run the job in other cases + script: echo ""Running dev specs by default, or prod specs when default branch..."" + + +In this example: + + + If the pipeline runs on a branch that is not the default branch, the specs job needs the build-dev job (default behavior). + If the pipeline runs on the default branch, and therefore the rule matches the condition, the specs job needs the build-prod job instead. + + +Additional details: + + + +needs in rules override any needs defined at the job-level. 
When overridden, the behavior is same as job-level needs. + +needs in rules can accept artifacts and optional. + + + +rules:variables + + + +History + + + + + +Introduced in GitLab 13.7. + +Feature flag removed in GitLab 13.10. + + + + + + +Use variables in rules to define variables for specific conditions. + +Keyword type: Job-specific. You can use it only as part of a job. + +Possible inputs: + + + A hash of variables in the format VARIABLE-NAME: value. + + +Example of rules:variables: + +job: + variables: + DEPLOY_VARIABLE: ""default-deploy"" + rules: + - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH + variables: # Override DEPLOY_VARIABLE defined + DEPLOY_VARIABLE: ""deploy-production"" # at the job level. + - if: $CI_COMMIT_REF_NAME =~ /feature/ + variables: + IS_A_FEATURE: ""true"" # Define a new variable. + script: + - echo ""Run script with $DEPLOY_VARIABLE as an argument"" + - echo ""Run another script if $IS_A_FEATURE exists"" + + + +rules:interruptible + + + +History + + + + + +Introduced in GitLab 16.10. + + + + + + +Use interruptible in rules to update a job’s interruptible value for specific conditions. + +Keyword type: Job-specific. You can use it only as part of a job. + +Possible inputs: + + + +true or false. + + +Example of rules:interruptible: + +job: + script: echo ""Hello, Rules!"" + interruptible: true + rules: + - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH + interruptible: false # Override interruptible defined at the job level. + - when: on_success + + +Additional details: + + + The rule-level rules:interruptible overrides the job-level interruptible, +and only applies when the specific rule triggers the job. + + + +script + + +Use script to specify commands for the runner to execute. + +All jobs except trigger jobs require a script keyword. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: An array including: + + + Single line commands. + Long commands split over multiple lines. + +YAML anchors. + + +CI/CD variables are supported. + +Example of script: + +job1: + script: ""bundle exec rspec"" + +job2: + script: + - uname -a + - bundle exec rspec + + +Additional details: + + + When you use these special characters in script, you must use single quotes (') or double quotes (""). + + +Related topics: + + + You can ignore non-zero exit codes. + +Use color codes with script +to make job logs easier to review. + +Create custom collapsible sections +to simplify job log output. + + + +secrets + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + +Introduced in GitLab 13.4. + + + + + + +Use secrets to specify CI/CD secrets to: + + + Retrieve from an external secrets provider. + Make available in the job as CI/CD variables +(file type by default). + + + +secrets:vault + + + +History + + + + + +Introduced in GitLab 13.4 and GitLab Runner 13.4. + + + + + + +Use secrets:vault to specify secrets provided by a HashiCorp Vault. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +engine:name: Name of the secrets engine. + +engine:path: Path to the secrets engine. + +path: Path to the secret. + +field: Name of the field where the password is stored. 
+ + +Example of secrets:vault: + +To specify all details explicitly and use the KV-V2 secrets engine: + +job: + secrets: + DATABASE_PASSWORD: # Store the path to the secret in this CI/CD variable + vault: # Translates to secret: `ops/data/production/db`, field: `password` + engine: + name: kv-v2 + path: ops + path: production/db + field: password + + +You can shorten this syntax. With the short syntax, engine:name and engine:path +both default to kv-v2: + +job: + secrets: + DATABASE_PASSWORD: # Store the path to the secret in this CI/CD variable + vault: production/db/password # Translates to secret: `kv-v2/data/production/db`, field: `password` + + +To specify a custom secrets engine path in the short syntax, add a suffix that starts with @: + +job: + secrets: + DATABASE_PASSWORD: # Store the path to the secret in this CI/CD variable + vault: production/db/password@ops # Translates to secret: `ops/data/production/db`, field: `password` + + + +secrets:gcp_secret_manager + + + +History + + + + + +Introduced in GitLab 16.8 and GitLab Runner 16.8. + + + + + + +Use secrets:gcp_secret_manager to specify secrets provided by GCP Secret Manager. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +name: Name of the secret. + +version: Version of the secret. + + +Example of secrets:gcp_secret_manager: + +job: + secrets: + DATABASE_PASSWORD: + gcp_secret_manager: + name: 'test' + version: 2 + + +Related topics: + + + +Use GCP Secret Manager secrets in GitLab CI/CD. + + + +secrets:azure_key_vault + + + +History + + + + + +Introduced in GitLab 16.3 and GitLab Runner 16.3. + + + + + + +Use secrets:azure_key_vault to specify secrets provided by a Azure Key Vault. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +name: Name of the secret. + +version: Version of the secret. + + +Example of secrets:azure_key_vault: + +job: + secrets: + DATABASE_PASSWORD: + azure_key_vault: + name: 'test' + version: 'test' + + +Related topics: + + + +Use Azure Key Vault secrets in GitLab CI/CD. + + + +secrets:file + + + +History + + + + + +Introduced in GitLab 14.1 and GitLab Runner 14.1. + + + + + + +Use secrets:file to configure the secret to be stored as either a +file or variable type CI/CD variable + +By default, the secret is passed to the job as a file type CI/CD variable. The value +of the secret is stored in the file and the variable contains the path to the file. + +If your software can’t use file type CI/CD variables, set file: false to store +the secret value directly in the variable. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +true (default) or false. + + +Example of secrets:file: + +job: + secrets: + DATABASE_PASSWORD: + vault: production/db/password@ops + file: false + + +Additional details: + + + The file keyword is a setting for the CI/CD variable and must be nested under +the CI/CD variable name, not in the vault section. + + + +secrets:token + + + +History + + + + + +Introduced in GitLab 15.8, controlled by the Limit JSON Web Token (JWT) access setting. + +Made always available and Limit JSON Web Token (JWT) access setting removed in GitLab 16.0. + + + + + + +Use secrets:token to explicitly select a token to use when authenticating with Vault by referencing the token’s CI/CD variable. + +Keyword type: Job keyword. You can use it only as part of a job. 
Possible inputs:

 The name of an ID token.

Example of secrets:token:

job:
  id_tokens:
    AWS_TOKEN:
      aud: https://aws.example.com
    VAULT_TOKEN:
      aud: https://vault.example.com
  secrets:
    DB_PASSWORD:
      vault: gitlab/production/db
      token: $VAULT_TOKEN

Additional details:

 When the token keyword is not set, the first ID token is used to authenticate.


services


Use services to specify any additional Docker images that your scripts require to run successfully. The services image is linked
to the image specified in the image keyword.

Keyword type: Job keyword. You can use it only as part of a job or in the
default section.

Possible inputs: The name of the services image, including the registry path if needed, in one of these formats:

 <image-name> (Same as using <image-name> with the latest tag)
 <image-name>:<tag>
 <image-name>@<digest>

CI/CD variables are supported, but not for alias.

Example of services:

default:
  image:
    name: ruby:2.6
    entrypoint: [""/bin/bash""]

  services:
    - name: my-postgres:11.7
      alias: db-postgres
      entrypoint: [""/usr/local/bin/db-postgres""]
      command: [""start""]

  before_script:
    - bundle install

test:
  script:
    - bundle exec rake spec

In this example, GitLab launches two containers for the job:

 A Ruby container that runs the script commands.
 A PostgreSQL container. The script commands in the Ruby container can connect to
the PostgreSQL database at the db-postgres hostname.

Related topics:

 Available settings for services.
 Define services in the .gitlab-ci.yml file.
 Run your CI/CD jobs in Docker containers.
 Use Docker to build Docker images.


services:docker


History

 Introduced in GitLab 16.7. Requires GitLab Runner 16.7 or later.
 user input option introduced in GitLab 16.8.

Use services:docker to pass options to the Docker executor of a GitLab Runner.

Keyword type: Job keyword. You can use it only as part of a job or in the
default section.

Possible inputs:

A hash of options for the Docker executor, which can include:

 platform: Selects the architecture of the image to pull. When not specified,
the default is the same platform as the host runner.
 user: Specify the username or UID to use when running the container.

Example of services:docker:

arm-sql-job:
  script: echo ""Run sql tests in service container""
  image: ruby:2.6
  services:
    - name: super/sql:experimental
      docker:
        platform: arm64/v8
        user: dave

Additional details:

 services:docker:platform maps to the docker pull --platform option.
 services:docker:user maps to the docker run --user option.


services:pull_policy


History

 Introduced in GitLab 15.1 with a flag named ci_docker_image_pull_policy. Disabled by default.
 Enabled on GitLab.com and self-managed in GitLab 15.2.
 Generally available in GitLab 15.4. Feature flag ci_docker_image_pull_policy removed.
 Requires GitLab Runner 15.1 or later.

The pull policy that the runner uses to fetch the Docker image.

Keyword type: Job keyword. You can use it only as part of a job or in the default section.

Possible inputs:

 A single pull policy, or multiple pull policies in an array.
Can be always, if-not-present, or never. 
+ + +Examples of services:pull_policy: + +job1: + script: echo ""A single pull policy."" + services: + - name: postgres:11.6 + pull_policy: if-not-present + +job2: + script: echo ""Multiple pull policies."" + services: + - name: postgres:11.6 + pull_policy: [always, if-not-present] + + +Additional details: + + + If the runner does not support the defined pull policy, the job fails with an error similar to: +ERROR: Job failed (system failure): the configured PullPolicies ([always]) are not allowed by AllowedPullPolicies ([never]). + + +Related topics: + + + +Run your CI/CD jobs in Docker containers. + +Configure how runners pull images. + +Set multiple pull policies. + + + +stage + + +Use stage to define which stage a job runs in. Jobs in the same +stage can execute in parallel (see Additional details). + +If stage is not defined, the job uses the test stage by default. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: A string, which can be a: + + + +Default stage. + User-defined stages. + + +Example of stage: + +stages: + - build + - test + - deploy + +job1: + stage: build + script: + - echo ""This job compiles code."" + +job2: + stage: test + script: + - echo ""This job tests the compiled code. It runs when the build stage completes."" + +job3: + script: + - echo ""This job also runs in the test stage"". + +job4: + stage: deploy + script: + - echo ""This job deploys the code. It runs when the test stage completes."" + environment: production + + +Additional details: + + + Jobs can run in parallel if they run on different runners. + If you have only one runner, jobs can run in parallel if the runner’s +concurrent setting +is greater than 1. + + + +stage: .pre + + + +History + + + + + +Introduced in GitLab 12.4. + + + + + + +Use the .pre stage to make a job run at the start of a pipeline. .pre is +always the first stage in a pipeline. User-defined stages execute after .pre. +You do not have to define .pre in stages. + +If a pipeline contains only jobs in the .pre or .post stages, it does not run. +There must be at least one other job in a different stage. + +Keyword type: You can only use it with a job’s stage keyword. + +Example of stage: .pre: + +stages: + - build + - test + +job1: + stage: build + script: + - echo ""This job runs in the build stage."" + +first-job: + stage: .pre + script: + - echo ""This job runs in the .pre stage, before all other stages."" + +job2: + stage: test + script: + - echo ""This job runs in the test stage."" + + + +stage: .post + + + +History + + + + + +Introduced in GitLab 12.4. + + + + + + +Use the .post stage to make a job run at the end of a pipeline. .post +is always the last stage in a pipeline. User-defined stages execute before .post. +You do not have to define .post in stages. + +If a pipeline contains only jobs in the .pre or .post stages, it does not run. +There must be at least one other job in a different stage. + +Keyword type: You can only use it with a job’s stage keyword. + +Example of stage: .post: + +stages: + - build + - test + +job1: + stage: build + script: + - echo ""This job runs in the build stage."" + +last-job: + stage: .post + script: + - echo ""This job runs in the .post stage, after all other stages."" + +job2: + stage: test + script: + - echo ""This job runs in the test stage."" + + +Additional details: + + + If a pipeline has jobs with needs: [] and jobs in the .pre stage, they will +all start as soon as the pipeline is created. 
Jobs with needs: [] start immediately, +ignoring any stage configuration. + + + +tags + + + +History + + + + + A limit of 50 tags per job enabled on GitLab.com in GitLab 14.3. + A limit of 50 tags per job enabled on self-managed in GitLab 14.3. + + + + + + +Use tags to select a specific runner from the list of all runners that are +available for the project. + +When you register a runner, you can specify the runner’s tags, for +example ruby, postgres, or development. To pick up and run a job, a runner must +be assigned every tag listed in the job. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + An array of tag names. + CI/CD variables are supported +in GitLab 14.1 and later. + + +Example of tags: + +job: + tags: + - ruby + - postgres + + +In this example, only runners with both the ruby and postgres tags can run the job. + +Additional details: + + + In GitLab 14.3 and later, +the number of tags must be less than 50. + + +Related topics: + + + +Use tags to control which jobs a runner can run. + +Select different runner tags for each parallel matrix job. + + + +timeout + + + +History + + + + + +Introduced in GitLab 12.3. + + + + + + +Use timeout to configure a timeout for a specific job. If the job runs for longer +than the timeout, the job fails. + +The job-level timeout can be longer than the project-level timeout, +but can’t be longer than the runner’s timeout. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: A period of time written in natural language. For example, these are all equivalent: + + + 3600 seconds + 60 minutes + one hour + + +Example of timeout: + +build: + script: build.sh + timeout: 3 hours 30 minutes + +test: + script: rspec + timeout: 3h 30m + + + +trigger + + + +History + + + + + Support for resource_group introduced support for resource_group in GitLab 13.9. + Support for environment introduced in GitLab 16.4. + + + + + + +Use trigger to declare that a job is a “trigger job” which starts a +downstream pipeline that is either: + + + +A multi-project pipeline. + +A child pipeline. + + +Trigger jobs can use only a limited set of GitLab CI/CD configuration keywords. +The keywords available for use in trigger jobs are: + + + +allow_failure. + +extends. + +needs, but not needs:project. + +only and except. + +rules. + +stage. + +trigger. + +variables. + +when (only with a value of on_success, on_failure, or always). + +resource_group. + +environment. + + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + For multi-project pipelines, the path to the downstream project. CI/CD variables are supported +in GitLab 15.3 and later, but not job-level persisted variables. +Alternatively, use trigger:project. + For child pipelines, use trigger:include. + + +Example of trigger: + +trigger-multi-project-pipeline: + trigger: my-group/my-project + + +Additional details: + + + You cannot use the API to start when:manual trigger jobs. + In GitLab 13.5 and later, you +can use when:manual in the same job as trigger. In GitLab 13.4 and +earlier, using them together causes the error jobs:#{job-name} when should be on_success, on_failure or always. + You cannot manually specify CI/CD variables +before running a manual trigger job. + +Manual pipeline variables +and scheduled pipeline variables +are not passed to downstream pipelines by default. Use trigger:forward +to forward these variables to downstream pipelines. 
+ +Job-level persisted variables +are not available in trigger jobs. + Environment variables defined in the runner’s config.toml are not available to trigger jobs and are not passed to downstream pipelines. + + +Related topics: + + + +Multi-project pipeline configuration examples. + To run a pipeline for a specific branch, tag, or commit, you can use a trigger token +to authenticate with the pipeline triggers API. +The trigger token is different than the trigger keyword. + + + +trigger:include + + +Use trigger:include to declare that a job is a “trigger job” which starts a +child pipeline. + +Use trigger:include:artifact to trigger a dynamic child pipeline. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + The path to the child pipeline’s configuration file. + + +Example of trigger:include: + +trigger-child-pipeline: + trigger: + include: path/to/child-pipeline.gitlab-ci.yml + + +Related topics: + + + +Child pipeline configuration examples. + + + +trigger:project + + +Use trigger:project to declare that a job is a “trigger job” which starts a +multi-project pipeline. + +By default, the multi-project pipeline triggers for the default branch. Use trigger:branch +to specify a different branch. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + The path to the downstream project. CI/CD variables are supported +in GitLab 15.3 and later, but not job-level persisted variables. + + +Example of trigger:project: + +trigger-multi-project-pipeline: + trigger: + project: my-group/my-project + + +Example of trigger:project for a different branch: + +trigger-multi-project-pipeline: + trigger: + project: my-group/my-project + branch: development + + +Related topics: + + + +Multi-project pipeline configuration examples. + To run a pipeline for a specific branch, tag, or commit, you can also use a trigger token +to authenticate with the pipeline triggers API. +The trigger token is different than the trigger keyword. + + + +trigger:strategy + + +Use trigger:strategy to force the trigger job to wait for the downstream pipeline to complete +before it is marked as success. + +This behavior is different than the default, which is for the trigger job to be marked as +success as soon as the downstream pipeline is created. + +This setting makes your pipeline execution linear rather than parallel. + +Example of trigger:strategy: + +trigger_job: + trigger: + include: path/to/child-pipeline.yml + strategy: depend + + +In this example, jobs from subsequent stages wait for the triggered pipeline to +successfully complete before starting. + +Additional details: + + + +Optional manual jobs in the downstream pipeline +do not affect the status of the downstream pipeline or the upstream trigger job. +The downstream pipeline can complete successfully without running any optional manual jobs. + +Blocking manual jobs in the downstream pipeline +must run before the trigger job is marked as successful or failed. The trigger job +shows pending ( ) if the downstream pipeline status is +waiting for manual action ( ) due to manual jobs. By default, +jobs in later stages do not start until the trigger job completes. + If the downstream pipeline has a failed job, but the job uses allow_failure: true, +the downstream pipeline is considered successful and the trigger job shows success. + + + +trigger:forward + + + +History + + + + + +Introduced in GitLab 14.9 with a flag named ci_trigger_forward_variables. Disabled by default. 
+ +Enabled on GitLab.com and self-managed in GitLab 14.10. + +Generally available in GitLab 15.1. Feature flag ci_trigger_forward_variables removed. + + + + + + +Use trigger:forward to specify what to forward to the downstream pipeline. You can control +what is forwarded to both parent-child pipelines +and multi-project pipelines. + +Possible inputs: + + + +yaml_variables: true (default), or false. When true, variables defined +in the trigger job are passed to downstream pipelines. + +pipeline_variables: true or false (default). When true, manual pipeline variables and scheduled pipeline variables +are passed to downstream pipelines. + + +Example of trigger:forward: + +Run this pipeline manually, with +the CI/CD variable MYVAR = my value: + +variables: # default variables for each job + VAR: value + +# Default behavior: +# - VAR is passed to the child +# - MYVAR is not passed to the child +child1: + trigger: + include: .child-pipeline.yml + +# Forward pipeline variables: +# - VAR is passed to the child +# - MYVAR is passed to the child +child2: + trigger: + include: .child-pipeline.yml + forward: + pipeline_variables: true + +# Do not forward YAML variables: +# - VAR is not passed to the child +# - MYVAR is not passed to the child +child3: + trigger: + include: .child-pipeline.yml + forward: + yaml_variables: false + + +Additional details: + + + CI/CD variables forwarded to downstream pipelines with trigger:forward have the +highest precedence. If a variable +with the same name is defined in the downstream pipeline, that variable is overwritten +by the forwarded variable. + + + +variables + + +Use variables to define CI/CD variables for jobs. + +Keyword type: Global and job keyword. You can use it at the global level, +and also at the job level. + +If you define variables as a global keyword, it behaves like default variables +for all jobs. Each variable is copied to every job configuration when the pipeline is created. +If the job already has that variable defined, the job-level variable takes precedence. + +Variables defined at the global-level cannot be used as inputs for other global keywords +like include. These variables can only +be used at the job-level, in script, before_script, and after_script sections, +as well as inputs in some job keywords like rules. + +Possible inputs: Variable name and value pairs: + + + The name can use only numbers, letters, and underscores (_). In some shells, +the first character must be a letter. + The value must be a string. + + +CI/CD variables are supported. + +Examples of variables: + +variables: + DEPLOY_SITE: ""https://example.com/"" + +deploy_job: + stage: deploy + script: + - deploy-script --url $DEPLOY_SITE --path ""/"" + environment: production + +deploy_review_job: + stage: deploy + variables: + REVIEW_PATH: ""/review"" + script: + - deploy-review-script --url $DEPLOY_SITE --path $REVIEW_PATH + environment: production + + +Additional details: + + + All YAML-defined variables are also set to any linked Docker service containers. + YAML-defined variables are meant for non-sensitive project configuration. Store sensitive information +in protected variables or CI/CD secrets. + +Manual pipeline variables +and scheduled pipeline variables +are not passed to downstream pipelines by default. Use trigger:forward +to forward these variables to downstream pipelines. + + +Related topics: + + + +Predefined variables are variables the runner +automatically creates and makes available in the job. + You can configure runner behavior with variables. 
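For example, a minimal sketch of the precedence behavior described above (the job and variable names here are placeholders, not from an official example): the job-level definition replaces the global value only in the job that redefines it.

variables:
  LOG_LEVEL: ""info""        # global default, copied to every job

build-job:
  script: echo ""Log level is $LOG_LEVEL""   # uses the global value, info

debug-job:
  variables:
    LOG_LEVEL: ""debug""     # job-level definition takes precedence in this job
  script: echo ""Log level is $LOG_LEVEL""   # uses debug
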
+ + + +variables:description + + + +History + + + + + +Introduced in GitLab 13.7. + + + + + + +Use the description keyword to define a description for a pipeline-level (global) variable. +The description displays with the prefilled variable name when running a pipeline manually. + +Keyword type: Global keyword. You cannot use it for job-level variables. + +Possible inputs: + + + A string. + + +Example of variables:description: + +variables: + DEPLOY_NOTE: + description: ""The deployment note. Explain the reason for this deployment."" + + +Additional details: + + + When used without value, the variable exists in pipelines that were not triggered manually, +and the default value is an empty string (''). + + + +variables:value + + + +History + + + + + +Introduced in GitLab 13.7. + + + + + + +Use the value keyword to define a pipeline-level (global) variable’s value. When used with +variables: description, the variable value is prefilled when running a pipeline manually. + +Keyword type: Global keyword. You cannot use it for job-level variables. + +Possible inputs: + + + A string. + + +Example of variables:value: + +variables: + DEPLOY_ENVIRONMENT: + value: ""staging"" + description: ""The deployment target. Change this variable to 'canary' or 'production' if needed."" + + +Additional details: + + + If used without variables: description, the behavior is +the same as variables. + + + +variables:options + + + +History + + + + + +Introduced in GitLab 15.7. + + + + + + +Use variables:options to define an array of values that are selectable in the UI when running a pipeline manually. + +Must be used with variables: value, and the string defined for value: + + + Must also be one of the strings in the options array. + Is the default selection. + + +If there is no description, +this keyword has no effect. + +Keyword type: Global keyword. You cannot use it for job-level variables. + +Possible inputs: + + + An array of strings. + + +Example of variables:options: + +variables: + DEPLOY_ENVIRONMENT: + value: ""staging"" + options: + - ""production"" + - ""staging"" + - ""canary"" + description: ""The deployment target. Set to 'staging' by default."" + + + +variables:expand + + + +History + + + + + +Introduced in GitLab 15.6 with a flag named ci_raw_variables_in_yaml_config. Disabled by default. + +Enabled on GitLab.com in GitLab 15.6. + +Enabled on self-managed in GitLab 15.7. + +Generally available in GitLab 15.8. Feature flag ci_raw_variables_in_yaml_config removed. + + + + + + +Use the expand keyword to configure a variable to be expandable or not. + +Keyword type: Global and job keyword. You can use it at the global level, and also at the job level. + +Possible inputs: + + + +true (default): The variable is expandable. + +false: The variable is not expandable. + + +Example of variables:expand: + +variables: + VAR1: value1 + VAR2: value2 $VAR1 + VAR3: + value: value3 $VAR1 + expand: false + + + + The result of VAR2 is value2 value1. + The result of VAR3 is value3 $VAR1. + + +Additional details: + + + The expand keyword can only be used with the global and job-level variables keywords. +You can’t use it with rules:variables or workflow:rules:variables. + + + +when + + +Use when to configure the conditions for when jobs run. If not defined in a job, +the default value is when: on_success. + +Keyword type: Job keyword. You can use it as part of a job. when: always and when: never can also be used in workflow:rules. 
+ +Possible inputs: + + + +on_success (default): Run the job only when no jobs in earlier stages fail +or have allow_failure: true. + +on_failure: Run the job only when at least one job in an earlier stage fails. A job in an earlier stage +with allow_failure: true is always considered successful. + +never: Don’t run the job regardless of the status of jobs in earlier stages. +Can only be used in a rules section or workflow: rules. + +always: Run the job regardless of the status of jobs in earlier stages. Can also be used in workflow:rules. + +manual: Run the job only when triggered manually. + +delayed: Delay the execution of a job +for a specified duration. + + +Example of when: + +stages: + - build + - cleanup_build + - test + - deploy + - cleanup + +build_job: + stage: build + script: + - make build + +cleanup_build_job: + stage: cleanup_build + script: + - cleanup build when failed + when: on_failure + +test_job: + stage: test + script: + - make test + +deploy_job: + stage: deploy + script: + - make deploy + when: manual + environment: production + +cleanup_job: + stage: cleanup + script: + - cleanup after jobs + when: always + + +In this example, the script: + + + Executes cleanup_build_job only when build_job fails. + Always executes cleanup_job as the last step in pipeline regardless of +success or failure. + Executes deploy_job when you run it manually in the GitLab UI. + + +Additional details: + + + In GitLab 13.5 and later, you +can use when:manual in the same job as trigger. In GitLab 13.4 and +earlier, using them together causes the error jobs:#{job-name} when should be on_success, on_failure or always. + The default behavior of allow_failure changes to true with when: manual. +However, if you use when: manual with rules, allow_failure defaults +to false. + + +Related topics: + + + +when can be used with rules for more dynamic job control. + +when can be used with workflow to control when a pipeline can start. + + +Deprecated keywords + + +The following keywords are deprecated. + + + note These keywords are still usable to ensure backwards compatibility, +but could be scheduled for removal in a future major milestone. + + +Globally-defined image, services, cache, before_script, after_script + + +Defining image, services, cache, before_script, and after_script globally is deprecated. +Using these keywords at the top level is still possible to ensure backwards compatibility, +but could be scheduled for removal in a future milestone. + +Use default instead. For example: + +default: + image: ruby:3.0 + services: + - docker:dind + cache: + paths: [vendor/] + before_script: + - bundle config set path vendor/bundle + - bundle install + after_script: + - rm -rf tmp/ + + + +only / except + + + + note +only and except are deprecated and not being actively developed. These keywords +are still usable to ensure backwards compatibility, but could be scheduled for removal +in a future milestone. To control when to add jobs to pipelines, use rules instead. + + +You can use only and except to control when to add jobs to pipelines. + + + Use only to define when a job runs. + Use except to define when a job does not run. + + +See specify when jobs run with only and except +for more details and examples. + + +only:refs / except:refs + + + + note +only:refs and except:refs are deprecated and not being actively developed. These keywords +are still usable to ensure backwards compatibility, but could be scheduled for removal +in a future milestone. 
To use refs, regular expressions, or variables to control +when to add jobs to pipelines, use rules:if instead. + + +You can use the only:refs and except:refs keywords to control when to add jobs to a +pipeline based on branch names or pipeline types. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: An array including any number of: + + + Branch names, for example main or my-feature-branch. + +Regular expressions +that match against branch names, for example /^feature-.*/. + + The following keywords: + + + + + Value + Description + + + + + api + For pipelines triggered by the pipelines API. + + + branches + When the Git reference for a pipeline is a branch. + + + chat + For pipelines created by using a GitLab ChatOps command. + + + external + When you use CI services other than GitLab. + + + external_pull_requests + When an external pull request on GitHub is created or updated (See Pipelines for external pull requests). + + + merge_requests + For pipelines created when a merge request is created or updated. Enables merge request pipelines, merged results pipelines, and merge trains. + + + pipelines + For multi-project pipelines created by using the API with CI_JOB_TOKEN, or the trigger keyword. + + + pushes + For pipelines triggered by a git push event, including for branches and tags. + + + schedules + For scheduled pipelines. + + + tags + When the Git reference for a pipeline is a tag. + + + triggers + For pipelines created by using a trigger token. + + + web + For pipelines created by selecting Run pipeline in the GitLab UI, from the project’s Build > Pipelines section. + + + + + + +Example of only:refs and except:refs: + +job1: + script: echo + only: + - main + - /^issue-.*$/ + - merge_requests + +job2: + script: echo + except: + - main + - /^stable-branch.*$/ + - schedules + + +Additional details: + + + Scheduled pipelines run on specific branches, so jobs configured with only: branches +run on scheduled pipelines too. Add except: schedules to prevent jobs with only: branches +from running on scheduled pipelines. + + only or except used without any other keywords are equivalent to only: refs +or except: refs. For example, the following two jobs configurations have the same +behavior: + + +job1: + script: echo + only: + - branches + +job2: + script: echo + only: + refs: + - branches + + + + If a job does not use only, except, or rules, then only is set to branches +and tags by default. + + For example, job1 and job2 are equivalent: + + +job1: + script: echo ""test"" + +job2: + script: echo ""test"" + only: + - branches + - tags + + + + + +only:variables / except:variables + + + + note +only:variables and except:variables are deprecated and not being actively developed. +These keywords are still usable to ensure backwards compatibility, but could be scheduled +for removal in a future milestone. To use refs, regular expressions, or variables +to control when to add jobs to pipelines, use rules:if instead. + + +You can use the only:variables or except:variables keywords to control when to add jobs +to a pipeline, based on the status of CI/CD variables. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + An array of CI/CD variable expressions. + + +Example of only:variables: + +deploy: + script: cap staging deploy + only: + variables: + - $RELEASE == ""staging"" + - $STAGING + + +Related topics: + + + +only:variables and except:variables examples. 
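Because the note above recommends rules:if for new configuration, the deploy example can also be expressed with rules. This is a minimal equivalent sketch, assuming the same RELEASE and STAGING variables; rules are evaluated in order, so the job is added to the pipeline when either condition matches:

deploy:
  script: cap staging deploy
  rules:
    - if: $RELEASE == ""staging""
    - if: $STAGING
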
+ + + +only:changes / except:changes + + +only:variables and except:variables + + + note +only:changes and except:changes are deprecated and not being actively developed. +These keywords are still usable to ensure backwards compatibility, but could be scheduled +for removal in a future milestone. To use changed files to control when to add a job to a pipeline, +use rules:changes instead. + + +Use the changes keyword with only to run a job, or with except to skip a job, +when a Git push event modifies a file. + +Use changes in pipelines with the following refs: + + + branches + external_pull_requests + +merge_requests (see additional details about using only:changes with merge request pipelines) + + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: An array including any number of: + + + Paths to files. + Wildcard paths for: + + Single directories, for example path/to/directory/*. + A directory and all its subdirectories, for example path/to/directory/**/*. + + + Wildcard glob paths for all files +with the same extension or multiple extensions, for example *.md or path/to/directory/*.{rb,py,sh}. + Wildcard paths to files in the root directory, or all directories, wrapped in double quotes. +For example ""*.json"" or ""**/*.json"". + + +Example of only:changes: + +docker build: + script: docker build -t my-image:$CI_COMMIT_REF_SLUG . + only: + refs: + - branches + changes: + - Dockerfile + - docker/scripts/* + - dockerfiles/**/* + - more_scripts/*.{rb,py,sh} + - ""**/*.json"" + + +Additional details: + + + +changes resolves to true if any of the matching files are changed (an OR operation). + Glob patterns are interpreted with Ruby’s File.fnmatch +with the flags +File::FNM_PATHNAME | File::FNM_DOTMATCH | File::FNM_EXTGLOB. + If you use refs other than branches, external_pull_requests, or merge_requests, +changes can’t determine if a given file is new or old and always returns true. + If you use only: changes with other refs, jobs ignore the changes and always run. + If you use except: changes with other refs, jobs ignore the changes and never run. + + +Related topics: + + + +only: changes and except: changes examples. + If you use changes with only allow merge requests to be merged if the pipeline succeeds, +you should also use only:merge_requests. + +Jobs or pipelines can run unexpectedly when using only: changes. + + + +only:kubernetes / except:kubernetes + + + + note +only:kubernetes and except:kubernetes are deprecated and not being actively developed. +These keywords are still usable to ensure backwards compatibility, but could be scheduled +for removal in a future milestone. To control if jobs are added to the pipeline when +the Kubernetes service is active in the project, use rules:if with the +CI_KUBERNETES_ACTIVE predefined CI/CD variable instead. + + +Use only:kubernetes or except:kubernetes to control if jobs are added to the pipeline +when the Kubernetes service is active in the project. + +Keyword type: Job-specific. You can use it only as part of a job. + +Possible inputs: + + + The kubernetes strategy accepts only the active keyword. + + +Example of only:kubernetes: + +deploy: + only: + kubernetes: active + + +In this example, the deploy job runs only when the Kubernetes service is active +in the project. + + +2. 
CI/CD pipelines + + + +CI/CD pipelines + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + + note Watch the +“Mastering continuous software development” +webcast to see a comprehensive demo of a GitLab CI/CD pipeline. + + +Pipelines are the top-level component of continuous integration, delivery, and deployment. + +Pipelines comprise: + + + Jobs, which define what to do. For example, jobs that compile or test code. + Stages, which define when to run the jobs. For example, stages that run tests after stages that compile the code. + + +Jobs are executed by runners. Multiple jobs in the same stage are executed in parallel, +if there are enough concurrent runners. + +If all jobs in a stage succeed, the pipeline moves on to the next stage. + +If any job in a stage fails, the next stage is not (usually) executed and the pipeline ends early. + +In general, pipelines are executed automatically and require no intervention once created. However, there are +also times when you can manually interact with a pipeline. + +A typical pipeline might consist of four stages, executed in the following order: + + + A build stage, with a job called compile. + A test stage, with two jobs called test1 and test2. + A staging stage, with a job called deploy-to-stage. + A production stage, with a job called deploy-to-prod. + + + + note If you have a mirrored repository that GitLab pulls from, +you may need to enable pipeline triggering in your project’s +Settings > Repository > Mirroring repositories > Trigger pipelines for mirror updates. + + +Types of pipelines + + +Pipelines can be configured in many different ways: + + + +Basic pipelines run everything in each stage concurrently, +followed by the next stage. + +Directed Acyclic Graph Pipeline (DAG) pipelines are based on relationships +between jobs and can run more quickly than basic pipelines. + +Merge request pipelines run for merge +requests only (rather than for every commit). + +Merged results pipelines +are merge request pipelines that act as though the changes from the source branch have +already been merged into the target branch. + +Merge trains +use merged results pipelines to queue merges one after the other. + +Parent-child pipelines break down complex pipelines +into one parent pipeline that can trigger multiple child sub-pipelines, which all +run in the same project and with the same SHA. This pipeline architecture is commonly used for mono-repos. + +Multi-project pipelines combine pipelines for different projects together. + + +Configure a pipeline + + +Pipelines and their component jobs and stages are defined in the CI/CD pipeline configuration file for each project. + + + +Jobs are the basic configuration component. + Stages are defined by using the stages keyword. + + +For a list of configuration options in the CI pipeline file, see the CI/CD YAML syntax reference. + +You can also configure specific aspects of your pipelines through the GitLab UI. For example: + + + +Pipeline settings for each project. + +Pipeline schedules. + +Custom CI/CD variables. + + +Ref specs for runners + + +When a runner picks a pipeline job, GitLab provides that job’s metadata. This includes the Git refspecs, +which indicate which ref (such as branch or tag) and commit (SHA1) are checked out from your +project repository. 
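As a rough illustration of what these refspecs mean in practice, the checkout a runner performs for a branch pipeline corresponds approximately to the following Git commands. The pipeline ID, branch name, and SHA below are placeholders, not values taken from a real job:

# Fetch the pipeline ref and the branch ref, then check out the job's SHA
git fetch origin +refs/pipelines/1234:refs/pipelines/1234 +refs/heads/my-branch:refs/remotes/origin/my-branch
git checkout --force 2ef7bde608ce5404e97d5f042f95f89f1c232871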
+ +This table lists the refspecs injected for each pipeline type: + + + + + Pipeline type + Refspecs + + + + + pipeline for branches + ++:refs/pipelines/ and +refs/heads/:refs/remotes/origin/ + + + + pipeline for tags + ++:refs/pipelines/ and +refs/tags/:refs/tags/ + + + + merge request pipeline + +refs/pipelines/:refs/pipelines/ + + + + +The refs refs/heads/ and refs/tags/ exist in your +project repository. GitLab generates the special ref refs/pipelines/ during a +running pipeline job. This ref can be created even after the associated branch or tag has been +deleted. It’s therefore useful in some features such as automatically stopping an environment, +and merge trains +that might run pipelines after branch deletion. + +View pipelines + + +You can find the current and historical pipeline runs under your project’s +Build > Pipelines page. You can also access pipelines for a merge request by navigating +to its Pipelines tab. + + + +Select a pipeline to open the Pipeline Details page and show +the jobs that were run for that pipeline. From here you can cancel a running pipeline, +retry jobs on a failed pipeline, or delete a pipeline. + +A link to the latest pipeline for the last commit of a given branch is available at +/project/-/pipelines/[branch]/latest. Also, /project/-/pipelines/latest redirects +you to the latest pipeline for the last commit on the project’s default branch. + +Starting in GitLab 13.0, +you can filter the pipeline list by: + + + Trigger author + Branch name + Status (GitLab 13.1 and later) + Tag (GitLab 13.1 and later) + Source (GitLab 14.3 and later) + + +Starting in GitLab 14.2, you can change the +pipeline column to display the pipeline ID or the pipeline IID. + +If you use VS Code to edit your GitLab CI/CD configuration, the +GitLab Workflow VS Code extension helps you +validate your configuration +and view your pipeline status. + +Run a pipeline manually + + +Pipelines can be manually executed, with predefined or manually-specified variables. + +You might do this if the results of a pipeline (for example, a code build) are required outside the standard +operation of the pipeline. + +To execute a pipeline manually: + + + On the left sidebar, select Search or go to and find your project. + Select Build > Pipelines. + Select Run pipeline. + In the Run for branch name or tag field, select the branch or tag to run the pipeline for. + Enter any CI/CD variables required for the pipeline to run. +You can set specific variables to have their values prefilled in the form. + Select Run pipeline. + + +The pipeline now executes the jobs as configured. + +Prefill variables in manual pipelines + + + +History + + + + + +Introduced in GitLab 13.7. + + + + + + +You can use the description and value +keywords to define pipeline-level (global) variables +that are prefilled when running a pipeline manually. Use the description to explain +information such as what the variable is used for, and what the acceptable values are. + +Job-level variables cannot be pre-filled. + +In manually-triggered pipelines, the Run pipeline page displays all pipeline-level variables +that have a description defined in the .gitlab-ci.yml file. The description displays +below the variable. + +You can change the prefilled value, which overrides the value for that single pipeline run. +Any variables overridden by using this process are expanded +and not masked. +If you do not define a value for the variable in the configuration file, the variable name is still listed, +but the value field is blank. 
+ +For example: + +variables: + DEPLOY_CREDENTIALS: + description: ""The deployment credentials."" + DEPLOY_ENVIRONMENT: + description: ""Select the deployment target. Valid options are: 'canary', 'staging', 'production', or a stable branch of your choice."" + value: ""canary"" + + +In this example: + + + +DEPLOY_CREDENTIALS is listed in the Run pipeline page, but with no value set. +The user is expected to define the value each time the pipeline is run manually. + +DEPLOY_ENVIRONMENT is pre-filled in the Run pipeline page with canary as the default value, +and the message explains the other options. + + + + note Because of a known issue, projects that use compliance pipelines can have prefilled variables not appear +when running a pipeline manually. To workaround this issue, +change the compliance pipeline configuration. + + +Configure a list of selectable prefilled variable values + + + +History + + + + + +Introduced in GitLab 15.5 with a flag named run_pipeline_graphql. Disabled by default. + The options keyword was introduced in GitLab 15.7. + +Generally available in GitLab 15.7. Feature flag run_pipeline_graphql removed. + The variables list sometimes did not populate correctly due to a bug, which was resolved in GitLab 15.9. + + + + + + +You can define an array of CI/CD variable values the user can select from when running a pipeline manually. +These values are in a dropdown list in the Run pipeline page. Add the list of +value options to options and set the default value with value. The string in value +must also be included in the options list. + +For example: + +variables: + DEPLOY_ENVIRONMENT: + value: ""staging"" + options: + - ""production"" + - ""staging"" + - ""canary"" + description: ""The deployment target. Set to 'staging' by default."" + + +Run a pipeline by using a URL query string + + + +History + + + + + +Introduced in GitLab 12.5. + + + + + + +You can use a query string to pre-populate the Run Pipeline page. For example, the query string +.../pipelines/new?ref=my_branch&var[foo]=bar&file_var[file_foo]=file_bar pre-populates the +Run Pipeline page with: + + + +Run for field: my_branch. + +Variables section: + + Variable: + + Key: foo + + Value: bar + + + + File: + + Key: file_foo + + Value: file_bar + + + + + + + +The format of the pipelines/new URL is: + +.../pipelines/new?ref=&var[]=&file_var[]= + + +The following parameters are supported: + + + +ref: specify the branch to populate the Run for field with. + +var: specify a Variable variable. + +file_var: specify a File variable. + + +For each var or file_var, a key and value are required. + +Add manual interaction to your pipeline + + +Manual jobs, +allow you to require manual interaction before moving forward in the pipeline. + +You can do this straight from the pipeline graph. Just select the play button +to execute that particular job. + +For example, your pipeline can start automatically, but require a manual action to +deploy to production. +In the example below, the production stage has a job with a manual action: + + + +Start all manual jobs in a stage + + +If a stage contains only manual jobs, you can start all the jobs at the same time +by selecting Play all manual ( ) above the stage. If the stage contains +non-manual jobs, the option is not displayed. + +Skip a pipeline + + +To push a commit without triggering a pipeline, add [ci skip] or [skip ci], using any +capitalization, to your commit message. + +Alternatively, if you are using Git 2.10 or later, use the ci.skip Git push option. 
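For example, with a placeholder commit message and branch name:

# Skip CI with a commit message flag (any capitalization of [ci skip] or [skip ci] works)
git commit -m 'Update docs [skip ci]'

# Or skip CI with the push option (requires Git 2.10 or later)
git push -o ci.skip origin my-feature-branch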
+The ci.skip push option does not skip merge request +pipelines. + +Delete a pipeline + + + +History + + + + + +Introduced in GitLab 12.7. + + + + + + +Users with the Owner role for a project can delete a pipeline +by selecting the pipeline in the Build > Pipelines to get to the Pipeline Details +page, then selecting Delete. + + + +Deleting a pipeline does not automatically delete its +child pipelines. +See the related issue +for details. + + + caution Deleting a pipeline expires all pipeline caches, and deletes all immediately +related objects, such as builds, logs, artifacts, and triggers. +This action cannot be undone. + + + +Pipeline security on protected branches + + +A strict security model is enforced when pipelines are executed on +protected branches. + +The following actions are allowed on protected branches if the user is +allowed to merge or push +to that specific branch: + + + Run manual pipelines (using the Web UI or pipelines API). + Run scheduled pipelines. + Run pipelines using triggers. + Run on-demand DAST scan. + Trigger manual actions on existing pipelines. + Retry or cancel existing jobs (using the Web UI or pipelines API). + + +Variables marked as protected are accessible to jobs that run in pipelines for protected branches. Only assign users the right to merge to protected branches if they have permission to access sensitive information like deployment credentials and tokens. + +Runners marked as protected can run jobs only on protected +branches, preventing untrusted code from executing on the protected runner and +preserving deployment keys and other credentials from being unintentionally +accessed. To ensure that jobs intended to be executed on protected +runners do not use regular runners, they must be tagged accordingly. + +Review the deployment safety +page for additional security recommendations for securing your pipelines. + +Trigger a pipeline when an upstream project is rebuilt + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + +Introduced in GitLab 12.8. + + + + + + +You can trigger a pipeline in your project whenever a pipeline finishes for a new +tag in a different project. + +Prerequisites: + + + The upstream project must be public. + The user must have the Developer role +in the upstream project. + + +To trigger the pipeline when the upstream project is rebuilt: + + + On the left sidebar, select Search or go to and find your project. + Select Settings > CI/CD. + Expand Pipeline subscriptions. + Select Add project. + Enter the project you want to subscribe to, in the format /. +For example, if the project is https://gitlab.com/gitlab-org/gitlab, use gitlab-org/gitlab. + Select Subscribe. + + +Any pipelines that complete successfully for new tags in the subscribed project +now trigger a pipeline on the current project’s default branch. The maximum +number of upstream pipeline subscriptions is 2 by default, for both the upstream and +downstream projects. On self-managed instances, an administrator can change this +limit. + +How pipeline duration is calculated + + +The total running time for a given pipeline excludes: + + + The duration of the initial run for any job that is retried or manually re-run. + Any pending (queue) time. + + +That means that if a job is retried or manually re-run, only the duration of the latest run is included in the total running time. + +Each job is represented as a Period, which consists of: + + + +Period#first (when the job started). 
+ +Period#last (when the job finished). + + +A simple example is: + + + A (0, 2) + A’ (2, 4) + + This is retrying A + + + B (1, 3) + C (6, 7) + + +In the example: + + + A begins at 0 and ends at 2. + A’ begins at 2 and ends at 4. + B begins at 1 and ends at 3. + C begins at 6 and ends at 7. + + +Visually, it can be viewed as: + +0 1 2 3 4 5 6 7 +AAAAAAA + BBBBBBB + A'A'A'A + CCCC + + +Because A is retried, we ignore it and count only job A’. +The union of B, A’, and C is (1, 4) and (6, 7). Therefore, the total +running time is: + +(4 - 1) + (7 - 6) => 4 + + +Visualize pipelines + + +Pipelines can be complex structures with many sequential and parallel jobs. + +To make it easier to understand the flow of a pipeline, GitLab has pipeline graphs for viewing pipelines +and their statuses. + +Pipeline graphs can be displayed as a large graph or a miniature representation, depending on the page you +access the graph from. + +GitLab capitalizes the stages’ names in the pipeline graphs. + +View full pipeline graph + + + +History + + + + + Visualization improvements introduced in GitLab 13.11. + + + + + + +The pipeline details page displays the full pipeline graph of +all the jobs in the pipeline. + +You can group the jobs by: + + + + Stage, which arranges jobs in the same stage together in the same column: + + + + + Job dependencies, which arranges +jobs based on their needs dependencies. + + + +Multi-project pipeline graphs help you visualize the entire pipeline, including all cross-project inter-dependencies. + +If a stage contains more than 100 jobs, only the first 100 jobs are listed in the +pipeline graph. The remaining jobs still run as usual. To see the jobs: + + + Select the pipeline, and the jobs are listed on the right side of the pipeline details page. + On the left sidebar, select Build > Jobs. + + +View job dependencies in the pipeline graph + + + +History + + + + + +Introduced in GitLab 13.12. + +Enabled by default in GitLab 14.0. + +Feature flag removed in GitLab 14.2. + + + + + + +To arrange jobs in the pipeline graph based on their needs +dependencies, select Job dependencies in the Group jobs by section. This option +is available for pipelines with 3 or more jobs with needs job dependencies. + +Jobs in the leftmost column run first, and jobs that depend on them are grouped in the next columns. + +For example, test-job1 depends only on jobs in the first column, so it displays +in the second column from the left. deploy-job1 depends on jobs in both the first +and second column and displays in the third column: + + + +To add lines that show the needs relationships between jobs, select the Show dependencies toggle. +These lines are similar to the needs visualization: + + + +To see the full needs dependency tree for a job, hover over it: + + + +Pipeline mini graphs + + +Pipeline mini graphs take less space and can tell you at a +quick glance if all jobs passed or something failed. The pipeline mini graph can +be found when you go to: + + + The pipelines index page. + A single commit page. + A merge request page. + The pipeline editor, in GitLab 14.5 and later. + + +Pipeline mini graphs allow you to see all related jobs for a single commit and the net result +of each stage of your pipeline. This allows you to quickly see what failed and +fix it. + +Pipeline mini graphs only display jobs by stage. + +Stages in pipeline mini graphs are expandable. Hover your mouse over each stage to see the name and status, and select a stage to expand its jobs list. 
+ + + + + Mini graph + Mini graph expanded + + + + + + + + + + +Pipeline success and duration charts + + +Pipeline analytics are available on the CI/CD Analytics page. + +Pipeline badges + + +Pipeline status and test coverage report badges are available and configurable for each project. +For information on adding pipeline badges to projects, see Pipeline badges. + +Pipelines API + + +GitLab provides API endpoints to: + + + Perform basic functions. For more information, see Pipelines API. + Maintain pipeline schedules. For more information, see Pipeline schedules API. + Trigger pipeline runs. For more information, see: + + +Triggering pipelines through the API. + +Pipeline triggers API. + + + + + +" +can i proxy to maven central using the maven package regsitry?,,"1. Maven packages in the package registry + + + +Maven packages in the package registry + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +Publish Maven artifacts in your project’s package registry. +Then, install the packages whenever you need to use them as a dependency. + +For documentation of the specific API endpoints that the Maven package manager +client uses, see the Maven API documentation. + +Supported clients: + + + +mvn. Learn how to build a Maven package. + +gradle. Learn how to build a Gradle package. + +sbt. + + +Publish to the GitLab package registry + + +Authenticate to the package registry + + +You need a token to publish a package. There are different tokens available depending on what you’re trying to achieve. For more information, review the guidance on tokens. + +Create a token and save it to use later in the process. + +Do not use authentication methods other than the methods documented here. Undocumented authentication methods might be removed in the future. + +Edit the client configuration + + +Update your configuration to authenticate to the Maven repository with HTTP. + +Custom HTTP header + + +You must add the authentication details to the configuration file +for your client. + +mvngradle + + + Token type + Name must be + Token + + + + + Personal access token + Private-Token + Paste token as-is, or define an environment variable to hold the token + + + Deploy token + Deploy-Token + Paste token as-is, or define an environment variable to hold the token + + + CI Job token + Job-Token + ${CI_JOB_TOKEN} + + + + note The field must be named to match the token you chose. +Add the following section to your +settings.xml file. + + + gitlab-maven + + + + REPLACE_WITH_NAME + REPLACE_WITH_TOKEN + + + + + + + + + + Token type + Name must be + Token + + + + + Personal access token + Private-Token + Paste token as-is, or define an environment variable to hold the token + + + Deploy token + Deploy-Token + Paste token as-is, or define an environment variable to hold the token + + + CI Job token + Job-Token + System.getenv(""CI_JOB_TOKEN"") + + + + note The field must be named to match the token you chose. 
+In your GRADLE_USER_HOME directory, +create a file gradle.properties with the following content:gitLabPrivateToken=REPLACE_WITH_YOUR_TOKEN +Add a repositories section to your +build.gradle +file: + + In Groovy DSL: + + +repositories { + maven { + url ""https://gitlab.example.com/api/v4/groups//-/packages/maven"" + name ""GitLab"" + credentials(HttpHeaderCredentials) { + name = 'REPLACE_WITH_NAME' + value = gitLabPrivateToken + } + authentication { + header(HttpHeaderAuthentication) + } + } +} + + + + In Kotlin DSL: + + +repositories { + maven { + url = uri(""https://gitlab.example.com/api/v4/groups//-/packages/maven"") + name = ""GitLab"" + credentials(HttpHeaderCredentials::class) { + name = ""REPLACE_WITH_NAME"" + value = findProperty(""gitLabPrivateToken"") as String? + } + authentication { + create(""header"", HttpHeaderAuthentication::class) + } + } +} + + + + +Basic HTTP Authentication + + +You can also use basic HTTP authentication to authenticate to the Maven package registry. + +mvngradlesbt + + + Token type + Name must be + Token + + + + + Personal access token + The username of the user + Paste token as-is, or define an environment variable to hold the token + + + Deploy token + The username of deploy token + Paste token as-is, or define an environment variable to hold the token + + + CI Job token + gitlab-ci-token + ${CI_JOB_TOKEN} + + +Add the following section to your +settings.xml file. + + + gitlab-maven + REPLACE_WITH_NAME + REPLACE_WITH_TOKEN + + + REPLACE_WITH_NAME + REPLACE_WITH_TOKEN + + + + + + + + + Token type + Name must be + Token + + + + + Personal access token + The username of the user + Paste token as-is, or define an environment variable to hold the token + + + Deploy token + The username of deploy token + Paste token as-is, or define an environment variable to hold the token + + + CI Job token + gitlab-ci-token + System.getenv(""CI_JOB_TOKEN"") + + +In your GRADLE_USER_HOME directory, +create a file gradle.properties with the following content:gitLabPrivateToken=REPLACE_WITH_YOUR_TOKEN +Add a repositories section to your +build.gradle. + + In Groovy DSL: + + +repositories { + maven { + url ""https://gitlab.example.com/api/v4/groups//-/packages/maven"" + name ""GitLab"" + credentials(PasswordCredentials) { + username = 'REPLACE_WITH_NAME' + password = gitLabPrivateToken + } + authentication { + basic(BasicAuthentication) + } + } +} + + + + In Kotlin DSL: + + +repositories { + maven { + url = uri(""https://gitlab.example.com/api/v4/groups//-/packages/maven"") + name = ""GitLab"" + credentials(BasicAuthentication::class) { + username = ""REPLACE_WITH_NAME"" + password = findProperty(""gitLabPrivateToken"") as String? + } + authentication { + create(""basic"", BasicAuthentication::class) + } + } +} + + + + + + Token type + Name must be + Token + + + + + Personal access token + The username of the user + Paste token as-is, or define an environment variable to hold the token + + + Deploy token + The username of deploy token + Paste token as-is, or define an environment variable to hold the token + + + CI Job token + gitlab-ci-token + sys.env.get(""CI_JOB_TOKEN"").get + + +Authentication for SBT is based on +basic HTTP Authentication. +You must to provide a name and a password. + note The name field must be named to match the token you chose. +To install a package from the Maven GitLab package registry by using sbt, you must configure +a Maven resolver. +If you’re accessing a private or an internal project or group, you need to set up +credentials. 
+After configuring the resolver and authentication, you can install a package +from a project, group, or namespace.In your build.sbt, add the following lines:resolvers += (""gitlab"" at """") + +credentials += Credentials(""GitLab Packages Registry"", """", """", """") +In this example: + + is the endpoint URL. +Example: https://gitlab.example.com/api/v4/projects//packages/maven. + + is the host present in the without the protocol +scheme or the port. Example: gitlab.example.com. + + and are explained in the table above. + + +Naming convention + + +You can use one of three endpoints to install a Maven package. You must publish a package to a project, but the endpoint you choose determines the settings you add to your pom.xml file for publishing. + +The three endpoints are: + + + +Project-level: Use when you have a few Maven packages and they are not in the same GitLab group. + +Group-level: Use when you want to install packages from many different projects in the same GitLab group. GitLab does not guarantee the uniqueness of package names within the group. You can have two projects with the same package name and package version. As a result, GitLab serves whichever one is more recent. + +Instance-level: Use when you have many packages in different GitLab groups or in their own namespace. + + +For the instance-level endpoint, ensure the relevant section of your pom.xml in Maven looks like this: + + group-slug.subgroup-slug + project-slug + + +Only packages that have the same path as the project are exposed by the instance-level endpoint. + + + + + Project + Package + Instance-level endpoint available + + + + + foo/bar + foo/bar/1.0-SNAPSHOT + Yes + + + gitlab-org/gitlab + foo/bar/1.0-SNAPSHOT + No + + + gitlab-org/gitlab + gitlab-org/gitlab/1.0-SNAPSHOT + Yes + + + + +Endpoint URLs + + + + + + Endpoint + Endpoint URL for pom.xml + + Additional information + + + + + Project + https://gitlab.example.com/api/v4/projects//packages/maven + Replace gitlab.example.com with your domain name. Replace with your project ID, found on your project overview page. + + + Group + https://gitlab.example.com/api/v4/groups//-/packages/maven + Replace gitlab.example.com with your domain name. Replace with your group ID, found on your group’s homepage. + + + Instance + https://gitlab.example.com/api/v4/packages/maven + Replace gitlab.example.com with your domain name. + + + + +Edit the configuration file for publishing + + +You must add publishing details to the configuration file for your client. + +mvngradleNo matter which endpoint you choose, you must have: + A project-specific URL in the distributionManagement section. + A repository and distributionManagement section. +The relevant repository section of your pom.xml in Maven should look like this: + + gitlab-maven + + + + + + gitlab-maven + https://gitlab.example.com/api/v4/projects//packages/maven + + + gitlab-maven + https://gitlab.example.com/api/v4/projects//packages/maven + + + + The id is what you defined in settings.xml. + The depends on which endpoint you choose. + Replace gitlab.example.com with your domain name. 
+To publish a package by using Gradle: + + Add the Gradle plugin maven-publish to the plugins section: + + + + In Groovy DSL: + + +plugins { + id 'java' + id 'maven-publish' +} + + + + In Kotlin DSL: + + +plugins { + java + `maven-publish` +} + + + + + + Add a publishing section: + + + + In Groovy DSL: + + +publishing { + publications { + library(MavenPublication) { + from components.java + } + } + repositories { + maven { + url ""https://gitlab.example.com/api/v4/projects//packages/maven"" + credentials(HttpHeaderCredentials) { + name = ""REPLACE_WITH_TOKEN_NAME"" + value = gitLabPrivateToken // the variable resides in $GRADLE_USER_HOME/gradle.properties + } + authentication { + header(HttpHeaderAuthentication) + } + } + } +} + + + + In Kotlin DSL: + + +publishing { + publications { + create(""library"") { + from(components[""java""]) + } + } + repositories { + maven { + url = uri(""https://gitlab.example.com/api/v4/projects//packages/maven"") + credentials(HttpHeaderCredentials::class) { + name = ""REPLACE_WITH_TOKEN_NAME"" + value = + findProperty(""gitLabPrivateToken"") as String? // the variable resides in $GRADLE_USER_HOME/gradle.properties + } + authentication { + create(""header"", HttpHeaderAuthentication::class) + } + } + } +} + + + + + + +Publish a package + + + + caution Using the DeployAtEnd option can cause an upload to be rejected with 400 bad request {""message"":""Validation failed: Name has already been taken""}. For more details, +see issue 424238. + + +After you have set up the authentication +and chosen an endpoint for publishing, +publish a Maven package to your project. + +mvngradlesbtTo publish a package by using Maven:mvn deploy +If the deploy is successful, the build success message should be displayed:... +[INFO] BUILD SUCCESS +... +The message should also show that the package was published to the correct location:Uploading to gitlab-maven: https://example.com/api/v4/projects/PROJECT_ID/packages/maven/com/mycompany/mydepartment/my-project/1.0-SNAPSHOT/my-project-1.0-20200128.120857-1.jar +Run the publish task:gradle publish +Go to your project’s Packages and registries page and view the published packages.Configure the publishTo setting in your build.sbt file:publishTo := Some(""gitlab"" at """") +Ensure the credentials are referenced correctly. See the sbt documentation for more information.To publish a package using sbt:sbt publish +If the deploy is successful, the build success message is displayed:[success] Total time: 1 s, completed Jan 28, 2020 12:08:57 PM +Check the success message to ensure the package was published to the +correct location:[info] published my-project_2.12 to https://gitlab.example.com/api/v4/projects/PROJECT_ID/packages/maven/com/mycompany/my-project_2.12/0.1.1-SNAPSHOT/my-project_2.12-0.1.1-SNAPSHOT.pom + + +Install a package + + +To install a package from the GitLab package registry, you must configure +the remote and authenticate. +When this is completed, you can install a package from a project, +group, or namespace. + +If multiple packages have the same name and version, when you install +a package, the most recently-published package is retrieved. + +In case there are not enough permissions to read the most recently-published +package than 403 Forbidden is returning. + +mvngradlesbtTo install a package by using mvn install: + + Add the dependency manually to your project pom.xml file. 
+To add the example created earlier, the XML would be: + + + + com.mycompany.mydepartment + my-project + 1.0-SNAPSHOT + + + + + In your project, run the following: + + +mvn install + + +The message should show that the package is downloading from the package registry:Downloading from gitlab-maven: http://gitlab.example.com/api/v4/projects/PROJECT_ID/packages/maven/com/mycompany/mydepartment/my-project/1.0-SNAPSHOT/my-project-1.0-20200128.120857-1.pom +You can also install packages by using the Maven dependency:get command directly. + + In your project directory, run: + + +mvn dependency:get -Dartifact=com.nickkipling.app:nick-test-app:1.1-SNAPSHOT -DremoteRepositories=gitlab-maven:::: -s + + + + + is the URL of the GitLab endpoint. + + is the path to the settings.xml file that contains the authentication details. + + + + note The repository IDs in the command(gitlab-maven) and the settings.xml file must match. +The message should show that the package is downloading from the package registry:Downloading from gitlab-maven: http://gitlab.example.com/api/v4/projects/PROJECT_ID/packages/maven/com/mycompany/mydepartment/my-project/1.0-SNAPSHOT/my-project-1.0-20200128.120857-1.pom +To install a package by using gradle: + + Add a dependency to build.gradle in the dependencies section: + + + + In Groovy DSL: + + +dependencies { + implementation 'com.mycompany.mydepartment:my-project:1.0-SNAPSHOT' +} + + + + In Kotlin DSL: + + +dependencies { + implementation(""com.mycompany.mydepartment:my-project:1.0-SNAPSHOT"") +} + + + + + + In your project, run the following: + + +gradle install + + +To install a package by using sbt: + + Add an inline dependency to build.sbt: + + +libraryDependencies += ""com.mycompany.mydepartment"" % ""my-project"" % ""8.4"" + + + + In your project, run the following: + + +sbt update + + + + +Helpful hints + + +Publishing a package with the same name or version + + +When you publish a package with the same name and version as an existing package, the new package +files are added to the existing package. You can still use the UI or API to access and view the +existing package’s older assets. + +To delete older package versions, consider using the Packages API or the UI. + +Do not allow duplicate Maven packages + + + +History + + + + + Required role changed from Developer to Maintainer in GitLab 15.0. + + + + + + +To prevent users from publishing duplicate Maven packages, you can use the GraphQl API or the UI. + +In the UI: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > Packages and registries. + In the Maven row of the Duplicate packages table, turn off the Allow duplicates toggle. + Optional. In the Exceptions text box, enter a regular expression that matches the names and versions of packages to allow. + + +Your changes are automatically saved. + +Request forwarding to Maven Central + + + + By default this feature is not available for self-managed. To make it available, an administrator can enable the feature flag named maven_central_request_forwarding. +This feature is not available for GitLab.com or GitLab Dedicated users. + + +When a Maven package is not found in the package registry, the request is forwarded +to Maven Central. + +When the feature flag is enabled, administrators can disable this behavior in the +Continuous Integration settings. + +Maven forwarding is restricted to only the project level and +group level endpoints. 
The instance level endpoint +has naming restrictions that prevent it from being used for packages that don’t follow that convention and also +introduces too much security risk for supply-chain style attacks. + +Additional configuration for mvn + + +When using mvn, there are many ways to configure your Maven project so that it requests packages +in Maven Central from GitLab. Maven repositories are queried in a +specific order. +By default, Maven Central is usually checked first through the +Super POM, so +GitLab needs to be configured to be queried before maven-central. + +To ensure all package requests are sent to GitLab instead of Maven Central, +you can override Maven Central as the central repository by adding a +section to your settings.xml: + + + + + central-proxy + + + + Private-Token + + + + + + + + + central-proxy + GitLab proxy of central repo + https://gitlab.example.com/api/v4/projects//packages/maven + central + + + + + +Create Maven packages with GitLab CI/CD + + +After you have configured your repository to use the Package Repository for Maven, +you can configure GitLab CI/CD to build new packages automatically. + +mvngradleYou can create a new package each time the default branch is updated. + + Create a ci_settings.xml file that serves as Maven’s settings.xml file. + + + Add the server section with the same ID you defined in your pom.xml file. +For example, use gitlab-maven as the ID: + + + + + + gitlab-maven + + + + Job-Token + ${CI_JOB_TOKEN} + + + + + + + + + + Make sure your pom.xml file includes the following. +You can either let Maven use the predefined CI/CD variables, as shown in this example, +or you can hard code your server’s hostname and project’s ID. + + + + + gitlab-maven + ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/maven + + + + + gitlab-maven + ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/maven + + + gitlab-maven + ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/maven + + + + + + Add a deploy job to your .gitlab-ci.yml file: + + +deploy: + image: maven:3.6-jdk-11 + script: + - 'mvn deploy -s ci_settings.xml' + rules: + - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + + + + Push those files to your repository. + +The next time the deploy job runs, it copies ci_settings.xml to the +user’s home location. In this example: + The user is root, because the job runs in a Docker container. + Maven uses the configured CI/CD variables. +You can create a package each time the default branch +is updated. + + Authenticate with a CI job token in Gradle. + + + Add a deploy job to your .gitlab-ci.yml file: + + +deploy: + image: gradle:6.5-jdk11 + script: + - 'gradle publish' + rules: + - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + + + + Commit files to your repository. + +When the pipeline is successful, the Maven package is created. + +Version validation + + +The version string is validated by using the following regex. + +\A(?!.*\.\.)[\w+.-]+\z + + +You can experiment with the regex and try your version strings on this regular expression editor. + +Useful Maven command-line options + + +There are some Maven command-line options +that you can use when performing tasks with GitLab CI/CD. + + + + File transfer progress can make the CI logs hard to read. +Option -ntp,--no-transfer-progress was added in +3.6.1. +Alternatively, look at -B,--batch-mode +or lower level logging changes. 
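For example, a job that combines both logging options might look like this; the job name is arbitrary:

package:
  script:
    - 'mvn --no-transfer-progress --batch-mode package'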
+ + + Specify where to find the pom.xml file (-f,--file): + + +package: + script: + - 'mvn --no-transfer-progress -f helloworld/pom.xml package' + + + + Specify where to find the user settings (-s,--settings) instead of +the default location. There’s also a -gs,--global-settings option: + + +package: + script: + - 'mvn -s settings/ci.xml package' + + + + +Supported CLI commands + + +The GitLab Maven repository supports the following CLI commands: + +mvngradle + +mvn deploy: Publish your package to the package registry. + +mvn install: Install packages specified in your Maven project. + +mvn dependency:get: Install a specific package. + + +gradle publish: Publish your package to the package registry. + +gradle install: Install packages specified in your Gradle project. + + +Troubleshooting + + +To improve performance, clients cache files related to a package. If you encounter issues, clear +the cache with these commands: + +mvngradlerm -rf ~/.m2/repository +rm -rf ~/.gradle/caches # Or replace ~/.gradle with your custom GRADLE_USER_HOME + + +Review network trace logs + + +If you are having issues with the Maven Repository, you may want to review network trace logs. + +For example, try to run mvn deploy locally with a PAT token and use these options: + +mvn deploy \ +-Dorg.slf4j.simpleLogger.log.org.apache.maven.wagon.providers.http.httpclient=trace \ +-Dorg.slf4j.simpleLogger.log.org.apache.maven.wagon.providers.http.httpclient.wire=trace + + + + caution When you set these options, all network requests are logged and a large amount of output is generated. + + +Verify your Maven settings + + +If you encounter issues within CI/CD that relate to the settings.xml file, try adding +an additional script task or job to verify the effective settings. + +The help plugin can also provide +system properties, including environment variables: + +mvn-settings: + script: + - 'mvn help:effective-settings' + +package: + script: + - 'mvn help:system' + - 'mvn package' + + + +2. Dependency proxy for packages + + + +Dependency proxy for packages + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + +Introduced in GitLab 16.6 with a flag named packages_dependency_proxy_maven. Disabled by default. + +Generally available in GitLab 16.8. Feature flag packages_dependency_proxy_maven removed. + + + + + + +The GitLab dependency proxy for packages is a local proxy for frequently pulled packages. +It is implemented as a pull-through cache that works at the project level. + +Packages are pulled from the upstream package registry and automatically published to the +project’s package registry. Subsequent identical requests are fulfilled with the project’s +package registry. You can use the dependency proxy for packages to reduce unnecessary traffic +to the upstream registry. + +Enable the dependency proxy + + +To use the dependency proxy for packages, ensure your project is configured properly, +and that users who pull from the cache have the necessary authentication: + + + In the global configuration, if the following features are disabled, enable them: + + The package feature. Enabled by default. + The dependency_proxy feature. Enabled by default. + + + In the project settings, if the package feature +is disabled, enable it. It is enabled by default. + +Add an authentication method. 
The dependency proxy supports the same authentication methods as the package registry: + + Personal access token + Project deploy token + Group deploy token + Job token + + + + +Advanced caching + + +When possible, the dependency proxy for packages uses advanced caching to store packages in the project’s package registry. + +Advanced caching verifies the coherence between the project’s package registry +and the upstream package registry. If the upstream registry has updated files, +the dependency proxy uses them to update the cached files. + +When advanced caching is not supported, the dependency proxy falls back to the default behavior: + + + If the requested file is found in the project’s package registry, it is returned. + If the file is not found, it is fetched from the upstream package registry. + + +Advanced caching support depends on how the upstream package registry +responds to dependency proxy requests, and on +which package format you use. + +Maven + + + Package registry + Advanced caching supported? + + + + + GitLab + + Yes + + + Maven Central + + Yes + + + Artifactory + + Yes + + + Sonatype Nexus + + Yes + + + GitHub Packages + + No + + + + +Permissions + + +When the dependency proxy pulls a file, the following occurs: + + + The dependency proxy searches for a file in the project’s package registry. +This is a read operation. + The dependency proxy might publish a package file to the project’s package registry. +This is a write operation. + + +Whether both steps are executed depends on user permissions. +The dependency proxy uses the same permissions as the package registry. + + + + + Project visibility + Minimum role + + Can read package files? + Can write package files? + Behavior + + + + + Public + Anonymous + + No + + No + Request rejected. + + + Public + Guest + + Yes + + No + Package file returned from either the cache or the remote registry. + + + Public + Developer + + Yes + + Yes + Package file returned from either the cache or the remote registry. The file is published to the cache. + + + Internal + Anonymous + + No + + No + Request rejected + + + Internal + Guest + + Yes + + No + Package file returned from either the cache or the remote registry. + + + Internal + Developer + + Yes + + Yes + Package file returned from either the cache or the remote registry. The file is published to the cache. + + + Private + Anonymous + + No + + No + Request rejected + + + Private + Reporter + + Yes + + No + Package file returned from either the cache or the remote registry. + + + Internal + Developer + + Yes + + Yes + Package file returned from either the cache or the remote registry. The file is published to the cache. + + + + +At a minimum, any user who can use the dependency proxy can also use the project’s package registry. + +To ensure the cache is properly filled over time, you should make sure a user with at least the Developer role pulls packages with the dependency proxy. + +Configure a client + + +Configuring a client for the dependency proxy is similar to configuring a client for the package registry. + +For Maven packages + + +For Maven packages, all clients supported by the package registry are supported by the dependency proxy: + + + mvn + gradle + sbt + + +For authentication, you can use all methods accepted by the Maven package registry. +You should use the Basic HTTP authentication method as it is less complex. + +To configure the client: + + + + Follow the instructions in Basic HTTP authentication. 
+ + Make sure you use the endpoint URL https://gitlab.example.com/api/v4/projects//dependency_proxy/packages/maven. + + + Complete the configuration for your client: + + + +mvngradlesbtBasic HTTP authentication is accepted. +However, you should use the custom HTTP header authentication, +so that mvn uses fewer network requests.In the pom.xml file add a repository element: + + gitlab-maven + https://gitlab.example.com/api/v4/projects//dependency_proxy/packages/maven + + +Where: + + is the ID of the project to be used as a dependency proxy. + + contains the name of the used in the authentication configuration. +By default, Maven Central is checked first through the Super POM. +However, you might want to force mvn to check the GitLab endpoint first. To do this, follow the instructions from the request forward.Add a repositories section to your build.gradle file. + + In Groovy DSL: + + +repositories { + maven { + url ""https://gitlab.example.com/api/v4/projects//dependency_proxy/packages/maven"" + name ""GitLab"" + credentials(PasswordCredentials) { + username = 'REPLACE_WITH_NAME' + password = gitLabPrivateToken + } + authentication { + basic(BasicAuthentication) + } + } +} + + + + In Kotlin DSL: + + +repositories { + maven { + url = uri(""https://gitlab.example.com/api/v4/projects//dependency_proxy/packages/maven"") + name = ""GitLab"" + credentials(BasicAuthentication::class) { + username = ""REPLACE_WITH_NAME"" + password = findProperty(""gitLabPrivateToken"") as String? + } + authentication { + create(""basic"", BasicAuthentication::class) + } + } +} + + +In this example: + + is the ID of the project to be used as a dependency proxy. + +REPLACE_WITH_NAME is explained in the Basic HTTP authentication section. +In your build.sbt, add the following lines:resolvers += (""gitlab"" at ""https://gitlab.example.com/api/v4/projects//dependency_proxy/packages/maven"") + +credentials += Credentials(""GitLab Packages Registry"", """", """", """") +In this example: + + is the ID of the project to be used as a dependency proxy. + + is the host present in the without the protocol scheme or the port. Example: gitlab.example.com. + + and are explained in the Basic HTTP authentication section. + + +Configure the remote registry + + +The dependency proxy must be configured with: + + + The URL of the remote package registry. + Optional. The required credentials. + + +To set those parameters: + + + On the left sidebar, select Search or go to and find your project. + Select Settings > Packages and registries. + Under Dependency Proxy, complete the form for your package format: + + +MavenAny Maven package registry can be connected to the dependency proxy. You can +authorize the connection with the Maven package registry username and password.To set or update the remote Maven package registry, update the following fields +in the form: + +URL - The URL of the remote registry. + +Username - Optional. The username to use with the remote registry. + +Password - Optional. The password to use with the remote registry. +You must either set both the username and password, or leave both fields empty. + +Troubleshooting + + +Manual file pull errors + + +You can pull files manually with cURL. +However, you might encounter one of the following responses: + + + +404 Not Found - The dependency proxy setting object was not found because it doesn’t exist, or because the requirements were not fulfilled. 
+ +401 Unauthorized - The user was properly authenticated but did not have the proper permissions to access the dependency proxy object. + +403 Forbidden - There was an issue with the GitLab license level. + +502 Bad Gateway - The remote package registry could not fulfill the file request. Verify the dependency proxy settings. + +504 Gateway Timeout - The remote package registry timed out. Verify the dependency proxy settings. + + +Mavencurl --verbose ""https://:@gitlab.example.com/api/v4/projects//dependency_proxy/packages/maven///"" + + + and are the credentials to access the dependency proxy of the GitLab instance. + + is the project ID. + + are the Maven package group ID and artifact ID joined with a forward slash. + + is the package version. + +file_name is the exact name of the file. +For example, given a package with: + group ID: com.my_company. + artifact ID: my_package. + version: 1.2.3. +The request to manually pull a package is:curl --verbose ""https://:@gitlab.example.com/api/v4/projects//dependency_proxy/packages/maven/com/my_company/my_package/1.2.3/my_package-1.2.3.pom"" + + + +" +how do i set up gitlba pages,,"1. Tutorial: Create a GitLab Pages website from scratch + + + +Tutorial: Create a GitLab Pages website from scratch + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +This tutorial shows you how to create a Pages site from scratch using +the Jekyll Static Site Generator (SSG). You start with +a blank project and create your own CI/CD configuration file, which gives +instructions to a runner. When your CI/CD +pipeline runs, the Pages site is created. + +This example uses Jekyll, but other SSGs follow similar steps. +You do not need to be familiar with Jekyll or SSGs +to complete this tutorial. + +To create a GitLab Pages website: + + + Step 1: Create the project files + Step 2: Choose a Docker image + Step 3: Install Jekyll + Step 4: Specify the public directory for output + Step 5: Specify the public directory for artifacts + Step 6: Deploy and view your website + + +Prerequisites + + +You must have a blank project in GitLab. + +Create the project files + + +Create three files in the root (top-level) directory: + + + + .gitlab-ci.yml: A YAML file that contains the commands you want to run. +For now, leave the file’s contents blank. + + + index.html: An HTML file you can populate with whatever HTML content +you’d like, for example: + + + + + Home + + +

<html>
<head>
  <title>Home</title>
</head>
<body>
  <h1>Hello World!</h1>
</body>
</html>
+ +First, add a workflow section to force the pipeline to run only when changes are +pushed to branches: + +image: ruby:3.2 + +workflow: + rules: + - if: $CI_COMMIT_BRANCH + +pages: + script: + - gem install bundler + - bundle install + - bundle exec jekyll build -d public + artifacts: + paths: + - public + + +Then configure the pipeline to run the job for the +default branch (here, main) only. + +image: ruby:3.2 + +workflow: + rules: + - if: $CI_COMMIT_BRANCH + +pages: + script: + - gem install bundler + - bundle install + - bundle exec jekyll build -d public + artifacts: + paths: + - public + rules: + - if: $CI_COMMIT_BRANCH == ""main"" + + +Specify a stage to deploy + + +There are three default stages for GitLab CI/CD: build, test, +and deploy. + +If you want to test your script and check the built site before deploying +to production, you can run the test exactly as it runs when you +push to your default branch (here, main). + +To specify a stage for your job to run in, +add a stage line to your CI file: + +image: ruby:3.2 + +workflow: + rules: + - if: $CI_COMMIT_BRANCH + +pages: + stage: deploy + script: + - gem install bundler + - bundle install + - bundle exec jekyll build -d public + artifacts: + paths: + - public + rules: + - if: $CI_COMMIT_BRANCH == ""main"" + environment: production + + +Now add another job to the CI file, telling it to +test every push to every branch except the main branch: + +image: ruby:3.2 + +workflow: + rules: + - if: $CI_COMMIT_BRANCH + +pages: + stage: deploy + script: + - gem install bundler + - bundle install + - bundle exec jekyll build -d public + artifacts: + paths: + - public + rules: + - if: $CI_COMMIT_BRANCH == ""main"" + environment: production + +test: + stage: test + script: + - gem install bundler + - bundle install + - bundle exec jekyll build -d test + artifacts: + paths: + - test + rules: + - if: $CI_COMMIT_BRANCH != ""main"" + + +When the test job runs in the test stage, Jekyll +builds the site in a directory called test. The job affects +all branches except main. + +When you apply stages to different jobs, every job in the same +stage builds in parallel. If your web application needs more than +one test before being deployed, you can run all your tests at the +same time. + +Remove duplicate commands + + +To avoid duplicating the same scripts in every job, you can add them +to a before_script section. + +In the example, gem install bundler and bundle install were running +for both jobs, pages and test. + +Move these commands to a before_script section: + +image: ruby:3.2 + +workflow: + rules: + - if: $CI_COMMIT_BRANCH + +before_script: + - gem install bundler + - bundle install + +pages: + stage: deploy + script: + - bundle exec jekyll build -d public + artifacts: + paths: + - public + rules: + - if: $CI_COMMIT_BRANCH == ""main"" + environment: production + +test: + stage: test + script: + - bundle exec jekyll build -d test + artifacts: + paths: + - test + rules: + - if: $CI_COMMIT_BRANCH != ""main"" + + +Build faster with cached dependencies + + +To build faster, you can cache the installation files for your +project’s dependencies by using the cache parameter. 
+ +This example caches Jekyll dependencies in a vendor directory +when you run bundle install: + +image: ruby:3.2 + +workflow: + rules: + - if: $CI_COMMIT_BRANCH + +cache: + paths: + - vendor/ + +before_script: + - gem install bundler + - bundle install --path vendor + +pages: + stage: deploy + script: + - bundle exec jekyll build -d public + artifacts: + paths: + - public + rules: + - if: $CI_COMMIT_BRANCH == ""main"" + environment: production + +test: + stage: test + script: + - bundle exec jekyll build -d test + artifacts: + paths: + - test + rules: + - if: $CI_COMMIT_BRANCH != ""main"" + + +In this case, you need to exclude the /vendor +directory from the list of folders Jekyll builds. Otherwise, Jekyll +tries to build the directory contents along with the site. + +In the root directory, create a file called _config.yml +and add this content: + +exclude: + - vendor + + +Now GitLab CI/CD not only builds the website, but also: + + + Pushes with continuous tests to feature branches. + +Caches dependencies installed with Bundler. + +Continuously deploys every push to the main branch. + + +To view the HTML and other assets that were created for the site, +download the job artifacts. + +Related topics + + + + Deploy your web app to staging and production + Run jobs sequentially, in parallel, or build a custom pipeline + Pull specific directories from different projects + Use GitLab Pages to produce a code coverage report + + + +2. GitLab Pages administration + + + +GitLab Pages administration + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed + +GitLab Pages allows for hosting of static sites. It must be configured by an +administrator. Separate user documentation is available. + + + note This guide is for Linux package installations. If you have a self-compiled GitLab installation, see +GitLab Pages administration for self-compiled installations. + + +The GitLab Pages daemon + + +GitLab Pages makes use of the GitLab Pages daemon, a basic HTTP server +written in Go that can listen on an external IP address and provide support for +custom domains and custom certificates. It supports dynamic certificates through +Server Name Indication (SNI) and exposes pages using HTTP2 by default. +You are encouraged to read its README to fully understand how +it works. + +In the case of custom domains (but not +wildcard domains), the Pages daemon needs to listen on +ports 80 and/or 443. For that reason, there is some flexibility in the way +which you can set it up: + + + Run the Pages daemon in the same server as GitLab, listening on a secondary IP. + Run the Pages daemon in a separate server. In that case, the +Pages path must also be present in the server that +the Pages daemon is installed, so you must share it through the network. + Run the Pages daemon in the same server as GitLab, listening on the same IP +but on different ports. In that case, you must proxy the traffic with +a load balancer. If you choose that route, you should use TCP load +balancing for HTTPS. If you use TLS-termination (HTTPS-load balancing), the +pages can’t be served with user-provided certificates. For +HTTP it’s OK to use HTTP or TCP load balancing. + + +In this document, we proceed assuming the first option. If you are not +supporting custom domains a secondary IP is not needed. + +Prerequisites + + +Before proceeding with the Pages configuration, you must: + + + + Have a domain for Pages that is not a subdomain of your GitLab instance domain. + + + + + GitLab domain + Pages domain + Does it work? 
+ + + + + example.com + example.io + + Yes + + + example.com + pages.example.com + + No + + + gitlab.example.com + pages.example.com + + Yes + + + + + Configure a wildcard DNS record. + Optional. Have a wildcard certificate for that domain if you decide to +serve Pages under HTTPS. + Optional but recommended. Enable instance runners +so that your users don’t have to bring their own. + For custom domains, have a secondary IP. + + + + note If your GitLab instance and the Pages daemon are deployed in a private network or behind a firewall, your GitLab Pages websites are only accessible to devices/users that have access to the private network. + + +Add the domain to the Public Suffix List + + +The Public Suffix List is used by browsers to +decide how to treat subdomains. If your GitLab instance allows members of the +public to create GitLab Pages sites, it also allows those users to create +subdomains on the pages domain (example.io). Adding the domain to the Public +Suffix List prevents browsers from accepting +supercookies, +among other things. + +Follow these instructions to submit your +GitLab Pages subdomain. For instance, if your domain is example.io, you should +request that example.io is added to the Public Suffix List. GitLab.com +added gitlab.io in 2016. + +DNS configuration + + +GitLab Pages expect to run on their own virtual host. In your DNS server/provider +add a wildcard DNS A record pointing to the +host that GitLab runs. For example, an entry would look like this: + +*.example.io. 1800 IN A 192.0.2.1 +*.example.io. 1800 IN AAAA 2001:db8::1 + + +Where example.io is the domain GitLab Pages is served from, +192.0.2.1 is the IPv4 address of your GitLab instance, and 2001:db8::1 is the +IPv6 address. If you don’t have IPv6, you can omit the AAAA record. + +For namespace in URL path, without wildcard DNS + + + +Status: Experiment + + +History + + + + + +Introduced in GitLab 16.7. This feature is an Experiment. + + + + + + + + On self-managed GitLab, by default this feature is available. +On GitLab.com and GitLab Dedicated, this feature is not available. +This feature is not ready for production use. + + +Prerequisites: + + + Your instance must use the Linux package installation method. + + +If you need support for namespace in the URL path to remove the requirement for wildcard DNS: + + + Enable the GitLab Pages flag for this feature by adding +gitlab_pages[""namespace_in_path""] = true to /etc/gitlab/gitlab.rb. + + In your DNS provider, add entries for example.com and projects.example.com. +In both lines, replace example.com with your domain name, and 192.0.0.0 with +the IPv4 version of your IP address. The entries look like this: + + +example.com 1800 IN A 192.0.0.0 +projects.example.com 1800 IN A 192.0.0.0 + + + + Optional. If your GitLab instance has an IPv6 address, add entries for it. +In both lines, replace example.com with your domain name, and 2001:db8::1 with +the IPv6 version of your IP address. The entries look like this: + + +example.com 1800 IN AAAA 2001:db8::1 +projects.example.com 1800 IN AAAA 2001:db8::1 + + + + +DNS configuration for custom domains + + +If support for custom domains is needed, all subdomains of the Pages root domain should point to the +secondary IP (which is dedicated for the Pages daemon). Without this configuration, users can’t use +CNAME records to point their custom domains to their GitLab Pages. + +For example, an entry could look like this: + +example.com 1800 IN A 192.0.2.1 +*.example.io. 
1800 IN A 192.0.2.2 + + +This example contains the following: + + + +example.com: The GitLab domain. + +example.io: The domain GitLab Pages is served from. + +192.0.2.1: The primary IP of your GitLab instance. + +192.0.2.2: The secondary IP, which is dedicated to GitLab Pages. It must be different than the primary IP. + + + + note You should not use the GitLab domain to serve user pages. For more information see the security section. + + +Configuration + + +Depending on your needs, you can set up GitLab Pages in 4 different ways. + +The following examples are listed from the easiest setup to the most +advanced one. The absolute minimum requirement is to set up the wildcard DNS +because that is needed in all configurations. + +Wildcard domains + + +Requirements: + + + Wildcard DNS setup + + + + +URL scheme: http://.example.io/ + +The following is the minimum setup that you can use Pages with. It is the base for all +other setups as described below. NGINX proxies all requests to the daemon. +The Pages daemon doesn’t listen to the outside world. + + + + Set the external URL for GitLab Pages in /etc/gitlab/gitlab.rb: + + +external_url ""http://example.com"" # external_url here is only for reference +pages_external_url 'http://example.io' # Important: not a subdomain of external_url, so cannot be http://pages.example.com + + + + Reconfigure GitLab. + + + +Watch the video tutorial for this configuration. + +Pages domain without wildcard DNS + + + +Status: Experiment + + +History + + + + + +Introduced in GitLab 16.7. This feature is an Experiment. + + + + + + + + On self-managed GitLab, by default this feature is available. +On GitLab.com and GitLab Dedicated, this feature is not available. +This feature is not ready for production use. + + +This configuration is the minimum setup for GitLab Pages. It is the base for all +other configurations. In this configuration, NGINX proxies all requests to the daemon, +because the GitLab Pages daemon doesn’t listen to the outside world. + +Prerequisites: + + + Your instance must use the Linux package installation method. + You have configured DNS setup +without a wildcard. + + + + + In /etc/gitlab/gitlab.rb, set the external URL for GitLab Pages, and enable +the feature flag: + + +# External_url here is only for reference +external_url ""http://example.com"" +pages_external_url 'http://example.io' + +pages_nginx['enable'] = true + +# Set this flag to enable this feature +gitlab_pages[""namespace_in_path""] = true + + + + Reconfigure GitLab. + + + +NGINX uses the custom proxy header X-Gitlab-Namespace-In-Path +to send the namespace to the GitLab Pages daemon. + +The resulting URL scheme is http://example.io//. + +Wildcard domains with TLS support + + +Requirements: + + + Wildcard DNS setup + TLS certificate. Can be either Wildcard, or any other type meeting the requirements. + + + + +URL scheme: https://.example.io/ + +NGINX proxies all requests to the daemon. Pages daemon doesn’t listen to the +outside world. + + + Place the wildcard TLS certificate for *.example.io and the key inside /etc/gitlab/ssl. 
+ + In /etc/gitlab/gitlab.rb specify the following configuration: + + +external_url ""https://example.com"" # external_url here is only for reference +pages_external_url 'https://example.io' # Important: not a subdomain of external_url, so cannot be https://pages.example.com + +pages_nginx['redirect_http_to_https'] = true + + + + If you haven’t named your certificate and key example.io.crt and example.io.key, +you must also add the full paths as shown below: + + +pages_nginx['ssl_certificate'] = ""/etc/gitlab/ssl/pages-nginx.crt"" +pages_nginx['ssl_certificate_key'] = ""/etc/gitlab/ssl/pages-nginx.key"" + + + +Reconfigure GitLab. + If you’re using Pages Access Control, update the redirect URI in the GitLab Pages +System OAuth application +to use the HTTPS protocol. + + + + caution Multiple wildcards for one instance is not supported. Only one wildcard per instance can be assigned. + + + + caution GitLab Pages does not update the OAuth application if changes are made to the redirect URI. +Before you reconfigure, remove the gitlab_pages section from /etc/gitlab/gitlab-secrets.json, +then run gitlab-ctl reconfigure. For more information, read +GitLab Pages does not regenerate OAuth. + + +Pages domain with TLS support, without wildcard DNS + + + +Status: Experiment + + +History + + + + + +Introduced in GitLab 16.7. This feature is an Experiment. + + + + + + + + On self-managed GitLab, by default this feature is available. +On GitLab.com and GitLab Dedicated, this feature is not available. +This feature is not ready for production use. + + +Prerequisites: + + + Your instance must use the Linux package installation method. + You have configured DNS setup +without a wildcard. + You have a single TLS certificate that covers your domain (like example.com) +and the projects.* version of your domain, like projects.example.com. + + +In this configuration, NGINX proxies all requests to the daemon. The GitLab Pages +daemon doesn’t listen to the outside world: + + + Add your TLS certificate and key as mentioned in the prerequisites into /etc/gitlab/ssl. + + In /etc/gitlab/gitlab.rb, set the external URL for GitLab Pages, and enable +the feature flag: + + +# The external_url field is here only for reference. +external_url ""https://example.com"" +pages_external_url 'https://example.io' + +pages_nginx['enable'] = true +pages_nginx['redirect_http_to_https'] = true + +# Set this flag to enable this feature +gitlab_pages[""namespace_in_path""] = true + + + + If your TLS certificate and key don’t match the name of your domain, like +example.io.crt and example.io.key, +add the full paths for the certificate and key files to /etc/gitlab/gitlab.rb: + + +pages_nginx['ssl_certificate'] = ""/etc/gitlab/ssl/pages-nginx.crt"" +pages_nginx['ssl_certificate_key'] = ""/etc/gitlab/ssl/pages-nginx.key"" + + + + Reconfigure GitLab. + + + caution GitLab Pages does not update the OAuth application if changes are made to the redirect URI. +Before you reconfigure, remove the gitlab_pages section from /etc/gitlab/gitlab-secrets.json, +then run gitlab-ctl reconfigure. For more information, see +GitLab Pages does not regenerate OAuth. + + + If you’re using Pages Access Control, update the redirect URI in the GitLab Pages +System OAuth application +to use the HTTPS protocol. + + +NGINX uses the custom proxy header X-Gitlab-Namespace-In-Path +to send the namespace to the GitLab Pages daemon. + +The resulting URL scheme is https://example.io//. 
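Before and after you reconfigure for either TLS setup above, it can be worth confirming that the certificate and key files actually match and that the Pages host answers over HTTPS. The commands below are only a sketch: example.io, the file names under /etc/gitlab/ssl, and the mygroup/myproject path are placeholders for your own values, and the key check assumes an RSA key.

# Both commands should print the same modulus hash if the pair matches
# (assumes an RSA key; skip this check for EC keys).
openssl x509 -noout -modulus -in /etc/gitlab/ssl/example.io.crt | openssl md5
openssl rsa -noout -modulus -in /etc/gitlab/ssl/example.io.key | openssl md5

# Show which names and validity dates the certificate carries.
openssl x509 -noout -subject -dates -in /etc/gitlab/ssl/example.io.crt

# After gitlab-ctl reconfigure, expect a 2xx or 3xx status from the Pages URL.
curl -sSI https://example.io/mygroup/myproject/ | head -n 1
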
+ +Wildcard domains with TLS-terminating Load Balancer + + +Requirements: + + + Wildcard DNS setup + TLS-terminating load balancer + + + + +URL scheme: https://.example.io/ + +This setup is primarily intended to be used when installing a GitLab POC on Amazon Web Services. This includes a TLS-terminating classic load balancer that listens for HTTPS connections, manages TLS certificates, and forwards HTTP traffic to the instance. + + + + In /etc/gitlab/gitlab.rb specify the following configuration: + + +external_url ""https://example.com"" # external_url here is only for reference +pages_external_url 'https://example.io' # Important: not a subdomain of external_url, so cannot be https://pages.example.com + +pages_nginx['enable'] = true +pages_nginx['listen_port'] = 80 +pages_nginx['listen_https'] = false +pages_nginx['redirect_http_to_https'] = true + + + + Reconfigure GitLab. + + + +Global settings + + +Below is a table of all configuration settings known to Pages in a Linux package installation, +and what they do. These options can be adjusted in /etc/gitlab/gitlab.rb, +and take effect after you reconfigure GitLab. +Most of these settings don’t have to be configured manually unless you need more granular +control over how the Pages daemon runs and serves content in your environment. + + + + + Setting + Description + + + + + pages_external_url + The URL where GitLab Pages is accessible, including protocol (HTTP / HTTPS). If https:// is used, additional configuration is required. See Wildcard domains with TLS support and Custom domains with TLS support for details. + + + gitlab_pages[] +   + + + access_control + Whether to enable access control. + + + api_secret_key + Full path to file with secret key used to authenticate with the GitLab API. Auto-generated when left unset. + + + artifacts_server + Enable viewing artifacts in GitLab Pages. + + + artifacts_server_timeout + Timeout (in seconds) for a proxied request to the artifacts server. + + + artifacts_server_url + API URL to proxy artifact requests to. Defaults to GitLab external URL + /api/v4, for example https://gitlab.com/api/v4. When running a separate Pages server, this URL must point to the main GitLab server’s API. + + + auth_redirect_uri + Callback URL for authenticating with GitLab. Defaults to project’s subdomain of pages_external_url + /auth. + + + auth_secret + Secret key for signing authentication requests. Leave blank to pull automatically from GitLab during OAuth registration. + + + dir + Working directory for configuration and secrets files. + + + enable + Enable or disable GitLab Pages on the current system. + + + external_http + Configure Pages to bind to one or more secondary IP addresses, serving HTTP requests. Multiple addresses can be given as an array, along with exact ports, for example ['1.2.3.4', '1.2.3.5:8063']. Sets value for listen_http. + + + external_https + Configure Pages to bind to one or more secondary IP addresses, serving HTTPS requests. Multiple addresses can be given as an array, along with exact ports, for example ['1.2.3.4', '1.2.3.5:8063']. Sets value for listen_https. + + + server_shutdown_timeout + GitLab Pages server shutdown timeout in seconds (default: 30s). + + + gitlab_client_http_timeout + GitLab API HTTP client connection timeout in seconds (default: 10s). + + + gitlab_client_jwt_expiry + JWT Token expiry time in seconds (default: 30s). + + + gitlab_cache_expiry + The maximum time a domain’s configuration is stored in the cache (default: 600s). 
+ + + gitlab_cache_refresh + The interval at which a domain’s configuration is set to be due to refresh (default: 60s). + + + gitlab_cache_cleanup + The interval at which expired items are removed from the cache (default: 60s). + + + gitlab_retrieval_timeout + The maximum time to wait for a response from the GitLab API per request (default: 30s). + + + gitlab_retrieval_interval + The interval to wait before retrying to resolve a domain’s configuration via the GitLab API (default: 1s). + + + gitlab_retrieval_retries + The maximum number of times to retry to resolve a domain’s configuration via the API (default: 3). + + + domain_config_source + This parameter was removed in 14.0, on earlier versions it can be used to enable and test API domain configuration source + + + gitlab_id + The OAuth application public ID. Leave blank to automatically fill when Pages authenticates with GitLab. + + + gitlab_secret + The OAuth application secret. Leave blank to automatically fill when Pages authenticates with GitLab. + + + auth_scope + The OAuth application scope to use for authentication. Must match GitLab Pages OAuth application settings. Leave blank to use api scope by default. + + + auth_timeout + GitLab application client timeout for authentication in seconds (default: 5s). A value of 0 means no timeout. + + + auth_cookie_session_timeout + Authentication cookie session timeout in seconds (default: 10m). A value of 0 means the cookie is deleted after the browser session ends. + + + gitlab_server + Server to use for authentication when access control is enabled; defaults to GitLab external_url. + + + headers + Specify any additional http headers that should be sent to the client with each response. Multiple headers can be given as an array, header and value as one string, for example ['my-header: myvalue', 'my-other-header: my-other-value'] + + + + enable_disk + Allows the GitLab Pages daemon to serve content from disk. Shall be disabled if shared disk storage isn’t available. + + + insecure_ciphers + Use default list of cipher suites, may contain insecure ones like 3DES and RC4. + + + internal_gitlab_server + Internal GitLab server address used exclusively for API requests. Useful if you want to send that traffic over an internal load balancer. Defaults to GitLab external_url. + + + listen_proxy + The addresses to listen on for reverse-proxy requests. Pages binds to these addresses’ network sockets and receives incoming requests from them. Sets the value of proxy_pass in $nginx-dir/conf/gitlab-pages.conf. + + + log_directory + Absolute path to a log directory. + + + log_format + The log output format: text or json. + + + log_verbose + Verbose logging, true/false. + + + namespace_in_path + (Experimental) Enable or disable namespace in the URL path. This requires pages_nginx[enable] = true. Sets rewrite configuration in NGINX to support without wildcard DNS setup. Default: false + + + + propagate_correlation_id + Set to true (false by default) to re-use existing Correlation ID from the incoming request header X-Request-ID if present. If a reverse proxy sets this header, the value is propagated in the request chain. + + + max_connections + Limit on the number of concurrent connections to the HTTP, HTTPS or proxy listeners. + + + max_uri_length + The maximum length of URIs accepted by GitLab Pages. Set to 0 for unlimited length. Introduced in GitLab 14.5. + + + metrics_address + The address to listen on for metrics requests. + + + redirect_http + Redirect pages from HTTP to HTTPS, true/false. 
+ + + redirects_max_config_size + The maximum size of the _redirects file, in bytes (default: 65536). + + + redirects_max_path_segments + The maximum number of path segments allowed in _redirects rules URLs (default: 25). + + + redirects_max_rule_count + The maximum number of rules allowed in _redirects (default: 1000). + + + sentry_dsn + The address for sending Sentry crash reporting to. + + + sentry_enabled + Enable reporting and logging with Sentry, true/false. + + + sentry_environment + The environment for Sentry crash reporting. + + + status_uri + The URL path for a status page, for example, /@status. + + + tls_max_version + Specifies the maximum TLS version (“tls1.2” or “tls1.3”). + + + tls_min_version + Specifies the minimum TLS version (“tls1.2” or “tls1.3”). + + + use_http2 + Enable HTTP2 support. + + + gitlab_pages['env'][] +   + + + http_proxy + Configure GitLab Pages to use an HTTP Proxy to mediate traffic between Pages and GitLab. Sets an environment variable http_proxy when starting Pages daemon. + + + gitlab_rails[] +   + + + pages_domain_verification_cron_worker + Schedule for verifying custom GitLab Pages domains. + + + pages_domain_ssl_renewal_cron_worker + Schedule for obtaining and renewing SSL certificates through Let’s Encrypt for GitLab Pages domains. + + + pages_domain_removal_cron_worker + Schedule for removing unverified custom GitLab Pages domains. + + + pages_path + The directory on disk where pages are stored, defaults to GITLAB-RAILS/shared/pages. + + + pages_nginx[] +   + + + enable + Include a virtual host server{} block for Pages inside NGINX. Needed for NGINX to proxy traffic back to the Pages daemon. Set to false if the Pages daemon should directly receive all requests, for example, when using custom domains. + + + FF_CONFIGURABLE_ROOT_DIR + Feature flag to customize the default folder (enabled by default). + + + FF_ENABLE_PLACEHOLDERS + Feature flag for rewrites (enabled by default). See Rewrites for more information. + + + use_legacy_storage + Temporarily-introduced parameter allowing to use legacy domain configuration source and storage. Removed in 14.3. + + + rate_limit_source_ip + Rate limit per source IP in number of requests per second. Set to 0 to disable this feature. + + + rate_limit_source_ip_burst + Rate limit per source IP maximum burst allowed per second. + + + rate_limit_domain + Rate limit per domain in number of requests per second. Set to 0 to disable this feature. + + + rate_limit_domain_burst + Rate limit per domain maximum burst allowed per second. + + + rate_limit_tls_source_ip + Rate limit per source IP in number of TLS connections per second. Set to 0 to disable this feature. + + + rate_limit_tls_source_ip_burst + Rate limit per source IP maximum TLS connections burst allowed per second. + + + rate_limit_tls_domain + Rate limit per domain in number of TLS connections per second. Set to 0 to disable this feature. + + + rate_limit_tls_domain_burst + Rate limit per domain maximum TLS connections burst allowed per second. + + + server_read_timeout + Maximum duration to read the request headers and body. For no timeout, set to 0 or a negative value. Default: 5s + + + + server_read_header_timeout + Maximum duration to read the request headers. For no timeout, set to 0 or a negative value. Default: 1s + + + + server_write_timeout + Maximum duration to write all files in the response. Larger files require more time. For no timeout, set to 0 or a negative value. 
Default: 0 + + + + server_keep_alive + The Keep-Alive period for network connections accepted by this listener. If 0, Keep-Alive is enabled if supported by the protocol and operating system. If negative, Keep-Alive is disabled. Default: 15s + + + + + +Advanced configuration + + +In addition to the wildcard domains, you can also have the option to configure +GitLab Pages to work with custom domains. Again, there are two options here: +support custom domains with and without TLS certificates. The easiest setup is +that without TLS certificates. In either case, you need a secondary IP. If +you have IPv6 as well as IPv4 addresses, you can use them both. + +Custom domains + + +Requirements: + + + Wildcard DNS setup + Secondary IP + + + + +URL scheme: http://.example.io/ and http://custom-domain.com + +In that case, the Pages daemon is running, NGINX still proxies requests to +the daemon but the daemon is also able to receive requests from the outside +world. Custom domains are supported, but no TLS. + + + + In /etc/gitlab/gitlab.rb specify the following configuration: + + +external_url ""http://example.com"" # external_url here is only for reference +pages_external_url 'http://example.io' # Important: not a subdomain of external_url, so cannot be http://pages.example.com +nginx['listen_addresses'] = ['192.0.2.1'] # The primary IP of the GitLab instance +pages_nginx['enable'] = false +gitlab_pages['external_http'] = ['192.0.2.2:80', '[2001:db8::2]:80'] # The secondary IPs for the GitLab Pages daemon + + + If you don’t have IPv6, you can omit the IPv6 address. + + + Reconfigure GitLab. + + + +Custom domains with TLS support + + +Requirements: + + + Wildcard DNS setup + TLS certificate. Can be either Wildcard, or any other type meeting the requirements. + Secondary IP + + + + +URL scheme: https://.example.io/ and https://custom-domain.com + +In that case, the Pages daemon is running, NGINX still proxies requests to +the daemon but the daemon is also able to receive requests from the outside +world. Custom domains and TLS are supported. + + + Place the wildcard LTS certificate for *.example.io and the key inside /etc/gitlab/ssl. + + In /etc/gitlab/gitlab.rb specify the following configuration: + + +external_url ""https://example.com"" # external_url here is only for reference +pages_external_url 'https://example.io' # Important: not a subdomain of external_url, so cannot be https://pages.example.com +nginx['listen_addresses'] = ['192.0.2.1'] # The primary IP of the GitLab instance +pages_nginx['enable'] = false +gitlab_pages['external_http'] = ['192.0.2.2:80', '[2001:db8::2]:80'] # The secondary IPs for the GitLab Pages daemon +gitlab_pages['external_https'] = ['192.0.2.2:443', '[2001:db8::2]:443'] # The secondary IPs for the GitLab Pages daemon +# Redirect pages from HTTP to HTTPS +gitlab_pages['redirect_http'] = true + + + If you don’t have IPv6, you can omit the IPv6 address. + + + If you haven’t named your certificate example.io.crt and your key example.io.key, +then you need to also add the full paths as shown below: + + +gitlab_pages['cert'] = ""/etc/gitlab/ssl/example.io.crt"" +gitlab_pages['cert_key'] = ""/etc/gitlab/ssl/example.io.key"" + + + +Reconfigure GitLab. + If you’re using Pages Access Control, update the redirect URI in the GitLab Pages +System OAuth application +to use the HTTPS protocol. + + +Custom domain verification + + +To prevent malicious users from hijacking domains that don’t belong to them, +GitLab supports custom domain verification. 
+When adding a custom domain, users are required to prove they own it by +adding a GitLab-controlled verification code to the DNS records for that domain. + + + caution Disabling domain verification is unsafe and can lead to various vulnerabilities. +If you do disable it, either ensure that the Pages root domain itself does not point to the +secondary IP or add the root domain as custom domain to a project; otherwise, any user can add this +domain as a custom domain to their project. + + +If your user base is private or otherwise trusted, you can disable the +verification requirement: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > Preferences. + Expand Pages. + Clear the Require users to prove ownership of custom domains checkbox. +This setting is enabled by default. + + +Let’s Encrypt integration + + + +History + + + + + +Introduced in GitLab 12.1. + + + + + + +GitLab Pages’ Let’s Encrypt integration +allows users to add Let’s Encrypt SSL certificates for GitLab Pages +sites served under a custom domain. + +To enable it: + + + Choose an email address on which you want to receive notifications about expiring domains. + On the left sidebar, at the bottom, select Admin Area. + Select Settings > Preferences. + Expand Pages. + Enter the email address for receiving notifications and accept Let’s Encrypt’s Terms of Service. + Select Save changes. + + +Access control + + +GitLab Pages access control can be configured per-project, and allows access to a Pages +site to be controlled based on a user’s membership to that project. + +Access control works by registering the Pages daemon as an OAuth application +with GitLab. Whenever a request to access a private Pages site is made by an +unauthenticated user, the Pages daemon redirects the user to GitLab. If +authentication is successful, the user is redirected back to Pages with a token, +which is persisted in a cookie. The cookies are signed with a secret key, so +tampering can be detected. + +Each request to view a resource in a private site is authenticated by Pages +using that token. For each request it receives, it makes a request to the GitLab +API to check that the user is authorized to read that site. + +Pages access control is disabled by default. To enable it: + + + + Enable it in /etc/gitlab/gitlab.rb: + + +gitlab_pages['access_control'] = true + + + +Reconfigure GitLab. + Users can now configure it in their projects’ settings. + + + + note For this setting to be effective with multi-node setups, it has to be applied to +all the App nodes and Sidekiq nodes. + + +Using Pages with reduced authentication scope + + + +History + + + + + +Introduced in GitLab 13.10. + + + + + + +By default, the Pages daemon uses the api scope to authenticate. You can configure this. For +example, this reduces the scope to read_api in /etc/gitlab/gitlab.rb: + +gitlab_pages['auth_scope'] = 'read_api' + + +The scope to use for authentication must match the GitLab Pages OAuth application settings. Users of +pre-existing applications must modify the GitLab Pages OAuth application. Follow these steps to do +this: + + + Enable access control. + On the left sidebar, at the bottom, select Admin Area. + Select Applications. + Expand GitLab Pages. + Clear the api scope’s checkbox and select the desired scope’s checkbox (for example, +read_api). + Select Save changes. + + +Disable public access to all Pages sites + + + +History + + + + + +Introduced in GitLab 12.7. 
+ + + + + + +You can enforce Access Control for all GitLab Pages websites hosted +on your GitLab instance. By doing so, only authenticated users have access to them. +This setting overrides Access Control set by users in individual projects. + +This can be helpful to restrict information published with Pages websites to the users +of your instance only. +To do that: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > Preferences. + Expand Pages. + Select the Disable public access to Pages sites checkbox. + Select Save changes. + + + + note You must enable Access Control first for the setting to show in the Admin Area. + + +Running behind a proxy + + +Like the rest of GitLab, Pages can be used in those environments where external +internet connectivity is gated by a proxy. To use a proxy for GitLab Pages: + + + + Configure in /etc/gitlab/gitlab.rb: + + +gitlab_pages['env']['http_proxy'] = 'http://example:8080' + + + + Reconfigure GitLab for the changes to take effect. + + + +Using a custom Certificate Authority (CA) + + +When using certificates issued by a custom CA, Access Control and +the online view of HTML job artifacts +fails to work if the custom CA is not recognized. + +This usually results in this error: +Post /oauth/token: x509: certificate signed by unknown authority. + +For Linux package installations, this is fixed by installing a custom CA. + +For self-compiled installations, this can be fixed by installing the custom Certificate +Authority (CA) in the system certificate store. + +ZIP serving and cache configuration + + + +History + + + + + +Introduced in GitLab 13.7. + + + + + + + + caution These instructions deal with some advanced settings of your GitLab instance. The recommended default values are set inside GitLab Pages. You should +change these settings only if absolutely necessary. Use extreme caution. + + +GitLab Pages can serve content from ZIP archives through object storage (an +issue exists for supporting disk storage +as well). It uses an in-memory cache to increase the performance when serving content from a ZIP +archive. You can modify the cache behavior by changing the following configuration flags. + + + + + Setting + Description + + + + + zip_cache_expiration + The cache expiration interval of ZIP archives. Must be greater than zero to avoid serving stale content. Default is 60s. + + + zip_cache_cleanup + The interval at which archives are cleaned from memory if they have already expired. Default is 30s. + + + zip_cache_refresh + The time interval in which an archive is extended in memory if accessed before zip_cache_expiration. This works together with zip_cache_expiration to determine if an archive is extended in memory. See the example below for important details. Default is 30s. + + + zip_open_timeout + The maximum time allowed to open a ZIP archive. Increase this time for big archives or slow network connections, as doing so may affect the latency of serving Pages. Default is 30 s. + + + zip_http_client_timeout + The maximum time for the ZIP HTTP client. Default is 30m. + + + + +ZIP cache refresh example + + +Archives are refreshed in the cache (extending the time they are held in memory) if they’re accessed +before zip_cache_expiration, and the time left before expiring is less than or equal to +zip_cache_refresh. For example, if archive.zip is accessed at time 0s, it expires in 60s (the +default for zip_cache_expiration). 
In the example below, if the archive is opened again after 15s +it is not refreshed because the time left for expiry (45s) is greater than zip_cache_refresh +(default 30s). However, if the archive is accessed again after 45s (from the first time it was +opened) it’s refreshed. This extends the time the archive remains in memory from +45s + zip_cache_expiration (60s), for a total of 105s. + +After an archive reaches zip_cache_expiration, it’s marked as expired and removed on the next +zip_cache_cleanup interval. + + + +HTTP Strict Transport Security (HSTS) support + + +HTTP Strict Transport Security (HSTS) can be enabled through the gitlab_pages['headers'] configuration option. HSTS informs browsers that the website they are visiting should always provide its content over HTTPS to ensure that attackers cannot force subsequent connections to happen unencrypted. It can also improve loading speed of pages as it prevents browsers from attempting to connect over an unencrypted HTTP channel before being redirected to HTTPS. + +gitlab_pages['headers'] = ['Strict-Transport-Security: max-age=63072000'] + + +Pages project redirects limits + + + +History + + + + + +Introduced in GitLab 15.2. + + + + + + +GitLab Pages comes with a set of default limits for the _redirects file +to minimize the impact on performance. You can configure these limits if you’d like to increase or decrease the limits. + +gitlab_pages['redirects_max_config_size'] = 131072 +gitlab_pages['redirects_max_path_segments'] = 50 +gitlab_pages['redirects_max_rule_count'] = 2000 + + +Use environment variables + + +You can pass an environment variable to the Pages daemon (for example, +to enable or disable a feature flag). + +To disable the configurable directory feature: + + + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_pages['env'] = { + 'FF_CONFIGURABLE_ROOT_DIR' => ""false"" +} + + + + Save the file and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + +Activate verbose logging for daemon + + +Follow the steps below to configure verbose logging of GitLab Pages daemon. + + + + By default the daemon only logs with INFO level. +If you wish to make it log events with level DEBUG you must configure this in +/etc/gitlab/gitlab.rb: + + +gitlab_pages['log_verbose'] = true + + + + Reconfigure GitLab. + + + +Propagating the correlation ID + + + +History + + + + + +Introduced in GitLab 13.10. + + + + + + +Setting the propagate_correlation_id to true allows installations behind a reverse proxy to generate +and set a correlation ID to requests sent to GitLab Pages. When a reverse proxy sets the header value X-Request-ID, +the value propagates in the request chain. +Users can find the correlation ID in the logs. + +To enable the propagation of the correlation ID: + + + + Set the parameter to true in /etc/gitlab/gitlab.rb: + + +gitlab_pages['propagate_correlation_id'] = true + + + + Reconfigure GitLab. + + + +Change storage path + + +Follow the steps below to change the default path where GitLab Pages’ contents +are stored. + + + + Pages are stored by default in /var/opt/gitlab/gitlab-rails/shared/pages. +If you wish to store them in another location you must set it up in +/etc/gitlab/gitlab.rb: + + +gitlab_rails['pages_path'] = ""/mnt/storage/pages"" + + + + Reconfigure GitLab. + + + +Configure listener for reverse proxy requests + + +Follow the steps below to configure the proxy listener of GitLab Pages. + + + + By default the listener is configured to listen for requests on localhost:8090. 
+ + If you wish to disable it you must configure this in +/etc/gitlab/gitlab.rb: + + +gitlab_pages['listen_proxy'] = nil + + + If you wish to make it listen on a different port you must configure this also in +/etc/gitlab/gitlab.rb: + + +gitlab_pages['listen_proxy'] = ""localhost:10080"" + + + + Reconfigure GitLab. + + + +Set global maximum size of each GitLab Pages site + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed + +Prerequisites: + + + You must have administrator access to the instance. + + +To set the global maximum pages size for a project: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > Preferences. + Expand Pages. + In Maximum size of pages, enter a value. The default is 100. + Select Save changes. + + +Set maximum size of each GitLab Pages site in a group + + + +Tier: Premium, Ultimate +Offering: Self-managed + +Prerequisites: + + + You must have administrator access to the instance. + + +To set the maximum size of each GitLab Pages site in a group, overriding the inherited setting: + + + On the left sidebar, select Search or go to and find your group. + Select Settings > General. + Expand Pages. + Enter a value under Maximum size in MB. + Select Save changes. + + +Set maximum size of GitLab Pages site in a project + + + +Tier: Premium, Ultimate +Offering: Self-managed + +Prerequisites: + + + You must have administrator access to the instance. + + +To set the maximum size of GitLab Pages site in a project, overriding the inherited setting: + + + On the left sidebar, select Search or go to and find your project. + Select Deploy > Pages. + In Maximum size of pages, enter the size in MB. + Select Save changes. + + +Set maximum number of GitLab Pages custom domains for a project + + +Prerequisites: + + + You must have administrator access to the instance. + + +To set the maximum number of GitLab Pages custom domains for a project: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > Preferences. + Expand Pages. + Enter a value for Maximum number of custom domains per project. Use 0 for unlimited domains. + Select Save changes. + + +Set maximum number of files per GitLab Pages website + + +The total number of file entries (including directories and symlinks) is limited to 200,000 per GitLab Pages website. + +You can update the limit in your self-managed instance using the +GitLab Rails console. + +For more information, see GitLab application limits. + +Running GitLab Pages on a separate server + + +You can run the GitLab Pages daemon on a separate server to decrease the load on +your main application server. This configuration does not support mutual TLS (mTLS). See the corresponding feature proposal for more information. + +To configure GitLab Pages on a separate server: + + + caution The following procedure includes steps to back up and edit the +gitlab-secrets.json file. This file contains secrets that control +database encryption. Proceed with caution. + + + + + Create a backup of the secrets file on the GitLab server: + + +cp /etc/gitlab/gitlab-secrets.json /etc/gitlab/gitlab-secrets.json.bak + + + + On the GitLab server, to enable Pages, add the following to /etc/gitlab/gitlab.rb: + + +pages_external_url ""http://"" + + + + Optionally, to enable access control, add the following to /etc/gitlab/gitlab.rb: + + +gitlab_pages['access_control'] = true + + + Set up object storage by either: + + +Configuring the object storage and migrating GitLab Pages data to it, or + +Configuring network storage. 
+ + + + Reconfigure the GitLab server for the +changes to take effect. The gitlab-secrets.json file is now updated with the +new configuration. + + + Set up a new server. This becomes the Pages server. + + + On the Pages server, install GitLab by using the Linux package and modify /etc/gitlab/gitlab.rb +to include: + + +roles ['pages_role'] + +pages_external_url ""http://"" + +gitlab_pages['gitlab_server'] = 'http://' + +## If access control was enabled on step 3 +gitlab_pages['access_control'] = true + + + + If you have custom UID/GID settings on the GitLab server, add them to the Pages server /etc/gitlab/gitlab.rb as well, +otherwise running a gitlab-ctl reconfigure on the GitLab server can change file ownership and cause Pages requests to fail. + + + Create a backup of the secrets file on the Pages server: + + +cp /etc/gitlab/gitlab-secrets.json /etc/gitlab/gitlab-secrets.json.bak + + + + Copy the /etc/gitlab/gitlab-secrets.json file from the GitLab server +to the Pages server. + + +# On the GitLab server +cp /etc/gitlab/gitlab-secrets.json /mnt/pages/gitlab-secrets.json + +# On the Pages server +mv /var/opt/gitlab/gitlab-rails/shared/pages/gitlab-secrets.json /etc/gitlab/gitlab-secrets.json + + + + Reconfigure the Pages server for the changes to take effect. + + + On the GitLab server, make the following changes to /etc/gitlab/gitlab.rb: + + +pages_external_url ""http://"" +gitlab_pages['enable'] = false +pages_nginx['enable'] = false + + + +Reconfigure the GitLab server for the changes to take effect. + + +It’s possible to run GitLab Pages on multiple servers if you wish to distribute +the load. You can do this through standard load balancing practices such as +configuring your DNS server to return multiple IPs for your Pages server, or +configuring a load balancer to work at the IP level. If you wish to +set up GitLab Pages on multiple servers, perform the above procedure for each +Pages server. + +Domain source configuration + + +When GitLab Pages daemon serves pages requests it firstly needs to identify which project should be used to +serve the requested URL and how its content is stored. + +Before GitLab 13.3, all pages content was extracted to the special shared directory, +and each project had a special configuration file. +The Pages daemon was reading these configuration files and storing their content in memory. + +This approach had several disadvantages and was replaced with GitLab Pages using the internal GitLab API +every time a new domain is requested. +The domain information is also cached by the Pages daemon to speed up subsequent requests. + +Starting from GitLab 14.0 GitLab Pages uses API +by default and fails to start if it can’t connect to it. +For common issues, see troubleshooting. + +For more details see this blog post. + +GitLab API cache configuration + + + +History + + + + + +Introduced in GitLab 13.10. + + + + + + +API-based configuration uses a caching mechanism to improve performance and reliability of serving Pages. +The cache behavior can be modified by changing the cache settings, however, the recommended values are set for you and should only be modified if needed. +Incorrect configuration of these values may result in intermittent +or persistent errors, or the Pages Daemon serving old content. + + + note Expiry, interval and timeout flags use Go duration formatting. +A duration string is a possibly signed sequence of decimal numbers, +each with optional fraction and a unit suffix, such as 300ms, 1.5h or 2h45m. 
+Valid time units are ns, us (or µs), ms, s, m, h. + + +Examples: + + + Increasing gitlab_cache_expiry allows items to exist in the cache longer. +This setting might be useful if the communication between GitLab Pages and GitLab Rails +is not stable. + Increasing gitlab_cache_refresh reduces the frequency at which GitLab Pages +requests a domain’s configuration from GitLab Rails. This setting might be useful +GitLab Pages generates too many requests to GitLab API and content does not change frequently. + Decreasing gitlab_cache_cleanup removes expired items from the cache more frequently, +reducing the memory usage of your Pages node. + Decreasing gitlab_retrieval_timeout allows you to stop the request to GitLab Rails +more quickly. Increasing it allows more time to receive a response from the API, +useful in slow networking environments. + Decreasing gitlab_retrieval_interval makes requests to the API more frequently, +only when there is an error response from the API, for example a connection timeout. + Decreasing gitlab_retrieval_retries reduces the number of times a domain’s +configuration is tried to be resolved automatically before reporting an error. + + +Object storage settings + + +The following object storage settings are: + + + Nested under pages: and then object_store: on self-compiled installations. + Prefixed by pages_object_store_ on Linux package installations. + + + + + + Setting + Description + Default + + + + + enabled + Whether object storage is enabled. + false + + + remote_directory + The name of the bucket where Pages site content is stored. +   + + + connection + Various connection options described below. +   + + + + + + note If you want to stop using and disconnect the NFS server, you need to +explicitly disable local storage, and it’s only possible after upgrading to GitLab 13.11. + + +S3-compatible connection settings + + +In GitLab 13.2 and later, you should use the +consolidated object storage settings. +This section describes the earlier configuration format. + +See the available connection settings for different providers. + +Linux package (Omnibus)Self-compiled (source) + + Add the following lines to /etc/gitlab/gitlab.rb and replace the values with the ones you want: + + +gitlab_rails['pages_object_store_enabled'] = true +gitlab_rails['pages_object_store_remote_directory'] = ""pages"" +gitlab_rails['pages_object_store_connection'] = { + 'provider' => 'AWS', + 'region' => 'eu-central-1', + 'aws_access_key_id' => 'AWS_ACCESS_KEY_ID', + 'aws_secret_access_key' => 'AWS_SECRET_ACCESS_KEY' +} + + + If you use AWS IAM profiles, be sure to omit the AWS access key and secret access key/value +pairs: + + +gitlab_rails['pages_object_store_connection'] = { + 'provider' => 'AWS', + 'region' => 'eu-central-1', + 'use_iam_profile' => true +} + + + + Save the file and reconfigure GitLab +for the changes to take effect. + + + Migrate existing Pages deployments to object storage. + + + + Edit /home/git/gitlab/config/gitlab.yml and add or amend the following lines: + + +pages: + object_store: + enabled: true + remote_directory: ""pages"" # The bucket name + connection: + provider: AWS # Only AWS supported at the moment + aws_access_key_id: AWS_ACCESS_KEY_ID + aws_secret_access_key: AWS_SECRET_ACCESS_KEY + region: eu-central-1 + + + + Save the file and restart GitLab +for the changes to take effect. + + + Migrate existing Pages deployments to object storage. 
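Before migrating deployments, you can confirm that the bucket you configured is reachable with the credentials you supplied. This is only a sketch using the AWS CLI; the pages bucket name and the eu-central-1 region are the example values from the settings above, and it assumes the CLI can use either the same access keys or the instance profile when use_iam_profile is enabled.

# List the bucket to prove the name, region and credentials line up.
aws s3 ls s3://pages --region eu-central-1

# Optionally round-trip a small object to prove write access, then remove it.
echo connectivity-check > /tmp/pages-check.txt
aws s3 cp /tmp/pages-check.txt s3://pages/pages-check.txt --region eu-central-1
aws s3 rm s3://pages/pages-check.txt --region eu-central-1
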
+ + + +Migrate Pages deployments to object storage + + +Existing Pages deployment objects (zip archives) can be stored in either: + + + Local storage + Object storage + + +Migrate your existing Pages deployments from local storage to object storage: + +sudo gitlab-rake gitlab:pages:deployments:migrate_to_object_storage + + +You can track progress and verify that all Pages deployments migrated successfully using the +PostgreSQL console: + + + +sudo gitlab-rails dbconsole for Linux package installations running GitLab 14.1 and earlier. + +sudo gitlab-rails dbconsole --database main for Linux package installations running 14.2 and later. + +sudo -u git -H psql -d gitlabhq_production for self-compiled installations. + + +Verify objectstg below (where store=2) has count of all Pages deployments: + +gitlabhq_production=# SELECT count(*) AS total, sum(case when file_store = '1' then 1 else 0 end) AS filesystem, sum(case when file_store = '2' then 1 else 0 end) AS objectstg FROM pages_deployments; + +total | filesystem | objectstg +------+------------+----------- + 10 | 0 | 10 + + +After verifying everything is working correctly, +disable Pages local storage. + +Rolling Pages deployments back to local storage + + +After the migration to object storage is performed, you can choose to move your Pages deployments back to local storage: + +sudo gitlab-rake gitlab:pages:deployments:migrate_to_local + + +Disable Pages local storage + + + +History + + + + + +Introduced in GitLab 13.11. + + + + + + +If you use object storage, you can disable local storage to avoid unnecessary disk usage/writes: + + + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['pages_local_store_enabled'] = false + + + + Reconfigure GitLab for the changes to take effect. + + + +Enable Pages network storage in multi-node environments + + +Object storage is the preferred configuration for most environments. However, +if your requirements call for network storage and you want to configure Pages +to run on a separate server, you should: + + + Ensure the shared storage volume you intend to use is already mounted and +available on both the primary server and your intended Pages server. + + Update the /etc/gitlab/gitlab.rb of each node to include: + + +gitlab_pages['enable_disk'] = true +gitlab_rails['pages_path'] = ""/var/opt/gitlab/gitlab-rails/shared/pages"" # Path to your network storage + + + Switch over Pages to your separate server. + + +After you successfully configure Pages on your separate server, only that server +needs access to the shared storage volume. Consider keeping the shared storage volume +mounted on your primary server, in case you must migrate back to a single-node environment. + +ZIP storage + + +In GitLab 14.0 the underlying storage format of GitLab Pages changed from +files stored directly in disk to a single ZIP archive per project. + +These ZIP archives can be stored either locally on disk storage or on object storage if it is configured. + +Starting from GitLab 13.5 ZIP archives are stored every time pages site is updated. + +Backup + + +GitLab Pages are part of the regular backup, so there is no separate backup to configure. + +Security + + +You should strongly consider running GitLab Pages under a different hostname +than GitLab to prevent XSS attacks. + +Rate limits + + +You can enforce rate limits to help minimize the risk of a Denial of Service (DoS) attack. GitLab Pages +uses a token bucket algorithm to enforce rate limiting. 
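The token bucket model behind these limits is simple: a bucket holds at most a burst worth of tokens, refills at a fixed rate per second, and each request or TLS connection spends one token; whatever finds the bucket empty is over the limit. The fragment below only illustrates that idea with hypothetical helper functions and the example values used later in this section; it is not how the Pages daemon implements it.

# Illustrative token bucket, not the Pages implementation.
RATE=20      # tokens added per second, compare rate_limit_source_ip
BURST=600    # bucket capacity, compare rate_limit_source_ip_burst
tokens=$BURST

refill_tokens() {   # hypothetical helper: call once per second
  tokens=$((tokens + RATE))
  if [ $tokens -gt $BURST ]; then tokens=$BURST; fi
}

handle_request() {  # hypothetical helper: call once per request
  if [ $tokens -gt 0 ]; then
    tokens=$((tokens - 1))
    echo allowed
  else
    echo over limit  # reported by default, rejected only when enforcement is enabled
  fi
}
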
By default, +requests or TLS connections that exceed the specified limits are reported but not rejected. + +GitLab Pages supports the following types of rate limiting: + + + Per source_ip. It limits how many requests or TLS connections are allowed from the single client IP address. + Per domain. It limits how many requests or TLS connections are allowed per domain hosted on GitLab Pages. It can be a custom domain like example.com, or group domain like group.gitlab.io. + + +HTTP request-based rate limits are enforced using the following: + + + +rate_limit_source_ip: Set the maximum threshold in number of requests per client IP per second. Set to 0 to disable this feature. + +rate_limit_source_ip_burst: Sets the maximum threshold of number of requests allowed in an initial outburst of requests per client IP. +For example, when you load a web page that loads a number of resources at the same time. + +rate_limit_domain: Set the maximum threshold in number of requests per hosted pages domain per second. Set to 0 to disable this feature. + +rate_limit_domain_burst: Sets the maximum threshold of number of requests allowed in an initial outburst of requests per hosted pages domain. + + +TLS connection-based rate limits are enforced using the following: + + + +rate_limit_tls_source_ip: Set the maximum threshold in number of TLS connections per client IP per second. Set to 0 to disable this feature. + +rate_limit_tls_source_ip_burst: Sets the maximum threshold of number of TLS connections allowed in an initial outburst of TLS connections per client IP. +For example, when you load a web page from different web browsers at the same time. + +rate_limit_tls_domain: Set the maximum threshold in number of TLS connections per hosted pages domain per second. Set to 0 to disable this feature. + +rate_limit_tls_domain_burst: Sets the maximum threshold of number of TLS connections allowed in an initial outburst of TLS connections per hosted pages domain. + + +An IPv6 address receives a large prefix in the 128-bit address space. The prefix is typically at least size /64. Because of the large number of possible addresses, if the client’s IP address is IPv6, the limit is applied to the IPv6 prefix with a length of 64, rather than the entire IPv6 address. + +Enable HTTP requests rate limits by source-IP + + + +History + + + + + +Introduced in GitLab 14.5. + + + + + + + + + Set rate limits in /etc/gitlab/gitlab.rb: + + +gitlab_pages['rate_limit_source_ip'] = 20.0 +gitlab_pages['rate_limit_source_ip_burst'] = 600 + + + + Reconfigure GitLab. + + + +Enable HTTP requests rate limits by domain + + + +History + + + + + +Introduced in GitLab 14.7. + + + + + + + + + Set rate limits in /etc/gitlab/gitlab.rb: + + +gitlab_pages['rate_limit_domain'] = 1000 +gitlab_pages['rate_limit_domain_burst'] = 5000 + + + + Reconfigure GitLab. + + + +Enable TLS connections rate limits by source-IP + + + +History + + + + + +Introduced in GitLab 14.9. + + + + + + + + + Set rate limits in /etc/gitlab/gitlab.rb: + + +gitlab_pages['rate_limit_tls_source_ip'] = 20.0 +gitlab_pages['rate_limit_tls_source_ip_burst'] = 600 + + + + Reconfigure GitLab. + + + +Enable TLS connections rate limits by domain + + + +History + + + + + +Introduced in GitLab 14.9. + + + + + + + + + Set rate limits in /etc/gitlab/gitlab.rb: + + +gitlab_pages['rate_limit_tls_domain'] = 1000 +gitlab_pages['rate_limit_tls_domain_burst'] = 5000 + + + + Reconfigure GitLab. 
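To see whether a limit takes effect after reconfiguring, a crude check is to fire a burst of requests at one Pages site and tally the response codes. This is only a sketch: project.example.io stands in for one of your Pages domains, and whether over-limit requests show up as errors or are only logged depends on how enforcement is set up on your instance.

# Send 700 quick requests to one Pages domain and summarise the HTTP status codes.
# With the example per-source-IP burst of 600 shown earlier, the tail of the run
# is the part that can exceed the limit.
for i in $(seq 1 700); do
  curl -s -o /dev/null -w '%{http_code}\n' https://project.example.io/
done | sort | uniq -c
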
+ + + +Related topics + + + + Troubleshooting GitLab Pages administration + + + +" +where is gitlab.com hosted in gcp?,,"1. Installing GitLab on Google Cloud Platform + + + +Installing GitLab on Google Cloud Platform + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed + +You can install GitLab on a Google Cloud Platform (GCP) using the official Linux package. You should customize it to accommodate your needs. + + + note To deploy production-ready GitLab on +Google Kubernetes Engine, +you can follow Google Cloud Platform’s +Click to Deploy steps +It’s an alternative to using a GCP VM, and uses +the Cloud native GitLab Helm chart. + + +Prerequisites + + +There are two prerequisites to install GitLab on GCP: + + + You must have a Google account. + You must sign up for the GCP program. If this is your first time, Google +gives you $300 credit for free to consume over a 60-day period. + + +After you have performed those two steps, you can create a VM. + +Creating the VM + + +To deploy GitLab on GCP you must create a virtual machine: + + + Go to https://console.cloud.google.com/compute/instances and sign in with your Google credentials. + + Select Create + + + + + On the next page, you can select the type of VM as well as the +estimated costs. Provide the name of the instance, desired data center, and machine type. +Note our hardware requirements for different user base sizes. + + + + + To select the size, type, and desired operating system, +select Change under Boot disk. select Select when finished. + + As a last step allow HTTP and HTTPS traffic, then select Create. The process finishes in a few seconds. + + +Installing GitLab + + +After a few seconds, the instance is created and available to sign in. The next step is to install GitLab onto the instance. + + + + + Make a note of the external IP address of the instance, as you will need that in a later step. + + Select SSH under the connect column to connect to the instance. + + A new window appears, with you logged into the instance. + + + + + Next, follow the instructions for installing GitLab for the operating system you choose, at https://about.gitlab.com/install/. You can use the external IP address you noted before as the hostname. + + + Congratulations! GitLab is now installed and you can access it via your browser. To finish installation, open the URL in your browser and provide the initial administrator password. The username for this account is root. + + + + + +Next steps + + +These are the most important next steps to take after you installed GitLab for +the first time. + +Assigning a static IP + + +By default, Google assigns an ephemeral IP to your instance. It is strongly +recommended to assign a static IP if you are using GitLab in production +and use a domain name as shown below. + +Read Google’s documentation on how to promote an ephemeral IP address. + +Using a domain name + + +Assuming you have a domain name in your possession and you have correctly +set up DNS to point to the static IP you configured in the previous step, +here’s how you configure GitLab to be aware of the change: + + + + SSH into the VM. You can select SSH in the Google console +and a new window pops up. + + + + In the future you might want to set up connecting with an SSH key +instead. 
+ + + Edit the configuration file of the Linux package using your favorite text editor: + + +sudo vim /etc/gitlab/gitlab.rb + + + + Set the external_url value to the domain name you wish GitLab to have +without https: + + +external_url 'http://gitlab.example.com' + + + We will set up HTTPS in the next step, no need to do this now. + + + Reconfigure GitLab for the changes to take effect: + + +sudo gitlab-ctl reconfigure + + + + You can now visit GitLab using the domain name. + + + +Configuring HTTPS with the domain name + + +Although not needed, it’s strongly recommended to secure GitLab with a +TLS certificate. + +Configuring the email SMTP settings + + +You must configure the email SMTP settings correctly otherwise GitLab cannot send notification emails, like comments, and password changes. +Check the Linux package documentation how to do so. + +Further reading + + +GitLab can be configured to authenticate with other OAuth providers, like LDAP, +SAML, and Kerberos. Here are some documents you might be interested in reading: + + + Linux package documentation + Integration documentation + GitLab Pages configuration + GitLab container registry configuration + + + + + +2. Runner SaaS + + + +Runner SaaS + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com + +You can run your CI/CD jobs on GitLab.com using SaaS runners hosted by GitLab to seamlessly build, test and deploy +your application on different environments. +These runners fully integrated with GitLab.com and are enabled by default for all projects, with no configuration required. +Your jobs can run on: + + + Linux runners + GPU runners + +Windows runners (Beta) + +macOS runners (Beta) + + +For more information about the cost factor applied to the machine type based on size, see cost factor. +The number of minutes you can use on these runners depends on the maximum number of units of compute +in your subscription plan. + +Untagged jobs automatically run in containers +on the small Linux runners. + +The objective is to make 90% of CI/CD jobs start executing in 120 seconds or less. The error rate should be less than 0.5%. + +How SaaS runners work + + +When you use SaaS runners: + + + Each of your jobs runs in a newly provisioned VM, which is dedicated to the specific job. + The VM is active only for the duration of the job and immediately deleted. This means that any changes that your job makes to the virtual machine will not be available to a subsequent job. + The virtual machine where your job runs has sudo access with no password. + The storage is shared by the operating system, the image with pre-installed software, and a copy of your cloned repository. +This means that the available free disk space for your jobs to use is reduced. + + + + note Jobs handled by SaaS runners on GitLab.com time out after 3 hours, regardless of the timeout configured in a project. + + +Release cycle for SaaS runner + + +We aim to update to the latest version of GitLab Runner within a week of its release. + +You can find all GitLab Runner breaking changes under Deprecations and removals. + +Security for SaaS runners + + +GitLab SaaS runners on Linux and Windows run on Google Compute Platform. +The Google Infrastructure Security Design Overview whitepaper +provides an overview of how Google designs security into its technical infrastructure. +The GitLab Trust Center and +GitLab Security Compliance Controls +pages provide an overview of the security and compliance controls that govern the GitLab SaaS runners. 
+ +The following section provides an overview of the additional built-in layers that harden the security of the GitLab Runner SaaS CI build environment. + +Security of CI job execution + + +A dedicated temporary runner VM hosts and runs each CI job. On GitLab SaaS, two CI jobs never run on the same VM. + +In this example, there are three jobs in the project’s pipeline. Therefore, there are three temporary VMs used to run that pipeline, or one VM per job. + + + +The build job ran on runner-ns46nmmj-project-43717858, test job on f131a6a2runner-new2m-od-project-43717858 and deploy job on runner-tmand5m-project-43717858. + +GitLab sends the command to remove the temporary runner VM to the Google Compute API immediately after the CI job completes. The Google Compute Engine hypervisor +takes over the task of securely deleting the virtual machine and associated data. + +Network security of CI job VMs + + + + Firewall rules only allow outbound communication from the temporary VM to the public internet. + Inbound communication from the public internet to the temporary VM is not allowed. + Firewall rules do not permit communication between VMs. + The only internal communication allowed to the temporary VMs is from the runner manager. + + +Supported image lifecycle + + +SaaS runners on macOS and Windows can only run jobs on supported images. You cannot bring your own image. Supported images have the following lifecycle: + + + Beta + Generally Available + Deprecated + + +Beta + + +To gather feedback on an image prior to making the image Generally Available (GA) and to address +any issues, new images are released as Beta. Any jobs running on Beta images are not +covered by the service-level agreement. If you use Beta images, you can provide feedback +by creating an issue. + +Generally Available + + +A Generally Available (GA) image is released after the image completes a Beta phase +and is considered suitable for general use. To become GA, the +image must fulfill the following requirements: + + + Successful completion of a Beta phase by resolving all reported significant bugs + Compatibility of installed software with the underlying OS + + +Jobs running on GA images are covered by the defined service-level agreement. Over time, these images are deprecated. + +Deprecated + + +A maximum of two Generally Available (GA) images are supported at a time. After a new GA image is released, +the oldest GA image becomes deprecated. A deprecated image is no longer +updated and is deleted after 3 months in accordance with the deprecation guidelines. + + +" +is it possible to resolve threads in issues?,,"1. Comments and threads + + + +Comments and threads + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + Paginated merge request discussions introduced in GitLab 15.1 with a flag named paginated_mr_discussions. Disabled by default. + Paginated merge request discussions enabled on GitLab.com in GitLab 15.2. + Paginated merge request discussions enabled on self-managed in GitLab 15.3. + Paginated merge request discussions generally available in GitLab 15.8. Feature flag paginated_mr_discussions removed. + + + + + + +GitLab encourages communication through comments, threads, and +Code Suggestions. + +Two types of comments are available: + + + A standard comment. + A comment in a thread, which can be resolved. + + +In a comment, you can enter Markdown and use quick actions. 
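Comments can also be created through the REST API, which is useful for automation. The call below is a hedged sketch rather than an excerpt from this page: the project ID 123, the issue IID 45, the example body, and the GITLAB_API_TOKEN variable (a personal access token with the api scope) are all assumed placeholder values. A quick action placed on its own line in the body is generally processed the same way as when it is typed in the UI:

# Post a Markdown comment that also assigns the issue with a quick action.
curl --request POST \
  --header ""PRIVATE-TOKEN: $GITLAB_API_TOKEN"" \
  --data-urlencode 'body=The fix looks good to me :thumbsup:
/assign @alex' \
  'https://gitlab.example.com/api/v4/projects/123/issues/45/notes'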
+ +You can suggest code changes in your commit diff comment, +which the user can accept through the user interface. + +Places you can add comments + + +You can create comments in places like: + + + Commit diffs + Commits + Designs + Epics + Issues + Merge requests + Snippets + Tasks + OKRs + + +Each object can have as many as 5,000 comments. + +Mentions + + +You can mention a user or a group (including subgroups) in your GitLab +instance with @username or @groupname. All mentioned users are notified with to-do items and emails. +Users can change this setting for themselves in the notification settings. + +You can quickly see which comments involve you, because +mentions for yourself (the user who is signed in) are highlighted +in a different color. + +Mentioning all members + + + +History + + + + + +Flag named disable_all_mention introduced in GitLab 16.1. Disabled by default. Enabled on GitLab.com. + + + + + + + + On self-managed GitLab, by default this flag is not enabled. To make it available, an administrator can enable the feature flag +named disable_all_mention. +On GitLab.com, this flag is enabled. + + +When this feature flag is enabled, typing @all in comments and descriptions +results in plain text instead of a mention. +When you disable this feature, existing @all mentions in the Markdown texts are not affected +and remain as links. Only future @all mentions appear as plain text. + +Avoid mentioning @all in comments and descriptions. +When you do it, you don’t only mention the participants of the project, issue, or merge request, +but to all members of that project’s parent group. +All these users receive an email notification and a to-do item. It might be interpreted as spam. + +Notifications and mentions can be disabled in +a group’s settings. + +Mention a group in an issue or merge request + + +When you mention a group in a comment, every member of the group gets a to-do item +added to their To-do list. + + + On the left sidebar, select Search or go to and find your project. + For merge requests, select Code > Merge requests, and find your merge request. + For issues, select Plan > Issues, and find your issue. + In a comment, type @ followed by the user, group, or subgroup namespace. +For example, @alex, @alex-team, or @alex-team/marketing. + Select Comment. + + +A to-do item is created for all the group and subgroup members. + +For more information on mentioning subgroups see Mention subgroups. + +Add a comment to a merge request diff + + +You can add comments to a merge request diff. These comments +persist, even when you: + + + Force-push after a rebase. + Amend a commit. + + +To add a commit diff comment: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests, and find your merge request. + Select the Commits tab, then select the commit message. + By the line you want to comment on, hover over the line number and select Comment ( ). +You can select multiple lines by dragging the Comment ( ) icon. + Enter your comment and select Start a review or Add comment now. + + +The comment is displayed on the merge request’s Overview tab. + +The comment is not displayed on your project’s Code > Commits page. + + + note When your comment contains a reference to a commit included in the merge request, +it’s converted to a link in the context of the merge request. 
+For example, 28719b171a056960dfdc0012b625d0b47b123196 becomes 28719b17 that links to +https://gitlab.example.com/example-group/example-project/-/merge_requests/12345/diffs?commit_id=28719b171a056960dfdc0012b625d0b47b123196. + + +Reply to a comment by sending email + + +If you have “reply by email” configured, +you can reply to comments by sending an email. + + + When you reply to a standard comment, it creates another standard comment. + When you reply to a threaded comment, it creates a reply in the thread. + When you send an email to an issue email address, +it creates a standard comment. + + +You can use Markdown and quick actions in your email replies. + +Edit a comment + + +You can edit your own comment at any time. +Anyone with at least the Maintainer role can also edit a comment made by someone else. + +To edit a comment: + + + On the comment, select Edit comment ( ). + Make your edits. + Select Save changes. + + +Editing a comment to add a mention + + +By default, when you mention a user, GitLab creates a to-do item +for them, and sends them a notification email. + +If you edit an existing comment to add a user mention that wasn’t there before, GitLab: + + + Creates a to-do item for the mentioned user. + Does not send a notification email. + + +Prevent comments by locking the discussion + + +You can prevent public comments in an issue or merge request. +When you do, only project members can add and edit comments. + +Prerequisites: + + + In merge requests, you must have at least the Developer role. + In issues, you must have at least the Reporter role. + + +To lock an issue or merge request: + + + On the left sidebar, select Search or go to and find your project. + For merge requests, select Code > Merge requests, and find your merge request. + For issues, select Plan > Issues, and find your issue. + In the upper-right corner, select Merge request actions or Issue actions ( ), then select Lock discussion. + + +A system note is added to the page details. + +If an issue or merge request is closed with a locked discussion, then you cannot reopen it until the discussion is unlocked. + +Add an internal note + + + +History + + + + + +Introduced in GitLab 13.9 with a flag named confidential_notes. Disabled by default. + +Changed in GitLab 14.10: you can only mark comments in issues and epics as confidential. Previously, it was also possible for comments in merge requests and snippets. + +Renamed from “confidential comments” to “internal notes” in GitLab 15.0. + +Enabled on GitLab.com and self-managed in GitLab 15.0. + +Feature flag confidential_notes removed in GitLab 15.2. + +Changed permissions in GitLab 15.6 to at least the Reporter role. In GitLab 15.5 and earlier, issue or epic authors and assignees could also read and create internal notes. + Internal comments introduced for merge requests in GitLab 16.9. + + + + + + +When you add an internal note to a public issue, epic, or merge request, only project members +with least the Reporter role can view the note. Internal notes cannot be converted to regular comments, +and all replies to internal notes are also internal. Internal notes are shown in a different +color than public comments, and display an Internal note badge: + + + +Prerequisites: + + + You must have at least the Reporter role for the project. + + +To add an internal note: + + + On the issue, epic, or merge request, in the Comment text box, type a comment. + Below the comment, select the Make this an internal note checkbox. + Select Add internal note. 
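The same option is available when notes are created through the REST API instead of the UI. The request below is a sketch under a few assumptions: project 123 and issue 45 are placeholders, GITLAB_API_TOKEN is a personal access token with the api scope, and the internal flag is the Notes API parameter that corresponds to the checkbox described above (confirm the parameter name in the API reference for your GitLab version):

# Create a note that only members with at least the Reporter role can read.
curl --request POST \
  --header ""PRIVATE-TOKEN: $GITLAB_API_TOKEN"" \
  --data 'internal=true' \
  --data-urlencode 'body=Internal triage note: impact appears limited to a single customer group.' \
  'https://gitlab.example.com/api/v4/projects/123/issues/45/notes'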
+ + +You can also mark an entire issue as confidential, +or create confidential merge requests. + +Show only comments + + +In discussions with many comments, filter the discussion to show only comments or history of +changes (system notes). System notes include changes to the description, mentions in other GitLab +objects, or changes to labels, assignees, and the milestone. +GitLab saves your preference, and applies it to every issue, merge request, or epic you view. + + + On a merge request, issue, or epic, select the Overview tab. + On the right side of the page, from the Sort or filter dropdown list, select a filter: + + +Show all activity: Display all user comments and system notes. + +Show comments only: Display only user comments. + +Show history only: Display only activity notes. + + + + +Change activity sort order + + +Reverse the default order and interact with the activity feed sorted by most recent items +at the top. GitLab saves your preference in local storage and applies it to every issue, +merge request, or epic you view. + +To change the activity sort order: + + + Open the Overview tab in a merge request, issue, or epic. + On the right side of the page, from the Sort or filter dropdown list, select the sort order +Newest first or Oldest first (default). + + +View description change history + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +You can see changes to the description listed in the history. + +To compare the changes, select Compare with previous version. + +Assign an issue to the commenting user + + +You can assign an issue to a user who made a comment. + + + In the comment, select the More Actions ( ) menu. + Select Assign to commenting user: + + + To unassign the commenter, select the button again. + + +Create a thread by replying to a standard comment + + +When you reply to a standard comment, you create a thread. + +Prerequisites: + + + You must have at least the Guest role. + You must be in an issue, merge request, or epic. Threads in commits and snippets are not supported. + + +To create a thread by replying to a comment: + + + + In the upper-right corner of the comment, select Reply to comment ( ). + + The reply section is displayed. + + Enter your reply. + Select Reply or Add comment now (depending on where in the UI you are replying). + + +The top comment is converted to a thread. + +Create a thread without replying to a comment + + +You can create a thread without replying to a standard comment. + +Prerequisites: + + + You must have at least the Guest role. + You must be in an issue, merge request, commit, or snippet. + + +To create a thread: + + + Enter a comment. + Below the comment, to the right of Comment, select the down arrow ( ). + From the list, select Start thread. + Select Start thread again. + + + + +A threaded comment is created. + +Resolve a thread + + + +History + + + + + Resolvable threads for issues introduced in GitLab 16.3 with a flag named resolvable_issue_threads. Disabled by default. + Resolvable threads for issues enabled on GitLab.com and self-managed in GitLab 16.4. + Resolvable threads for issues generally available in GitLab 16.7. Feature flag resolvable_issue_threads removed. + + + + + + +You can resolve a thread when you want to finish a conversation. + +Prerequisites: + + + You must be in an issue or merge request. + You must have at least the Developer role or be the author of the issue or merge request. + + +To resolve a thread: + + + Go to the thread. 
+ Do one of the following: + + In the upper-right corner of the original comment, select Resolve thread ( ). + Below the last reply, in the Reply field, select Resolve thread. + Below the last reply, in the Reply field, enter text, select the Resolve thread checkbox, and select Add comment now. + + + + + +2. Merge requests + + + +Merge requests + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +A merge request (MR) is a proposal to incorporate changes from a source branch to a target branch. + +When you open a merge request, you can visualize and collaborate on the changes before merge. +Merge requests include: + + + A description of the request. + Code changes and inline code reviews. + Information about CI/CD pipelines. + A comment section for discussion threads. + The list of commits. + + +Create a merge request + + +Learn the various ways to create a merge request. + +Use merge request templates + + +When you create a merge request, GitLab checks for the existence of a +description template to add data to your merge request. +GitLab checks these locations in order from 1 to 5, and applies the first template +found to your merge request: + + + + + Name + Project UIsetting + Groupdefault.md + + Instancedefault.md + + Projectdefault.md + + No template + + + + + Standard commit message + 1 + 2 + 3 + 4 + 5 + + + Commit message with an issue closing pattern like Closes #1234 + + 1 + 2 + 3 + 4 + 5 * + + + Branch name prefixed with an issue ID, like 1234-example + + 1 * + 2 * + 3 * + 4 * + 5 * + + + + + + note Items marked with an asterisk (*) also append an issue closing pattern. + + +View merge requests + + +You can view merge requests for your project, group, or yourself. + +For a project + + +To view all merge requests for a project: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests. + + +Or, to use a keyboard shortcut, press g + m. + +For all projects in a group + + +To view merge requests for all projects in a group: + + + On the left sidebar, select Search or go to and find your group. + Select Code > Merge requests. + + +If your group contains subgroups, this view also displays merge requests from the subgroup projects. + +Assigned to you + + +To view all merge requests assigned to you: + + + On the left sidebar, select Search or go to. + From the dropdown list, select Merge requests assigned to me. + + +or: + + + To use a keyboard shortcut, press Shift + m. + + +or: + + + On the left sidebar, select Code > Merge requests ( ). + From the dropdown list, select Assigned. + + +Filter the list of merge requests + + + +History + + + + + Filtering by source-branch introduced in GitLab 16.6. + Filtering by merged-by introduced in GitLab 16.9. Available only when the feature flag mr_merge_user_filter is enabled. + + + + + + +To filter the list of merge requests: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests. + Above the list of merge requests, select Search or filter results. + From the dropdown list, select the attribute you wish to filter by. Some examples: + + +By environment or deployment date. + +ID: Enter filter #30 to return only merge request 30. + User filters: Type (or select from the dropdown list) any of these filters to display a list of users: + + +Approved-By, for merge requests already approved by a user. Premium and Ultimate only. + +Approver, for merge requests that this user is eligible to approve. 
+(For more information, read about Code owners). Premium and Ultimate only. + +Merged-By, for merge requests merged by this user. + +Reviewer, for merge requests reviewed by this user. + + + + + Select or type the operator to use for filtering the attribute. The following operators are +available: + + +=: Is + +!=: Is not + + + Enter the text to filter the attribute by. +You can filter some attributes by None or Any. + Repeat this process to filter by multiple attributes. Multiple attributes are joined by a logical +AND. + Select a Sort direction, either for descending order, +or for ascending order. + + +By environment or deployment date + + +To filter merge requests by deployment data, such as the environment or a date, +you can type (or select from the dropdown list) the following: + + + Environment + Deployed-before + Deployed-after + + + + note Projects using a fast-forward merge method +do not return results, as this method does not create a merge commit. + + +When filtering by an environment, a dropdown list presents all environments that +you can choose from. + +When filtering by Deployed-before or Deployed-after: + + + The date refers to when the deployment to an environment (triggered by the +merge commit) completed successfully. + You must enter the deploy date manually. + Deploy dates use the format YYYY-MM-DD, and must be wrapped in double quotes ("") +if you want to specify both a date and time (""YYYY-MM-DD HH:MM""). + + +Add changes to a merge request + + +If you have permission to add changes to a merge request, you can add your changes +to an existing merge request in several ways, depending on the complexity of your +change and whether you need access to a development environment: + + + +Edit changes in the Web IDE in your browser with the +. keyboard shortcut. Use this +browser-based method to edit multiple files, or if you are not comfortable with Git commands. +You cannot run tests from the Web IDE. + +Edit changes in Gitpod, if you +need a fully-featured environment to both edit files, and run tests afterward. Gitpod +supports running the GitLab Development Kit (GDK). +To use Gitpod, you must enable Gitpod in your user account. + +Push changes from the command line, if you are +familiar with Git and the command line. + + +Assign a user to a merge request + + +To assign the merge request to a user, use the /assign @user +quick action in a text area in +a merge request, or: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests and find your merge request. + On the right sidebar, expand the right sidebar and locate the Assignees section. + Select Edit. + Search for the user you want to assign, and select the user. + + +The merge request is added to the user’s assigned merge request list. + +Assign multiple users + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + Moved to GitLab Premium in 13.9. + + + + + + +GitLab enables multiple assignees for merge requests, if multiple people are +accountable for it: + + + +To assign multiple assignees to a merge request, use the /assign @user +quick action in a text area, or: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests and find your merge request. + On the right sidebar, expand the right sidebar and locate the Assignees section. + Select Edit and, from the dropdown list, select all users you want +to assign the merge request to. 
+ + +To remove an assignee, clear the user from the same dropdown list. + +Close a merge request + + +If you decide to permanently stop work on a merge request, +GitLab recommends you close the merge request rather than +delete it. The author and assignees of a merge request, and users with +Developer, Maintainer, or Owner roles in a project +can close merge requests in the project: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests and find your merge request. + Scroll to the comment box at the bottom of the page. + Following the comment box, select Close merge request. + + +GitLab closes the merge request, but preserves records of the merge request, +its comments, and any associated pipelines. + +Delete a merge request + + +GitLab recommends you close, rather than delete, merge requests. +You cannot undo the deletion of a merge request. + +Prerequisites: + + + You must have the Owner role for the project. + + +To delete a merge request: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests and find the merge request you want to delete. + Select Edit. + Scroll to the bottom of the page, and select Delete merge request. + + +Delete the source branch on merge + + +You can delete the source branch for a merge request: + + + When you create a merge request, by selecting Delete source branch when merge request accepted. + When you merge a merge request, if you have the Maintainer role, by selecting Delete source branch. + + +An administrator can make this option the default in the project’s settings. + +Update merge requests when target branch merges + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed + + +History + + + + + Chained merge requests changed to automatically rebase on the new target branch in GitLab 16.9. + Chained merge requests no longer automatically rebase on the new target branch in GitLab 16.10 with a flag named :rebase_when_retargetting_mrs. Disabled by default. + + + + + + + + On self-managed GitLab, by default this feature is not available. To make it +available, an administrator can enable the feature flag named :rebase_when_retargetting_mrs. +On GitLab.com and GitLab Dedicated, this feature is not available. + + +Merge requests are often chained together, with one merge request depending on +the code added or changed in another merge request. To support keeping individual +merge requests small, GitLab can update up to four open merge requests when their +target branch merges into main. For example: + + + +Merge request 1: merge feature-alpha into main. + +Merge request 2: merge feature-beta into feature-alpha. + + +If these merge requests are open at the same time, and merge request 1 (feature-alpha) +merges into main, GitLab updates the destination of merge request 2 from feature-alpha +to main. + +Merge requests with interconnected content updates are usually handled in one of these ways: + + + Merge request 1 is merged into main first. Merge request 2 is then +retargeted to main. + Merge request 2 is merged into feature-alpha. The updated merge request 1, which +now contains the contents of feature-alpha and feature-beta, is merged into main. + + +This feature works only when a merge request is merged. Selecting Remove source branch +after merging does not retarget open merge requests. This improvement is +proposed as a follow-up. + +Move sidebar actions + + + + + +History + + + + + +Introduced in GitLab 14.10 with a flag named moved_mr_sidebar. 
Enabled by default. + +Changed to also move actions on issues, incidents, and epics in GitLab 16.0. + + + + + + +When this feature flag is enabled, in the upper-right corner, +Merge request actions ( ) contains the following actions: + + + The notifications toggle + Mark merge request as ready or draft + + Close merge request + Lock discussion + Copy reference + + +In GitLab 16.0 and later, similar action menus are available on issues, incidents, and epics. + +When this feature flag is disabled, these actions are in the right sidebar. + +Merge request workflows + + +For a software developer working in a team: + + + You check out a new branch, and submit your changes through a merge request. + You gather feedback from your team. + You work on the implementation optimizing code with Code Quality reports. + You verify your changes with Unit test reports in GitLab CI/CD. + You avoid using dependencies whose license is not compatible with your project with License approval policies. + You request the approval from your manager. + Your manager: + + Pushes a commit with their final review. + +Approves the merge request. + Sets it to auto-merge (formerly Merge when pipeline succeeds). + + + Your changes get deployed to production with manual jobs for GitLab CI/CD. + Your implementations were successfully shipped to your customer. + + +For a web developer writing a webpage for your company’s website: + + + You check out a new branch and submit a new page through a merge request. + You gather feedback from your reviewers. + You preview your changes with Review Apps. + You request your web designers for their implementation. + You request the approval from your manager. + Once approved, your merge request is squashed and merged, and deployed to staging with GitLab Pages. + Your production team cherry-picks the merge commit into production. + + +Filter activity in a merge request + + + +History + + + + + +Introduced in GitLab 15.11 with a flag named mr_activity_filters. Disabled by default. + +Enabled on GitLab.com in GitLab 16.0. + +Enabled on self-managed in GitLab 16.3 by default. + +Generally available in GitLab 16.5. Feature flag mr_activity_filters removed. + Filtering bot comments introduced in GitLab 16.9. + + + + + + +To understand the history of a merge request, filter its activity feed to show you +only the items that are relevant to you. + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests. + Select a merge request. + Scroll to Activity. + On the right side of the page, select Activity filter to show the filter options. +If you’ve selected filter options previously, this field shows a summary of your +choices, like Activity + 5 more. + + Select the types of activity you want to see. Options include: + + + Assignees & Reviewers + Approvals + Comments (from bots) + Comments (from users) + Commits & branches + Edits + Labels + Lock status + Mentions + Merge request status + Tracking + + + Optional. Select Sort ( ) to reverse the sort order. + + +Your selection persists across all merge requests. You can also change the +sort order by clicking the sort button on the right. + +Resolve a thread + + +When you want to finish a conversation in a merge request, +resolve a thread. + +The number of unresolved threads is shown in the top right corner of a +merge request, like this: 7 unresolved threads. 
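Threads can also be resolved programmatically through the merge request discussions REST API, which helps when you need to clean up a large review in bulk. The two calls below are a sketch built on assumed placeholder values: project 123, merge request 7, a GITLAB_API_TOKEN personal access token with the api scope, and a <discussion_id> taken from the output of the first call.

# Print the IDs of resolvable threads that are still unresolved.
curl --header ""PRIVATE-TOKEN: $GITLAB_API_TOKEN"" \
  'https://gitlab.example.com/api/v4/projects/123/merge_requests/7/discussions?per_page=100' \
  | jq -r '.[] | select(.notes[0].resolvable and (.notes[0].resolved | not)) | .id'

# Resolve a single thread by its discussion ID.
curl --request PUT \
  --header ""PRIVATE-TOKEN: $GITLAB_API_TOKEN"" \
  --data 'resolved=true' \
  'https://gitlab.example.com/api/v4/projects/123/merge_requests/7/discussions/<discussion_id>'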
+ +Move all unresolved threads in a merge request to an issue + + +If you have multiple unresolved threads in a merge request, you can +create an issue to resolve them separately: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests and find your merge request. + In the merge request, in the top right, find the Unresolved threads +dropdown list, and select Thread options ( ). + Select Resolve all with new issue. + Fill out the fields in the new issue, and select Create issue. + + +All threads are marked as resolved, and a link is added from the merge request to +the newly created issue. + +Move one unresolved thread in a merge request to an issue + + +If you have one specific unresolved thread in a merge request, you can +create an issue to resolve it separately: + + + On the left sidebar, select Search or go to and find your project. + Select Code > Merge requests and find your merge request. + In the merge request, find the thread you want to move. + Below the last reply to the thread, next to Resolve thread, select +Create issue to resolve thread ( ). + Fill out the fields in the new issue, and select Create issue. + + +The thread is marked as resolved, and a link is added from the merge request to +the newly created issue. + +Prevent merge unless all threads are resolved + + +You can prevent merge requests from being merged until all threads are +resolved. When this setting is enabled, the Unresolved threads counter in a merge request +is shown in orange when at least one thread remains unresolved. + + + On the left sidebar, select Search or go to and find your project. + Select Settings > Merge requests. + In the Merge checks section, select the All threads must be resolved checkbox. + Select Save changes. + + +Automatically resolve threads in a merge request when they become outdated + + +You can set merge requests to automatically resolve threads when lines are modified +with a new push. + + + On the left sidebar, select Search or go to and find your project. + Select Settings > Merge requests. + In the Merge options section, select +Automatically resolve merge request diff threads when they become outdated. + Select Save changes. + + +Threads are now resolved if a push makes a diff section outdated. +Threads on lines that don’t change and top-level resolvable threads are not resolved. + +Move notifications and to-dos + + +DETAILs: +Tier: Free, Premium, Ultimate +Offering: Self-managed + + +History + + + + + +Introduced in GitLab 16.5 with a flag named notifications_todos_buttons. Disabled by default. + +Issues, incidents, and epics also updated. + + + + + + + + On self-managed GitLab, by default this feature is not available. To make it available, an administrator can enable the feature flag named notifications_todos_buttons. +On GitLab.com and GitLab Dedicated, this feature is not available. + + +When this feature flag is enabled, the notifications and to-do item buttons are moved to the upper right corner of the page. + + + On merge requests, these buttons are located to the far right of the tabs. + On issues, incidents, and epics, these buttons are located at the top of the right sidebar. + + +Related topics + + + + Create a merge request + Review a merge request + Authorization for merge requests + Testing and reports + GitLab keyboard shortcuts + Comments and threads + Suggest code changes + CI/CD pipelines + +Push options for merge requests + + + +" +how do i create a secure connection from a ci job to aws?,,"1. 
Configure OpenID Connect in AWS to retrieve temporary credentials + + + +Configure OpenID Connect in AWS to retrieve temporary credentials + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + + caution +CI_JOB_JWT_V2 was deprecated in GitLab 15.9 +and is scheduled to be removed in GitLab 17.0. Use ID tokens instead. + + +In this tutorial, we’ll show you how to use a GitLab CI/CD job with a JSON web token (JWT) to retrieve temporary credentials from AWS without needing to store secrets. +To do this, you must configure OpenID Connect (OIDC) for ID federation between GitLab and AWS. For background and requirements for integrating GitLab using OIDC, see Connect to cloud services. + +To complete this tutorial: + + + Add the identity provider + Configure the role and trust + Retrieve a temporary credential + + +Add the identity provider + + +Create GitLab as a IAM OIDC provider in AWS following these instructions. + +Include the following information: + + + +Provider URL: The address of your GitLab instance, such as https://gitlab.com or http://gitlab.example.com. This address must be publically accessible. + +Audience: The address of your GitLab instance, such as https://gitlab.com or http://gitlab.example.com. + + The address must include https://. + Do not include a trailing slash. + + + + +Configure a role and trust + + +After you create the identity provider, configure a web identity role with conditions for limiting access to GitLab resources. Temporary credentials are obtained using AWS Security Token Service, so set the Action to sts:AssumeRoleWithWebIdentity. + +You can create a custom trust policy +for the role to limit authorization to a specific group, project, branch, or tag. +For the full list of supported filtering types, see Connect to cloud services. + +{ + ""Version"": ""2012-10-17"", + ""Statement"": [ + { + ""Effect"": ""Allow"", + ""Principal"": { + ""Federated"": ""arn:aws:iam::AWS_ACCOUNT:oidc-provider/gitlab.example.com"" + }, + ""Action"": ""sts:AssumeRoleWithWebIdentity"", + ""Condition"": { + ""StringEquals"": { + ""gitlab.example.com:sub"": ""project_path:mygroup/myproject:ref_type:branch:ref:main"" + } + } + } + ] +} + + +After the role is created, attach a policy defining permissions to an AWS service (S3, EC2, Secrets Manager). + +Retrieve temporary credentials + + +After you configure the OIDC and role, the GitLab CI/CD job can retrieve a temporary credential from AWS Security Token Service (STS). + +assume role: + id_tokens: + GITLAB_OIDC_TOKEN: + aud: https://gitlab.example.com + script: + - > + export $(printf ""AWS_ACCESS_KEY_ID=%s AWS_SECRET_ACCESS_KEY=%s AWS_SESSION_TOKEN=%s"" + $(aws sts assume-role-with-web-identity + --role-arn ${ROLE_ARN} + --role-session-name ""GitLabRunner-${CI_PROJECT_ID}-${CI_PIPELINE_ID}"" + --web-identity-token ${GITLAB_OIDC_TOKEN} + --duration-seconds 3600 + --query 'Credentials.[AccessKeyId,SecretAccessKey,SessionToken]' + --output text)) + - aws sts get-caller-identity + + + + +ROLE_ARN: The role ARN defined in this step. + +GITLAB_OIDC_TOKEN: An OIDC ID token. + + +Working examples + + + + See this reference project for provisioning OIDC in AWS using Terraform and a sample script to retrieve temporary credentials. + +OIDC and Multi-Account Deployment with GitLab and ECS. + AWS Partner (APN) Blog: Setting up OpenID Connect with GitLab CI/CD. + +GitLab at AWS re:Inforce 2023: Secure GitLab CD pipelines to AWS w/ OpenID and JWT. 
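When the AssumeRoleWithWebIdentity call is rejected, a good first step is to inspect the claims inside the ID token, because the trust policy condition shown above matches against them. The snippet below is a debugging sketch you could add to the job's script section; it is not part of the official example. It assumes the GITLAB_OIDC_TOKEN ID token defined in the job above and uses only standard shell tools plus jq. A JWT is signed but not encrypted, so the payload can be decoded locally and compared with the sub and aud values expected by the role's trust policy:

decode_jwt_payload() {
  # Convert base64url to base64 and restore the padding before decoding.
  local part=$(echo ""$1"" | cut -d '.' -f 2 | tr '_-' '/+')
  case $(( ${#part} % 4 )) in
    2) part=""${part}=="" ;;
    3) part=""${part}="" ;;
  esac
  echo ""$part"" | base64 -d
}

# Show the claims most trust policies filter on.
decode_jwt_payload ""$GITLAB_OIDC_TOKEN"" | jq '{iss, aud, sub, project_path, ref, ref_type}'

If the printed sub value does not match the project_path, ref_type, and ref condition in the trust policy, STS rejects the request in exactly the way described in the troubleshooting section that follows.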
+ + +Troubleshooting + + + +An error occurred (AccessDenied) when calling the AssumeRoleWithWebIdentity operation: Not authorized to perform sts:AssumeRoleWithWebIdentity + + +This error can occur for multiple reasons: + + + The cloud administrator has not configured the project to use OIDC with GitLab. + The role is restricted from being run on the branch or tag. See configure a conditional role. + +StringEquals is used instead of StringLike when using a wildcard condition. See related issue. + + + +Could not connect to openid configuration of provider error + + +After adding the Identity Provider in AWS IAM, you might get the following error: + +Your request has a problem. Please see the following details. + - Could not connect to openid configuration of provider: `https://gitlab.example.com` + + +This error occurs when the OIDC identity provider’s issuer presents a certificate chain +that’s out of order, or includes duplicate or additional certificates. + +Verify your GitLab instance’s certificate chain. The chain must start with the domain or issuer URL, +then the intermediate certificate, and end with the root certificate. Use this command to +review the certificate chain, replacing gitlab.example.com with your GitLab hostname: + +echo | /opt/gitlab/embedded/bin/openssl s_client -connect gitlab.example.com:443 + + + +Couldn't retrieve verification key from your identity provider error + + +You might receive an error similar to: + + + An error occurred (InvalidIdentityToken) when calling the AssumeRoleWithWebIdentity operation: Couldn't retrieve verification key from your identity provider, please reference AssumeRoleWithWebIdentity documentation for requirements + + +This error might be because: + + + The .well_known URL and jwks_uri of the identity provider (IdP) are inaccessible from the public internet. + A custom firewall is blocking the requests. + There’s latency of more than 5 seconds in API requests from the IdP to reach the AWS STS endpoint. + STS is making too many requests to your .well_known URL or the jwks_uri of the IdP. + + +As documented in the AWS Knowledge Center article for this error, +your GitLab instance needs to be publicly accessible so that the .well_known URL and jwks_uri can be resolved. +If this is not possible, for example if your GitLab instance is in an offline environment, +you can follow issue #391928 +where a workaround and more permanent solution is being investigated. + + +2. Deploy to AWS from GitLab CI/CD + + + +Deploy to AWS from GitLab CI/CD + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +GitLab provides Docker images with the libraries and tools you need to deploy +to AWS. You can reference these images in your CI/CD pipeline. + +If you’re using GitLab.com and deploying to the Amazon Elastic Container Service (ECS), +read about deploying to ECS. + + + note If you are comfortable configuring a deployment yourself and just need to retrieve +AWS credentials, consider using ID tokens and OpenID Connect. +ID tokens are more secure than storing credentials in CI/CD variables, but do not +work with the guidance on this page. + + +Authenticate GitLab with AWS + + +To use GitLab CI/CD to connect to AWS, you must authenticate. +After you set up authentication, you can configure CI/CD to deploy. + + + Sign on to your AWS account. + Create an IAM user. + Select your user to access its details. Go to Security credentials > Create a new access key. + Note the Access key ID and Secret access key. 
+ + In your GitLab project, go to Settings > CI/CD. Set the following +CI/CD variables: + + + + + Environment variable name + Value + + + + + AWS_ACCESS_KEY_ID + Your Access key ID. + + + AWS_SECRET_ACCESS_KEY + Your secret access key. + + + AWS_DEFAULT_REGION + Your region code. You might want to confirm that the AWS service you intend to use is available in the chosen region. + + + + + Variables are protected by default. +To use GitLab CI/CD with branches or tags that are not protected, +clear the Protect variable checkbox. + + +Use an image to run AWS commands + + +If an image contains the AWS Command Line Interface, +you can reference the image in your project’s .gitlab-ci.yml file. Then you can run +aws commands in your CI/CD jobs. + +For example: + +deploy: + stage: deploy + image: registry.gitlab.com/gitlab-org/cloud-deploy/aws-base:latest + script: + - aws s3 ... + - aws create-deployment ... + environment: production + + +GitLab provides a Docker image that includes the AWS CLI: + + + Images are hosted in the GitLab container registry. The latest image is +registry.gitlab.com/gitlab-org/cloud-deploy/aws-base:latest. + +Images are stored in a GitLab repository. + + +Alternately, you can use an Amazon Elastic Container Registry (ECR) image. +Learn how to push an image to your ECR repository. + +You can also use an image from any third-party registry. + +Deploy your application to ECS + + + +History + + + + + +Introduced in GitLab 12.9. + The Deploy-ECS.gitlab-ci.yml template was moved to AWS/Deploy-ECS.gitlab-ci.yml in GitLab 13.2. + + + + + + +You can automate deployments of your application to your Amazon ECS +cluster. + +Prerequisites: + + + +Authenticate AWS with GitLab. + Create a cluster on Amazon ECS. + Create related components, like an ECS service or a database on Amazon RDS. + Create an ECS task definition, where the value for the containerDefinitions[].name attribute is +the same as the Container name defined in your targeted ECS service. The task definition can be: + + An existing task definition in ECS. + +In GitLab 13.3 and later, +a JSON file in your GitLab project. Use the +template in the AWS documentation +and save the file in your project. For example /ci/aws/task-definition.json. + + + + +To deploy to your ECS cluster: + + + + In your GitLab project, go to Settings > CI/CD. Set the following +CI/CD variables. You can find these names by +selecting the targeted cluster on your Amazon ECS dashboard. + + + + + Environment variable name + Value + + + + + CI_AWS_ECS_CLUSTER + The name of the AWS ECS cluster that you’re targeting for your deployments. + + + CI_AWS_ECS_SERVICE + The name of the targeted service tied to your AWS ECS cluster. Ensure that this variable is scoped to the appropriate environment (production, staging, review/*). + + + CI_AWS_ECS_TASK_DEFINITION + If the task definition is in ECS, the name of the task definition tied to the service. + + + CI_AWS_ECS_TASK_DEFINITION_FILE + If the task definition is a JSON file in GitLab, the filename, including the path. For example, ci/aws/my_task_definition.json. If the name of the task definition in your JSON file is the same name as an existing task definition in ECS, then a new revision is created when CI/CD runs. Otherwise, a brand new task definition is created, starting at revision 1. + + + + + + caution If you define both CI_AWS_ECS_TASK_DEFINITION_FILE and CI_AWS_ECS_TASK_DEFINITION, +CI_AWS_ECS_TASK_DEFINITION_FILE takes precedence. 
+ + + + Include this template in .gitlab-ci.yml: + + +include: + - template: AWS/Deploy-ECS.gitlab-ci.yml + + + The AWS/Deploy-ECS template ships with GitLab and is available +on GitLab.com. + + + Commit and push your updated .gitlab-ci.yml to your project’s repository. + + + +Your application Docker image is rebuilt and pushed to the GitLab container registry. +If your image is located in a private registry, make sure your task definition is +configured with a repositoryCredentials attribute. + +The targeted task definition is updated with the location of the new +Docker image, and a new revision is created in ECS as result. + +Finally, your AWS ECS service is updated with the new revision of the +task definition, making the cluster pull the newest version of your +application. + + + note ECS deploy jobs wait for the rollout to complete before exiting. To disable this behavior, +set CI_AWS_ECS_WAIT_FOR_ROLLOUT_COMPLETE_DISABLED to a non-empty value. + + + + caution The AWS/Deploy-ECS.gitlab-ci.yml +template includes two templates: Jobs/Build.gitlab-ci.yml +and Jobs/Deploy/ECS.gitlab-ci.yml. Do not include these templates on their own. Only include the +AWS/Deploy-ECS.gitlab-ci.yml template. These other templates are designed to be +used only with the main template. They may move or change unexpectedly. Also, the job names within +these templates may change. Do not override these job names in your own pipeline, +because the override stops working when the name changes. + + +Deploy your application to EC2 + + + +History + + + + + +Introduced in GitLab 13.5. + + + + + + +GitLab provides a template, called AWS/CF-Provision-and-Deploy-EC2, +to assist you in deploying to Amazon EC2. + +When you configure related JSON objects and use the template, the pipeline: + + + +Creates the stack: Your infrastructure is provisioned by using +the AWS CloudFormation API. + +Pushes to an S3 bucket: When your build runs, it creates an artifact. +The artifact is pushed to an AWS S3 bucket. + +Deploys to EC2: The content is deployed on an AWS EC2 instance. + + + + +Configure the template and JSON + + +To deploy to EC2, complete the following steps. + + + Create JSON for your stack. Use the AWS template. + + Create JSON to push to S3. Include the following details. + + +{ + ""applicationName"": ""string"", + ""source"": ""string"", + ""s3Location"": ""s3://your/bucket/project_built_file...]"" +} + + + The source is the location where a build job built your application. +The build is saved to artifacts:paths. + + Create JSON to deploy to EC2. Use the AWS template. + Make the JSON objects accessible to your pipeline: + + + If you want these JSON objects saved in your repository, save the objects as three +separate files. + + In your .gitlab-ci.yml file, add CI/CD variables +that point to the file paths relative to the project root. For example, +if your JSON files are in a /aws folder: + + +variables: + CI_AWS_CF_CREATE_STACK_FILE: 'aws/cf_create_stack.json' + CI_AWS_S3_PUSH_FILE: 'aws/s3_push.json' + CI_AWS_EC2_DEPLOYMENT_FILE: 'aws/create_deployment.json' + + + + If you do not want these JSON objects saved in your repository, add each object +as a separate file type CI/CD variable +in the project settings. Use the same variable names as above. + + + + + In your .gitlab-ci.yml file, create a CI/CD variable for the name of the stack. 
For example: + + +variables: + CI_AWS_CF_STACK_NAME: 'YourStackName' + + + + In your .gitlab-ci.yml file, add the CI template: + + +include: + - template: AWS/CF-Provision-and-Deploy-EC2.gitlab-ci.yml + + + + Run the pipeline. + + + Your AWS CloudFormation stack is created based on the content of your +CI_AWS_CF_CREATE_STACK_FILE variable. +If your stack already exists, this step is skipped, but the provision +job it belongs to still runs. + Your built application is pushed to your S3 bucket then and deployed to your EC2 instance, based +on the related JSON object’s content. The deployment job finishes when the deployment to EC2 +is done or has failed. + + + + +Troubleshooting + + +Error 'ascii' codec can't encode character '\uxxxx' + + +This error can occur when the response from the aws-cli utility used by the Cloud Deploy images contains a Unicode character. The Cloud Deploy images we provide do not have a defined locale and default to using ASCII. To resolve this error, add the following CI/CD variable: + +variables: + LANG: ""UTF-8"" + + + +" +is it wise to set a lifecycle rule on top of a bucket in google cloud storage if my files in the bucket are managed by an external application? i have noi control on when the files will be deleted: does it make sense to start moving them to lower storage classes in this case?,,"1. GitLab container registry administration + + + +GitLab container registry administration + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed + + + note The next-generation container registry +is now available for upgrade and testing on self-managed instances as a beta feature. +This upgraded registry supports online garbage collection, and has significant performance +and reliability improvements. + + +With the GitLab container registry, every project can have its +own space to store Docker images. + +For more details about the Distribution Registry: + + + Configuration + Storage drivers + Deploy a registry server + + +This document is the administrator’s guide. To learn how to use the GitLab Container +Registry, see the user documentation. + +Enable the container registry + + +The process for enabling the container registry depends on the type of installation you use. + +Linux package installations + + +If you installed GitLab by using the Linux package, the container registry +may or may not be available by default. + +The container registry is automatically enabled and available on your GitLab domain, port 5050 if +you’re using the built-in Let’s Encrypt integration. + +Otherwise, the container registry is not enabled. To enable it: + + + You can configure it for your GitLab domain, or + You can configure it for a different domain. + + +The container registry works under HTTPS by default. You can use HTTP +but it’s not recommended and is beyond the scope of this document. + +Self-compiled installations + + +If you self-compiled your GitLab installation: + + + You must deploy a registry using the image corresponding to the +version of GitLab you are installing +(for example: registry.gitlab.com/gitlab-org/build/cng/gitlab-container-registry:v3.15.0-gitlab) + After the installation is complete, to enable it, you must configure the Registry’s +settings in gitlab.yml. + Use the sample NGINX configuration file from under +lib/support/nginx/registry-ssl and edit it to match the +host, port, and TLS certificate paths. 
+ + +The contents of gitlab.yml are: + +registry: + enabled: true + host: registry.gitlab.example.com + port: 5005 + api_url: http://localhost:5000/ + key: config/registry.key + path: shared/registry + issuer: gitlab-issuer + + +Where: + + + + + Parameter + Description + + + + + enabled + +true or false. Enables the Registry in GitLab. By default this is false. + + + host + The host URL under which the Registry runs and users can use. + + + port + The port the external Registry domain listens on. + + + api_url + The internal API URL under which the Registry is exposed. It defaults to http://localhost:5000. Do not change this unless you are setting up an external Docker registry. + + + key + The private key location that is a pair of Registry’s rootcertbundle. + + + path + This should be the same directory like specified in Registry’s rootdirectory. This path needs to be readable by the GitLab user, the web-server user and the Registry user. + + + issuer + This should be the same value as configured in Registry’s issuer. + + + + +A Registry init file is not shipped with GitLab if you install it from source. +Hence, restarting GitLab does not restart the Registry should +you modify its settings. Read the upstream documentation on how to achieve that. + +At the absolute minimum, make sure your Registry configuration +has container_registry as the service and https://gitlab.example.com/jwt/auth +as the realm: + +auth: + token: + realm: https://gitlab.example.com/jwt/auth + service: container_registry + issuer: gitlab-issuer + rootcertbundle: /root/certs/certbundle + + + + caution If auth is not set up, users can pull Docker images without authentication. + + +Container registry domain configuration + + +You can configure the Registry’s external domain in either of these ways: + + + +Use the existing GitLab domain. +The Registry listens on a port and reuses the TLS certificate from GitLab. + +Use a completely separate domain with a new TLS certificate +for that domain. + + +Because the container registry requires a TLS certificate, cost may be a factor. + +Take this into consideration before configuring the container registry +for the first time. + +Configure container registry under an existing GitLab domain + + +If the container registry is configured to use the existing GitLab domain, you can +expose the container registry on a port. This way you can reuse the existing GitLab TLS +certificate. + +If the GitLab domain is https://gitlab.example.com and the port to the outside world is 5050, +to configure the container registry: + + + Edit gitlab.rb if you are using a Linux package installation. + Edit gitlab.yml if you are using a self-compiled installation. + + +Ensure you choose a port different than the one that Registry listens to (5000 by default), +otherwise conflicts occur. + + + note Host and container firewall rules must be configured to allow traffic in through the port listed +under the registry_external_url line, rather than the port listed under +gitlab_rails['registry_port'] (default 5000). + + +Linux package (Omnibus)Self-compiled (source) + + Your /etc/gitlab/gitlab.rb should contain the Registry URL as well as the +path to the existing TLS certificate and key used by GitLab: + + +registry_external_url 'https://gitlab.example.com:5050' + + + The registry_external_url is listening on HTTPS under the +existing GitLab URL, but on a different port. 
+ + If your TLS certificate is not in /etc/gitlab/ssl/gitlab.example.com.crt +and key not in /etc/gitlab/ssl/gitlab.example.com.key uncomment the lines +below: + + +registry_nginx['ssl_certificate'] = ""/path/to/certificate.pem"" +registry_nginx['ssl_certificate_key'] = ""/path/to/certificate.key"" + + + + Save the file and reconfigure GitLab +for the changes to take effect. + + + Validate using: + + +openssl s_client -showcerts -servername gitlab.example.com -connect gitlab.example.com:5050 > cacert.pem + + +If your certificate provider provides the CA Bundle certificates, append them to the TLS certificate file.An administrator may want the container registry listening on an arbitrary port such as 5678. +However, the registry and application server are behind an AWS application load balancer that only +listens on ports 80 and 443. The administrator may remove the port number for +registry_external_url, so HTTP or HTTPS is assumed. Then, the rules apply that map the load +balancer to the registry from ports 80 or 443 to the arbitrary port. This is important if users +rely on the docker login example in the container registry. Here’s an example:registry_external_url 'https://registry-gitlab.example.com' +registry_nginx['redirect_http_to_https'] = true +registry_nginx['listen_port'] = 5678 + + + Open /home/git/gitlab/config/gitlab.yml, find the registry entry and +configure it with the following settings: + + +registry: + enabled: true + host: gitlab.example.com + port: 5050 + + + Save the file and restart GitLab for the changes to take effect. + Make the relevant changes in NGINX as well (domain, port, TLS certificates path). + + +Users should now be able to sign in to the container registry with their GitLab +credentials using: + +docker login gitlab.example.com:5050 + + +Configure container registry under its own domain + + +When the Registry is configured to use its own domain, you need a TLS +certificate for that specific domain (for example, registry.example.com). You might need +a wildcard certificate if hosted under a subdomain of your existing GitLab +domain. For example, *.gitlab.example.com, is a wildcard that matches registry.gitlab.example.com, +and is distinct from *.example.com. + +As well as manually generated SSL certificates (explained here), certificates automatically +generated by Let’s Encrypt are also supported in Linux package installations. + +Let’s assume that you want the container registry to be accessible at +https://registry.gitlab.example.com. + +Linux package (Omnibus)Self-compiled (source) + + Place your TLS certificate and key in +/etc/gitlab/ssl/registry.gitlab.example.com.crt and +/etc/gitlab/ssl/registry.gitlab.example.com.key and make sure they have +correct permissions: + + +chmod 600 /etc/gitlab/ssl/registry.gitlab.example.com.* + + + + After the TLS certificate is in place, edit /etc/gitlab/gitlab.rb with: + + +registry_external_url 'https://registry.gitlab.example.com' + + + The registry_external_url is listening on HTTPS. + + + Save the file and reconfigure GitLab for the changes to take effect. 
+ +If you have a wildcard certificate, you must specify the path to the +certificate in addition to the URL, in this case /etc/gitlab/gitlab.rb +looks like:registry_nginx['ssl_certificate'] = ""/etc/gitlab/ssl/certificate.pem"" +registry_nginx['ssl_certificate_key'] = ""/etc/gitlab/ssl/certificate.key"" + + + Open /home/git/gitlab/config/gitlab.yml, find the registry entry and +configure it with the following settings: + + +registry: + enabled: true + host: registry.gitlab.example.com + + + Save the file and restart GitLab for the changes to take effect. + Make the relevant changes in NGINX as well (domain, port, TLS certificates path). + + +Users should now be able to sign in to the container registry using their GitLab +credentials: + +docker login registry.gitlab.example.com + + +Disable container registry site-wide + + +When you disable the Registry by following these steps, you do not +remove any existing Docker images. Docker image removal is handled by the +Registry application itself. + +Linux package (Omnibus)Self-compiled (source) + + Open /etc/gitlab/gitlab.rb and set registry['enable'] to false: + + +registry['enable'] = false + + + + Save the file and reconfigure GitLab for the changes to take effect. + + + + Open /home/git/gitlab/config/gitlab.yml, find the registry entry and +set enabled to false: + + +registry: + enabled: false + + + + Save the file and restart GitLab for the changes to take effect. + + + +Disable container registry for new projects site-wide + + +If the container registry is enabled, then it should be available on all new +projects. To disable this function and let the owners of a project to enable +the container registry by themselves, follow the steps below. + +Linux package (Omnibus)Self-compiled (source) + + Edit /etc/gitlab/gitlab.rb and add the following line: + + +gitlab_rails['gitlab_default_projects_features_container_registry'] = false + + + + Save the file and reconfigure GitLab for the changes to take effect. + + + + Open /home/git/gitlab/config/gitlab.yml, find the default_projects_features +entry and configure it so that container_registry is set to false: + + +## Default project features settings +default_projects_features: + issues: true + merge_requests: true + wiki: true + snippets: false + builds: true + container_registry: false + + + + Save the file and restart GitLab for the changes to take effect. + + + +Increase token duration + + +In GitLab, tokens for the container registry expire every five minutes. +To increase the token duration: + + + On the left sidebar, at the bottom, select Admin Area. + Select Settings > CI/CD. + Expand Container Registry. + For the Authorization token duration (minutes), update the value. + Select Save changes. + + +Configure storage for the container registry + + + + note For storage backends that support it, you can use object versioning to preserve, retrieve, and +restore the non-current versions of every object stored in your buckets. However, this may result in +higher storage usage and costs. Due to how the registry operates, image uploads are first stored in +a temporary path and then transferred to a final location. For object storage backends, including S3 +and GCS, this transfer is achieved with a copy followed by a delete. With object versioning enabled, +these deleted temporary upload artifacts are kept as non-current versions, therefore increasing the +storage bucket size. 
To ensure that non-current versions are deleted after a given amount of time, +you should configure an object lifecycle policy with your storage provider. + + + + caution Do not directly modify the files or objects stored by the container registry. Anything other than the registry writing or deleting these entries can lead to instance-wide data consistency and instability issues from which recovery may not be possible. + + +You can configure the container registry to use various storage backends by +configuring a storage driver. By default the GitLab container registry +is configured to use the file system driver +configuration. + +The different supported drivers are: + + + + + Driver + Description + + + + + filesystem + Uses a path on the local file system + + + azure + Microsoft Azure Blob Storage + + + gcs + Google Cloud Storage + + + s3 + Amazon Simple Storage Service. Be sure to configure your storage bucket with the correct S3 Permission Scopes. + + + + +Although most S3 compatible services (like MinIO) should work with the container registry, +we only guarantee support for AWS S3. Because we cannot assert the correctness of third-party S3 implementations, +we can debug issues, but we cannot patch the registry unless an issue is reproducible against an AWS S3 bucket. + + + + + caution Support for the following drivers was deprecated +in GitLab 16.6, and is planned for removal in 17.0. This change is a breaking change. + + + + + + Driver + Description + + + + + swift + OpenStack Swift Object Storage + + + oss + Aliyun OSS + + + + + + +Use file system + + +If you want to store your images on the file system, you can change the storage +path for the container registry, follow the steps below. + +This path is accessible to: + + + The user running the container registry daemon. + The user running GitLab. + + +All GitLab, Registry, and web server users must +have access to this directory. + +Linux package (Omnibus)Self-compiled (source)The default location where images are stored in Linux package installations is +/var/opt/gitlab/gitlab-rails/shared/registry. To change it: + + Edit /etc/gitlab/gitlab.rb: + + +gitlab_rails['registry_path'] = ""/path/to/registry/storage"" + + + + Save the file and reconfigure GitLab for the changes to take effect. + +The default location where images are stored in self-compiled installations is +/home/git/gitlab/shared/registry. To change it: + + Open /home/git/gitlab/config/gitlab.yml, find the registry entry and +change the path setting: + + +registry: + path: shared/registry + + + + Save the file and restart GitLab for the changes to take effect. + + + +Use object storage + + +If you want to store your images on object storage, you can change the storage +driver for the container registry. + +Read more about using object storage with GitLab. + + + caution GitLab does not back up Docker images that are not stored on the +file system. Enable backups with your object storage provider if +desired. + + +Configure s3 and gcs storage drivers for Linux package installations + + +The following configuration steps are for the s3 and gcs storage drivers. Other storage drivers are supported. 
+ +To configure the s3 storage driver for a Linux package installation: + + + + Edit /etc/gitlab/gitlab.rb: + + +registry['storage'] = { + 's3' => { + 'accesskey' => 's3-access-key', + 'secretkey' => 's3-secret-key-for-access-key', + 'bucket' => 'your-s3-bucket', + 'region' => 'your-s3-region', + 'regionendpoint' => 'your-s3-regionendpoint' + } +} + + + To avoid using static credentials, use an +IAM role +and omit accesskey and secretkey. Make sure that your IAM profile follows +the permissions documented by Docker. + + +registry['storage'] = { + 's3' => { + 'bucket' => 'your-s3-bucket', + 'region' => 'your-s3-region' + } +} + + + If using with an AWS S3 VPC endpoint, +then set regionendpoint to your VPC endpoint address and set pathstyle to false: + + +registry['storage'] = { + 's3' => { + 'accesskey' => 's3-access-key', + 'secretkey' => 's3-secret-key-for-access-key', + 'bucket' => 'your-s3-bucket', + 'region' => 'your-s3-region', + 'regionendpoint' => 'your-s3-vpc-endpoint', + 'pathstyle' => false + } +} + + + + +regionendpoint is only required when configuring an S3 compatible service such as MinIO, or +when using an AWS S3 VPC Endpoint. + +your-s3-bucket should be the name of a bucket that exists, and can’t include subdirectories. + +pathstyle should be set to true to use host/bucket_name/object style paths instead of +bucket_name.host/object. Set to false for AWS S3. + + + You can set a rate limit on connections to S3 to avoid 503 errors from the S3 API. To do this, +set maxrequestspersecond to a number within the S3 request rate threshold: + + + registry['storage'] = { + 's3' => { + 'accesskey' => 's3-access-key', + 'secretkey' => 's3-secret-key-for-access-key', + 'bucket' => 'your-s3-bucket', + 'region' => 'your-s3-region', + 'regionendpoint' => 'your-s3-regionendpoint', + 'maxrequestspersecond' => 100 + } + } + + + + Save the file and reconfigure GitLab for the changes to take effect. + + + +To configure the gcs storage driver for a Linux package installation: + + + + Edit /etc/gitlab/gitlab.rb: + + + registry['storage'] = { + 'gcs' => { + 'bucket' => 'BUCKET_NAME', + 'keyfile' => 'PATH/TO/KEYFILE', + # If you have the bucket shared with other apps beyond the registry, uncomment the following: + # 'rootdirectory' => '/gcs/object/name/prefix' + } + } + + + GitLab supports all available parameters. + + + Save the file and reconfigure GitLab for the changes to take effect. + + + +Self-compiled installations + + +Configuring the storage driver is done in the registry configuration YAML file created +when you deployed your Docker registry. + +s3 storage driver example: + +storage: + s3: + accesskey: 's3-access-key' # Not needed if IAM role used + secretkey: 's3-secret-key-for-access-key' # Not needed if IAM role used + bucket: 'your-s3-bucket' + region: 'your-s3-region' + regionendpoint: 'your-s3-regionendpoint' + cache: + blobdescriptor: inmemory + delete: + enabled: true + + +your-s3-bucket should be the name of a bucket that exists, and can’t include subdirectories. + +Migrate to object storage without downtime + + + + caution Using AWS DataSync +to copy the registry data to or between S3 buckets creates invalid metadata objects in the bucket. +For additional details, see Tags with an empty name. +To move data to and between S3 buckets, the AWS CLI sync operation is recommended. + + +To migrate storage without stopping the container registry, set the container registry +to read-only mode. 
On large instances, this may require the container registry +to be in read-only mode for a while. During this time, +you can pull from the container registry, but you cannot push. + + + Optional: To reduce the amount of data to be migrated, run the garbage collection tool without downtime. + + This example uses the aws CLI. If you haven’t configured the +CLI before, you have to configure your credentials by running sudo aws configure. +Because a non-administrator user likely can’t access the container registry folder, +ensure you use sudo. To check your credential configuration, run +ls to list +all buckets. + + +sudo aws --endpoint-url https://your-object-storage-backend.com s3 ls + + + If you are using AWS as your back end, you do not need the --endpoint-url. + + + Copy initial data to your S3 bucket, for example with the aws CLI +cp +or sync +command. Make sure to keep the docker folder as the top-level folder inside the bucket. + + +sudo aws --endpoint-url https://your-object-storage-backend.com s3 sync registry s3://mybucket + + + + note If you have a lot of data, you may be able to improve performance by +running parallel sync operations. + + + To perform the final data sync, +put the container registry in read-only mode and +reconfigure GitLab. + + Sync any changes dating from after the initial data load to your S3 bucket, and delete files that exist in the destination bucket but not in the source: + + +sudo aws --endpoint-url https://your-object-storage-backend.com s3 sync registry s3://mybucket --delete --dryrun + + + After verifying the command performs as expected, remove the +--dryrun +flag and run the command. + + + caution The --delete +flag deletes files that exist in the destination but not in the source. +If you swap the source and destination, all data in the Registry is deleted. + + + + Verify all container registry files have been uploaded to object storage +by looking at the file count returned by these two commands: + + +sudo find registry -type f | wc -l + + + +sudo aws --endpoint-url https://your-object-storage-backend.com s3 ls s3://mybucket --recursive | wc -l + + + The output of these commands should match, except for the content in the +_uploads directories and sub-directories. + + Configure your registry to use the S3 bucket for storage. + For the changes to take effect, set the Registry back to read-write mode and reconfigure GitLab. + + +Moving to Azure Object Storage + + + +History + + + + + The default configuration for the storage driver is scheduled to be changed in GitLab 16.0. + + + + + + +When moving from an existing file system or another object storage provider to Azure Object Storage, you must configure the registry to use the standard root directory. +Configure it by setting trimlegacyrootprefix: true in the Azure storage driver section of the registry configuration. +Without this configuration, the Azure storage driver uses // instead of / as the first section of the root path, rendering the migrated images inaccessible. 
+ +Linux package (Omnibus)Self-compiled (source)registry['storage'] = { + 'azure' => { + 'accountname' => 'accountname', + 'accountkey' => 'base64encodedaccountkey', + 'container' => 'containername', + 'rootdirectory' => '/azure/virtual/container', + 'trimlegacyrootprefix' => true + } +} +storage: + azure: + accountname: accountname + accountkey: base64encodedaccountkey + container: containername + rootdirectory: /azure/virtual/container + trimlegacyrootprefix: true + + +By default, Azure Storage Driver uses the core.windows.net realm. You can set another value for realm in the azure section (for example, core.usgovcloudapi.net for Azure Government Cloud). + +Disable redirect for storage driver + + +By default, users accessing a registry configured with a remote backend are redirected to the default backend for the storage driver. For example, registries can be configured using the s3 storage driver, which redirects requests to a remote S3 bucket to alleviate load on the GitLab server. + +However, this behavior is undesirable for registries used by internal hosts that usually can’t access public servers. To disable redirects and proxy download, set the disable flag to true as follows. This makes all traffic always go through the Registry service. This results in improved security (less surface attack as the storage backend is not publicly accessible), but worse performance (all traffic is redirected via the service). + +Linux package (Omnibus)Self-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +registry['storage'] = { + 's3' => { + 'accesskey' => 's3-access-key', + 'secretkey' => 's3-secret-key-for-access-key', + 'bucket' => 'your-s3-bucket', + 'region' => 'your-s3-region', + 'regionendpoint' => 'your-s3-regionendpoint' + }, + 'redirect' => { + 'disable' => true + } +} + + + + Save the file and reconfigure GitLab for the changes to take effect. + + + + Add the redirect flag to your registry configuration YAML file: + + +storage: + s3: + accesskey: 'AKIAKIAKI' + secretkey: 'secret123' + bucket: 'gitlab-registry-bucket-AKIAKIAKI' + region: 'your-s3-region' + regionendpoint: 'your-s3-regionendpoint' + redirect: + disable: true + cache: + blobdescriptor: inmemory + delete: + enabled: true + + + + Save the file and restart GitLab for the changes to take effect. + + + +Encrypted S3 buckets + + +You can use server-side encryption with AWS KMS for S3 buckets that have +SSE-S3 or SSE-KMS encryption enabled by default. +Customer master keys (CMKs) and SSE-C encryption aren’t supported because this requires sending the +encryption keys in every request. + +For SSE-S3, you must enable the encrypt option in the registry settings. How you do this depends +on how you installed GitLab. Follow the instructions here that match your installation method. + +Linux package (Omnibus)Self-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +registry['storage'] = { + 's3' => { + 'accesskey' => 's3-access-key', + 'secretkey' => 's3-secret-key-for-access-key', + 'bucket' => 'your-s3-bucket', + 'region' => 'your-s3-region', + 'regionendpoint' => 'your-s3-regionendpoint', + 'encrypt' => true + } +} + + + + Save the file and reconfigure GitLab +for the changes to take effect. + + + + Edit your registry configuration YAML file: + + +storage: + s3: + accesskey: 'AKIAKIAKI' + secretkey: 'secret123' + bucket: 'gitlab-registry-bucket-AKIAKIAKI' + region: 'your-s3-region' + regionendpoint: 'your-s3-regionendpoint' + encrypt: true + + + + Save the file and restart GitLab +for the changes to take effect. 
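To verify that server-side encryption is actually applied to new registry objects, you can inspect any object the registry has written by using the AWS CLI. This is only a sketch: the bucket name and the object key below are placeholders, so substitute an object that really exists under the docker/registry/v2/ prefix in your bucket.

aws s3api head-object --bucket your-s3-bucket --key docker/registry/v2/repositories/group/project/_layers/sha256/EXAMPLE_DIGEST/link

The response should include a ServerSideEncryption field, such as AES256 for SSE-S3 or aws:kms for SSE-KMS.
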
+ + + +Storage limitations + + +There is no storage limitation, which means a user can upload an +infinite amount of Docker images with arbitrary sizes. This setting should be +configurable in future releases. + +Change the registry’s internal port + + +The Registry server listens on localhost at port 5000 by default, +which is the address for which the Registry server should accept connections. +In the examples below we set the Registry’s port to 5010. + +Linux package (Omnibus)Self-compiled (source) + + Open /etc/gitlab/gitlab.rb and set registry['registry_http_addr']: + + +registry['registry_http_addr'] = ""localhost:5010"" + + + + Save the file and reconfigure GitLab for the changes to take effect. + + + + Open the configuration file of your Registry server and edit the +http:addr value: + + +http: + addr: localhost:5010 + + + + Save the file and restart the Registry server. + + + +Disable container registry per project + + +If Registry is enabled in your GitLab instance, but you don’t need it for your +project, you can disable it from your project’s settings. + +Use an external container registry with GitLab as an auth endpoint + + + + caution Using third-party container registries in GitLab was deprecated +in GitLab 15.8 and support ended in GitLab 16.0. +If you need to use third-party container registries instead of the GitLab container registry, +tell us about your use cases in feedback issue 958. + + +If you use an external container registry, some features associated with the +container registry may be unavailable or have inherent risks. + +For the integration to work, the external registry must be configured to +use a JSON Web Token to authenticate with GitLab. The +external registry’s runtime configuration +must have the following entries: + +auth: + token: + realm: https://gitlab.example.com/jwt/auth + service: container_registry + issuer: gitlab-issuer + rootcertbundle: /root/certs/certbundle + + +Without these entries, the registry logins cannot authenticate with GitLab. +GitLab also remains unaware of +nested image names +under the project hierarchy, like +registry.example.com/group/project/image-name:tag or +registry.example.com/group/project/my/image-name:tag, and only recognizes +registry.example.com/group/project:tag. + +Linux package installations + + +You can use GitLab as an auth endpoint with an external container registry. + + + + Open /etc/gitlab/gitlab.rb and set necessary configurations: + + +gitlab_rails['registry_enabled'] = true +gitlab_rails['registry_api_url'] = ""https://:5000"" +gitlab_rails['registry_issuer'] = ""gitlab-issuer"" + + + + +gitlab_rails['registry_enabled'] = true is needed to enable GitLab +container registry features and authentication endpoint. The GitLab bundled +container registry service does not start, even with this enabled. + +gitlab_rails['registry_api_url'] = ""http://:5000"" +must be changed to match the host where Registry is installed. +It must also specify https if the external registry is +configured to use TLS. + + + + A certificate-key pair is required for GitLab and the external container +registry to communicate securely. You need to create a certificate-key +pair, configuring the external container registry with the public +certificate (rootcertbundle) and configuring GitLab with the private key. +To do that, add the following to /etc/gitlab/gitlab.rb: + + +# registry['internal_key'] should contain the contents of the custom key +# file. 
Line breaks in the key file should be marked using `\n` character +# Example: +registry['internal_key'] = ""---BEGIN RSA PRIVATE KEY---\nMIIEpQIBAA\n"" + +# Optionally define a custom file for a Linux package installation to write the contents +# of registry['internal_key'] to. +gitlab_rails['registry_key_path'] = ""/custom/path/to/registry-key.key"" + + + Each time reconfigure is executed, the file specified at registry_key_path +gets populated with the content specified by internal_key. If +no file is specified, Linux package installations default it to +/var/opt/gitlab/gitlab-rails/etc/gitlab-registry.key and populates +it. + + + To change the container registry URL displayed in the GitLab Container +Registry pages, set the following configurations: + + +gitlab_rails['registry_host'] = ""registry.gitlab.example.com"" +gitlab_rails['registry_port'] = ""5005"" + + + + Save the file and reconfigure GitLab +for the changes to take effect. + + + +Self-compiled installations + + + + + Open /home/git/gitlab/config/gitlab.yml, and edit the configuration settings under registry: + + +## Container registry + +registry: + enabled: true + host: ""registry.gitlab.example.com"" + port: ""5005"" + api_url: ""https://:5000"" + path: /var/lib/registry + key: /path/to/keyfile + issuer: gitlab-issuer + + + Read more about what these parameters mean. + + + Save the file and restart GitLab for the changes to take effect. + + + +Configure container registry notifications + + +You can configure the container registry to send webhook notifications in +response to events happening in the registry. + +Read more about the container registry notifications configuration options in the +Docker Registry notifications documentation. + +You can configure multiple endpoints for the container registry. + +Linux package (Omnibus)Self-compiled (source)To configure a notification endpoint for a Linux package installation: + + Edit /etc/gitlab/gitlab.rb: + + +registry['notifications'] = [ + { + 'name' => 'test_endpoint', + 'url' => 'https://gitlab.example.com/notify', + 'timeout' => '500ms', + 'threshold' => 5, + 'backoff' => '1s', + 'headers' => { + ""Authorization"" => [""AUTHORIZATION_EXAMPLE_TOKEN""] + } + } +] + + + + Save the file and reconfigure GitLab for the changes to take effect. + +Configuring the notification endpoint is done in your registry configuration YAML file created +when you deployed your Docker registry.Example:notifications: + endpoints: + - name: alistener + disabled: false + url: https://my.listener.com/event + headers: + timeout: 500 + threshold: 5 + backoff: 1000 + + +Run the Cleanup policy now + + + + caution If you’re using a distributed architecture and Sidekiq is running on a different node, the cleanup +policies don’t work. To fix this: + + + + Configure the gitlab.rb file on the Sidekiq nodes to +point to the correct registry URL. + Copy the registry.key file to each Sidekiq node. + + +For more information, see the Sidekiq configuration +page. + +To reduce the amount of Container Registry disk space used by a given project, +administrators can setup cleanup policies +and run garbage collection. + +Registry Disk Space Usage by Project + + +To find the disk space used by each project, run the following in the +GitLab Rails console: + +projects_and_size = [[""project_id"", ""creator_id"", ""registry_size_bytes"", ""project path""]] +# You need to specify the projects that you want to look through. You can get these in any manner. 
+projects = Project.last(100) + +projects.each do |p| + project_total_size = 0 + container_repositories = p.container_repositories + + container_repositories.each do |c| + c.tags.each do |t| + project_total_size = project_total_size + t.total_size unless t.total_size.nil? + end + end + + if project_total_size > 0 + projects_and_size << [p.project_id, p.creator&.id, project_total_size, p.full_path] + end +end + +# print it as comma separated output +projects_and_size.each do |ps| + puts ""%s,%s,%s,%s"" % ps +end + + +To remove image tags by running the cleanup policy, run the following commands in the +GitLab Rails console: + +# Numeric ID of the project whose container registry should be cleaned up +P = + +# Numeric ID of a user with Developer, Maintainer, or Owner role for the project +U = + +# Get required details / objects +user = User.find_by_id(U) +project = Project.find_by_id(P) +policy = ContainerExpirationPolicy.find_by(project_id: P) + +# Loop through each container repository +project.container_repositories.find_each do |repo| + puts repo.attributes + + # Start the tag cleanup + puts Projects::ContainerRepository::CleanupTagsService.new(container_repository: repo, current_user: user, params: policy.attributes.except(""created_at"", ""updated_at"")).execute +end + + +You can also run cleanup on a schedule. + +To enable cleanup policies for all projects instance-wide, you need to find all projects +with a container registry, but with the cleanup policy disabled: + +# Find all projects where Container registry is enabled, and cleanup policies disabled + +projects = Project.find_by_sql (""SELECT * FROM projects WHERE id IN (SELECT project_id FROM container_expiration_policies WHERE enabled=false AND id IN (SELECT project_id FROM container_repositories))"") + +# Loop through each project +projects.each do |p| + +# Print project IDs and project full names + puts ""#{p.id},#{p.full_name}"" +end + + +Container registry metadata database + + + +Tier: Free, Premium, Ultimate +Offering: Self-managed +Status: Beta + +The metadata database enables many new registry features, including +online garbage collection, and increases the efficiency of many registry operations. +See the Container registry metadata database page for details. + +Container registry garbage collection + + + + note Retention policies in your object storage provider, such as Amazon S3 Lifecycle, may prevent +objects from being properly deleted. + + +The container registry can use considerable amounts of storage space, and you might want to +reduce storage usage. +Among the listed options, deleting tags is the most effective option. However, tag deletion +alone does not delete image layers, it only leaves the underlying image manifests untagged. + +To more effectively free up space, the container registry has a garbage collector that can +delete unreferenced layers and (optionally) untagged manifests. + +To start the garbage collector, use the registry-garbage-collect command provided by gitlab-ctl. + + + caution This command shuts down the container registry prior to the garbage collection and +only starts it again after garbage collection completes. If you prefer to avoid downtime, +you can manually set the container registry to read-only mode and bypass gitlab-ctl. + + +The time required to perform garbage collection is proportional to the container registry data size. + +Prerequisites: + + + You must have installed GitLab by using a Linux package or the +GitLab Helm chart. 
+ + +Understanding the content-addressable layers + + +Consider the following example, where you first build the image: + +# This builds a image with content of sha256:111111 +docker build -t my.registry.com/my.group/my.project:latest . +docker push my.registry.com/my.group/my.project:latest + + +Now, you do overwrite :latest with a new version: + +# This builds a image with content of sha256:222222 +docker build -t my.registry.com/my.group/my.project:latest . +docker push my.registry.com/my.group/my.project:latest + + +Now, the :latest tag points to manifest of sha256:222222. +Due to the architecture of registry, this data is still accessible when pulling the +image my.registry.com/my.group/my.project@sha256:111111, though it is +no longer directly accessible via the :latest tag. + +Remove unreferenced layers + + +Image layers are the bulk of the container registry storage. A layer is considered +unreferenced when no image manifest references it. Unreferenced layers are the +default target of the container registry garbage collector. + +If you did not change the default location of the configuration file, run: + +sudo gitlab-ctl registry-garbage-collect + + +If you changed the location of the container registry config.yml: + +sudo gitlab-ctl registry-garbage-collect /path/to/config.yml + + +You can also remove all untagged manifests and unreferenced layers +to recover additional space. + +Removing untagged manifests and unreferenced layers + + +By default the container registry garbage collector ignores images that are untagged, +and users can keep pulling untagged images by digest. Users can also re-tag images +in the future, making them visible again in the GitLab UI and API. + +If you do not care about untagged images and the layers exclusively referenced by these images, +you can delete them all. Use the -m flag on the registry-garbage-collect command: + +sudo gitlab-ctl registry-garbage-collect -m + + +If you are unsure about deleting untagged images, back up your registry data before proceeding. + +Performing garbage collection without downtime + + +To do garbage collection while keeping the container registry online, put the registry +in read-only mode and bypass the built-in gitlab-ctl registry-garbage-collect command. + +You can pull but not push images while the container registry is in read-only mode. The container +registry must remain in read-only for the full duration of the garbage collection. + +By default, the registry storage path +is /var/opt/gitlab/gitlab-rails/shared/registry. + +To enable the read-only mode: + + + + In /etc/gitlab/gitlab.rb, specify the read-only mode: + + + registry['storage'] = { + 'filesystem' => { + 'rootdirectory' => """" + }, + 'maintenance' => { + 'readonly' => { + 'enabled' => true + } + } + } + + + + Save and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + This command sets the container registry into the read-only mode. + + + Next, trigger one of the garbage collect commands: + + +# Remove unreferenced layers +sudo /opt/gitlab/embedded/bin/registry garbage-collect /var/opt/gitlab/registry/config.yml + +# Remove untagged manifests and unreferenced layers +sudo /opt/gitlab/embedded/bin/registry garbage-collect -m /var/opt/gitlab/registry/config.yml + + + This command starts the garbage collection. The time to complete is proportional to the registry data size. 
+ + + Once done, in /etc/gitlab/gitlab.rb change it back to read-write mode: + + + registry['storage'] = { + 'filesystem' => { + 'rootdirectory' => """" + }, + 'maintenance' => { + 'readonly' => { + 'enabled' => false + } + } + } + + + + Save and reconfigure GitLab: + + +sudo gitlab-ctl reconfigure + + + + +Running the garbage collection on schedule + + +Ideally, you want to run the garbage collection of the registry regularly on a +weekly basis at a time when the registry is not being in-use. +The simplest way is to add a new crontab job that it runs periodically +once a week. + +Create a file under /etc/cron.d/registry-garbage-collect: + +SHELL=/bin/sh +PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin + +# Run every Sunday at 04:05am +5 4 * * 0 root gitlab-ctl registry-garbage-collect + + +You may want to add the -m flag to remove untagged manifests and unreferenced layers. + +Stop garbage collection + + +If you anticipate stopping garbage collection, you should manually run garbage collection as +described in Performing garbage collection without downtime. +You can then stop garbage collection by pressing Control+C. + +Otherwise, interrupting gitlab-ctl could leave your registry service in a down state. In this +case, you must find the garbage collection process +itself on the system so that the gitlab-ctl command can bring the registry service back up again. + +Also, there’s no way to save progress or results during the mark phase of the process. Only once +blobs start being deleted is anything permanent done. + +Continuous Zero Downtime Garbage Collection + + + +Status: Beta + +You can run garbage collection in the background without the need to schedule it or require read-only mode, +if you migrate to the metadata database. + + + note If you would like to try this Beta feature, +you should review the known limitations. If you have any feedback, +you can let us know in the feedback issue. + + +Configure GitLab and Registry to run on separate nodes (Linux package installations) + + +By default, package assumes that both services are running on the same node. +To get GitLab and Registry to run on a separate nodes, separate configuration +is necessary for Registry and GitLab. + +Configure Registry + + +Below you can find configuration options you should set in /etc/gitlab/gitlab.rb, +for Registry to run separately from GitLab: + + + +registry['registry_http_addr'], default set programmatically. Needs to be reachable by web server (or LB). + +registry['token_realm'], default set programmatically. Specifies the endpoint to use to perform authentication, usually the GitLab URL. +This endpoint needs to be reachable by user. + +registry['http_secret'], random string. A random piece of data used to sign state that may be stored with the client to protect against tampering. + +registry['internal_key'], default automatically generated. Contents of the key that GitLab uses to sign the tokens. They key gets created on the Registry server, but it is not used there. + +gitlab_rails['registry_key_path'], default set programmatically. This is the path where internal_key contents are written to disk. + +registry['internal_certificate'], default automatically generated. Contents of the certificate that GitLab uses to sign the tokens. + +registry['rootcertbundle'], default set programmatically. Path to certificate. This is the path where internal_certificate +contents are written to disk. + +registry['health_storagedriver_enabled'], default set programmatically. 
Configure whether health checks on the configured storage driver are enabled. + +gitlab_rails['registry_issuer'], default value. This setting needs to be set the same between Registry and GitLab. + + +Configure GitLab + + +Below you can find configuration options you should set in /etc/gitlab/gitlab.rb, +for GitLab to run separately from Registry: + + + +gitlab_rails['registry_enabled'], must be set to true. This setting +signals to GitLab that it should allow Registry API requests. + +gitlab_rails['registry_api_url'], default set programmatically. This is the Registry URL used internally that users do not need to interact with, registry['registry_http_addr'] with scheme. + +gitlab_rails['registry_host'], for example, registry.gitlab.example. Registry endpoint without the scheme, the address that gets shown to the end user. + +gitlab_rails['registry_port']. Registry endpoint port, visible to the end user. + +gitlab_rails['registry_issuer'] must match the issuer in the Registry configuration. + +gitlab_rails['registry_key_path'], path to the key that matches the certificate on the +Registry side. + +gitlab_rails['internal_key'], contents of the key that GitLab uses to sign the tokens. + + +Architecture of GitLab container registry + + +The GitLab registry is what users use to store their own Docker images. +Because of that the Registry is client facing, meaning that we expose it directly +on the web server (or load balancers, LB for short). + + + +The flow described by the diagram above: + + + A user runs docker login registry.gitlab.example on their client. This reaches the web server (or LB) on port 443. + Web server connects to the Registry backend pool (by default, using port 5000). Since the user +didn’t provide a valid token, the Registry returns a 401 HTTP code and the URL (token_realm from +Registry configuration) where to get one. This points to the GitLab API. + The Docker client then connects to the GitLab API and obtains a token. + The API signs the token with the registry key and hands it to the Docker client + The Docker client now logs in again with the token received from the API. It can now push and pull Docker images. + + +Reference: https://distribution.github.io/distribution/spec/auth/token/ + +Communication between GitLab and Registry + + +Registry doesn’t have a way to authenticate users internally so it relies on +GitLab to validate credentials. The connection between Registry and GitLab is +TLS encrypted. The key is used by GitLab to sign the tokens while the certificate +is used by Registry to validate the signature. By default, a self-signed certificate key pair is generated +for all installations. This can be overridden as needed. + +GitLab interacts with the Registry using the Registry private key. When a Registry +request goes out, a new short-living (10 minutes) namespace limited token is generated +and signed with the private key. +The Registry then verifies that the signature matches the registry certificate +specified in its configuration and allows the operation. +GitLab background jobs processing (through Sidekiq) also interacts with Registry. +These jobs talk directly to Registry to handle image deletion. + +Migrate from a third-party registry + + +Using external container registries in GitLab was deprecated +in GitLab 15.8 and the end of support occurred in GitLab 16.0. See the deprecation notice for more details. + +The integration is not disabled in GitLab 16.0, but support for debugging and fixing issues +is no longer provided. 
Additionally, the integration is no longer being developed or +enhanced with new features. Third-party registry functionality might be completely removed +after the new GitLab container registry version is available for self-managed (see epic 5521). Only the GitLab container registry is planned to be supported. + +This section has guidance for administrators migrating from third-party registries +to the GitLab container registry. If the third-party container registry you are using is not listed here, +you can describe your use cases in the feedback issue. + +For all of the instructions provided below, you should try them first on a test environment. +Make sure everything continues to work as expected before replicating it in production. + +Docker Distribution Registry + + +The Docker Distribution Registry was donated to the CNCF +and is now known as the Distribution Registry. +This registry is the open source implementation that the GitLab container registry is based on. +The GitLab container registry is compatible with the basic functionality provided by the Distribution Registry, +including all the supported storage backends. To migrate to the GitLab container registry +you can follow the instructions on this page, and use the same storage backend as the Distribution Registry. +The GitLab container registry should accept the same configuration that you are using for the Distribution Registry. + +Troubleshooting + + +Before diving in to the following sections, here’s some basic troubleshooting: + + + + Check to make sure that the system clock on your Docker client and GitLab server have +been synchronized (for example, via NTP). + + + If you are using an S3-backed Registry, double check that the IAM +permissions and the S3 credentials (including region) are correct. See +the sample IAM policy +for more details. + + + Check the Registry logs (for example /var/log/gitlab/registry/current) and the GitLab production logs +for errors (for example /var/log/gitlab/gitlab-rails/production.log). You may be able to find clues +there. + + + +Using self-signed certificates with container registry + + +If you’re using a self-signed certificate with your container registry, you +might encounter issues during the CI jobs like the following: + +Error response from daemon: Get registry.example.com/v1/users/: x509: certificate signed by unknown authority + + +The Docker daemon running the command expects a cert signed by a recognized CA, +thus the error above. + +While GitLab doesn’t support using self-signed certificates with Container +Registry out of the box, it is possible to make it work by +instructing the Docker daemon to trust the self-signed certificates, +mounting the Docker daemon and setting privileged = false in the GitLab Runner +config.toml file. Setting privileged = true takes precedence over the Docker daemon: + + [runners.docker] + image = ""ruby:2.6"" + privileged = false + volumes = [""/var/run/docker.sock:/var/run/docker.sock"", ""/cache""] + + +Additional information about this: issue 18239. 
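
One common way to instruct the Docker daemon to trust the certificate is to install it under /etc/docker/certs.d/ on every host whose daemon is used by your jobs. A sketch, assuming the registry is reachable at registry.example.com:5050 and the certificate is available locally as registry.crt (both placeholders):

sudo mkdir -p /etc/docker/certs.d/registry.example.com:5050
sudo cp registry.crt /etc/docker/certs.d/registry.example.com:5050/ca.crt
# A daemon restart may be needed before it picks up the new certificate
sudo systemctl restart docker

Because the runner configuration above mounts the host Docker socket, trusting the certificate on the host is generally enough for jobs that use that daemon.
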
+ +Docker login attempt fails with: ‘token signed by untrusted key’ + + +Registry relies on GitLab to validate credentials +If the registry fails to authenticate valid login attempts, you get the following error message: + +# docker login gitlab.company.com:4567 +Username: user +Password: +Error response from daemon: login attempt to https://gitlab.company.com:4567/v2/ failed with status: 401 Unauthorized + + +And more specifically, this appears in the /var/log/gitlab/registry/current log file: + +level=info msg=""token signed by untrusted key with ID: ""TOKE:NL6Q:7PW6:EXAM:PLET:OKEN:BG27:RCIB:D2S3:EXAM:PLET:OKEN"""" +level=warning msg=""error authorizing context: invalid token"" go.version=go1.12.7 http.request.host=""gitlab.company.com:4567"" http.request.id=74613829-2655-4f96-8991-1c9fe33869b8 http.request.method=GET http.request.remoteaddr=10.72.11.20 http.request.uri=""/v2/"" http.request.useragent=""docker/19.03.2 go/go1.12.8 git-commit/6a30dfc kernel/3.10.0-693.2.2.el7.x86_64 os/linux arch/amd64 UpstreamClient(Docker-Client/19.03.2 \(linux\))"" + + +GitLab uses the contents of the certificate key pair’s two sides to encrypt the authentication token +for the Registry. This message means that those contents do not align. + +Check which files are in use: + + + + grep -A6 'auth:' /var/opt/gitlab/registry/config.yml + + +## Container registry certificate + auth: + token: + realm: https://gitlab.my.net/jwt/auth + service: container_registry + issuer: omnibus-gitlab-issuer + --> rootcertbundle: /var/opt/gitlab/registry/gitlab-registry.crt + autoredirect: false + + + + grep -A9 'Container Registry' /var/opt/gitlab/gitlab-rails/etc/gitlab.yml + + +## Container registry key + registry: + enabled: true + host: gitlab.company.com + port: 4567 + api_url: http://127.0.0.1:5000 # internal address to the registry, is used by GitLab to directly communicate with API + path: /var/opt/gitlab/gitlab-rails/shared/registry +--> key: /var/opt/gitlab/gitlab-rails/etc/gitlab-registry.key + issuer: omnibus-gitlab-issuer + notification_secret: + + + + +The output of these openssl commands should match, proving that the cert-key pair is a match: + +/opt/gitlab/embedded/bin/openssl x509 -noout -modulus -in /var/opt/gitlab/registry/gitlab-registry.crt | /opt/gitlab/embedded/bin/openssl sha256 +/opt/gitlab/embedded/bin/openssl rsa -noout -modulus -in /var/opt/gitlab/gitlab-rails/etc/gitlab-registry.key | /opt/gitlab/embedded/bin/openssl sha256 + + +If the two pieces of the certificate do not align, remove the files and run gitlab-ctl reconfigure +to regenerate the pair. The pair is recreated using the existing values in /etc/gitlab/gitlab-secrets.json if they exist. To generate a new pair, +delete the registry section in your /etc/gitlab/gitlab-secrets.json before running gitlab-ctl reconfigure. + +If you have overridden the automatically generated self-signed pair with +your own certificates and have made sure that their contents align, you can delete the ‘registry’ +section in your /etc/gitlab/gitlab-secrets.json and run gitlab-ctl reconfigure. + +AWS S3 with the GitLab registry error when pushing large images + + +When using AWS S3 with the GitLab registry, an error may occur when pushing +large images. Look in the Registry log for the following error: + +level=error msg=""response completed with error"" err.code=unknown err.detail=""unexpected EOF"" err.message=""unknown error"" + + +To resolve the error specify a chunksize value in the Registry configuration. 
+Start with a value between 25000000 (25 MB) and 50000000 (50 MB). + +Linux package (Omnibus)Self-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +registry['storage'] = { + 's3' => { + 'accesskey' => 'AKIAKIAKI', + 'secretkey' => 'secret123', + 'bucket' => 'gitlab-registry-bucket-AKIAKIAKI', + 'chunksize' => 25000000 + } +} + + + + Save the file and reconfigure GitLab for the changes to take effect. + + + + Edit config/gitlab.yml: + + +storage: + s3: + accesskey: 'AKIAKIAKI' + secretkey: 'secret123' + bucket: 'gitlab-registry-bucket-AKIAKIAKI' + chunksize: 25000000 + + + + Save the file and restart GitLab for the changes to take effect. + + + +Supporting older Docker clients + + +The Docker container registry shipped with GitLab disables the schema1 manifest +by default. If you are still using older Docker clients (1.9 or older), you may +experience an error pushing images. See +issue 4145 for more details. + +You can add a configuration option for backwards compatibility. + +Linux package (Omnibus)Self-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +registry['compatibility_schema1_enabled'] = true + + + + Save the file and reconfigure GitLab for the changes to take effect. + + + + Edit the YAML configuration file you created when you deployed the registry. Add the following snippet: + + +compatibility: + schema1: + enabled: true + + + + Restart the registry for the changes to take affect. + + + +Docker connection error + + +A Docker connection error can occur when there are special characters in either the group, +project or branch name. Special characters can include: + + + Leading underscore + Trailing hyphen/dash + Double hyphen/dash + + +To get around this, you can change the group path, +change the project path or change the +branch name. Another option is to create a push rule to prevent +this at the instance level. + +Image push errors + + +When getting errors or “retrying” loops in an attempt to push an image but docker login works fine, +there is likely an issue with the headers forwarded to the registry by NGINX. The default recommended +NGINX configurations should handle this, but it might occur in custom setups where the SSL is +offloaded to a third party reverse proxy. + +This problem was discussed in a Docker project issue +and a simple solution would be to enable relative URLs in the Registry. + +Linux package (Omnibus)Self-compiled (source) + + Edit /etc/gitlab/gitlab.rb: + + +registry['env'] = { + ""REGISTRY_HTTP_RELATIVEURLS"" => true +} + + + + Save the file and reconfigure GitLab for the changes to take effect. + + + + Edit the YAML configuration file you created when you deployed the registry. Add the following snippet: + + +http: + relativeurls: true + + + + Save the file and restart GitLab for the changes to take effect. + + + +Enable the Registry debug server + + +You can use the container registry debug server to diagnose problems. The debug endpoint can monitor metrics and health, as well as do profiling. + + + caution Sensitive information may be available from the debug endpoint. +Access to the debug endpoint must be locked down in a production environment. + + +The optional debug server can be enabled by setting the registry debug address +in your gitlab.rb configuration. + +registry['debug_addr'] = ""localhost:5001"" + + +After adding the setting, reconfigure GitLab to apply the change. 
+ +Use curl to request debug output from the debug server: + +curl ""localhost:5001/debug/health"" +curl ""localhost:5001/debug/vars"" + + +Access old schema v1 Docker images + + +Support for the Docker registry API V1, +including schema V1 image manifests, +was: + + + Deprecated in GitLab 13.7 + Removed in GitLab 13.9 + + +It’s no longer possible to push or pull v1 images from the GitLab container registry. + +If you had v1 images in the GitLab container registry, but you did not upgrade them (following the +steps Docker recommends) +ahead of the GitLab 13.9 upgrade, these images are no longer accessible. If you try to pull them, +this error appears: + + + Error response from daemon: manifest invalid: Schema 1 manifest not supported + + +For self-managed GitLab instances, you can regain access to these images by temporarily downgrading +the GitLab container registry to a version lower than v3.0.0-gitlab. Follow these steps to regain +access to these images: + + + Downgrade the container registry to v2.13.1-gitlab. + Upgrade any v1 images. + Revert the container registry downgrade. + + +There’s no need to put the registry in read-only mode during the image upgrade process. Ensure that +you are not relying on any new feature introduced since v3.0.0-gitlab. Such features are +unavailable during the upgrade process. See the complete registry changelog +for more information. + +The following sections provide additional details about each installation method. + +Helm chart (Kubernetes)Linux package (Omnibus)Self-compiled (source)For Helm chart installations: + Override the image.tag +configuration parameter with v2.13.1-gitlab. + Restart. + Performing the images upgrade) steps. + Revert the image.tag parameter to the previous value. +No other registry configuration changes are required.For Linux package installations: + + Temporarily replace the registry binary that ships with GitLab 13.9+ for one prior to +v3.0.0-gitlab. To do so, pull a previous version of the Docker image for the GitLab Container +Registry, such as v2.13.1-gitlab. You can then grab the registry binary from within this +image, located at /bin/registry: + + +id=$(docker create registry.gitlab.com/gitlab-org/build/cng/gitlab-container-registry:v2.13.1-gitlab) +docker cp $id:/bin/registry registry-2.13.1-gitlab +docker rm $id + + + + Replace the binary embedded in the Linux package installation located at +/opt/gitlab/embedded/bin/registry, with registry-2.13.1-gitlab. Make sure to start by backing +up the original binary embedded in the Linux package, and restore it after performing the +image upgrade steps. You should stop +the registry service before replacing its binary and start it right after. No registry +configuration changes are required. + +Locate your registry binary and temporarily replace it with the one +obtained from v3.0.0-gitlab, as explained for Linux package installations. +Make sure to start by backing up the original registry binary, and restore it after performing the +images upgrade steps. + +Images upgrade + + +Follow the steps that Docker recommends to upgrade v1 images. +The most straightforward option is to pull those images and push them once again to the registry, +using a Docker client version above v1.12. Docker converts images automatically before pushing them +to the registry. Once done, all your v1 images should now be available as v2 images. 
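
For a single image, the round trip is just a pull followed by a push from a machine with a recent Docker client. The image path below is a placeholder; repeat or script these two commands for every v1 image you want to keep:

docker pull registry.gitlab.example.com/group/project:tag
docker push registry.gitlab.example.com/group/project:tag
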
+ +Tags with an empty name + + +If using AWS DataSync +to copy the registry data to or between S3 buckets, an empty metadata object is created in the root +path of each container repository in the destination bucket. This causes the registry to interpret +such files as a tag that appears with no name in the GitLab UI and API. For more information, see +this issue. + +To fix this you can do one of two things: + + + + Use the AWS CLI rm +command to remove the empty objects from the root of each affected repository. Pay special +attention to the trailing / and make sure not to use the --recursive option: + + +aws s3 rm s3:///docker/registry/v2/repositories// + + + + Use the AWS CLI sync +command to copy the registry data to a new bucket and configure the registry to use it. This +leaves the empty objects behind. + + + +Advanced Troubleshooting + + +We use a concrete example to illustrate how to +diagnose a problem with the S3 setup. + +Investigate a cleanup policy + + +If you’re unsure why your cleanup policy did or didn’t delete a tag, execute the policy line by line +by running the below script from the Rails console. +This can help diagnose problems with the policy. + +repo = ContainerRepository.find() +policy = repo.project.container_expiration_policy + +tags = repo.tags +tags.map(&:name) + +tags.reject!(&:latest?) +tags.map(&:name) + +regex_delete = ::Gitlab::UntrustedRegexp.new(""\\A#{policy.name_regex}\\z"") +regex_retain = ::Gitlab::UntrustedRegexp.new(""\\A#{policy.name_regex_keep}\\z"") + +tags.select! { |tag| regex_delete.match?(tag.name) && !regex_retain.match?(tag.name) } + +tags.map(&:name) + +now = DateTime.current +tags.sort_by! { |tag| tag.created_at || now }.reverse! # Lengthy operation + +tags = tags.drop(policy.keep_n) +tags.map(&:name) + +older_than_timestamp = ChronicDuration.parse(policy.older_than).seconds.ago + +tags.select! { |tag| tag.created_at && tag.created_at < older_than_timestamp } + +tags.map(&:name) + + + + The script builds the list of tags to delete (tags). + +tags.map(&:name) prints a list of tags to remove. This may be a lengthy operation. + After each filter, check the list of tags to see if it contains the intended tags to destroy. + + +Unexpected 403 error during push + + +A user attempted to enable an S3-backed Registry. The docker login step went +fine. However, when pushing an image, the output showed: + +The push refers to a repository [s3-testing.myregistry.com:5050/root/docker-test/docker-image] +dc5e59c14160: Pushing [==================================================>] 14.85 kB +03c20c1a019a: Pushing [==================================================>] 2.048 kB +a08f14ef632e: Pushing [==================================================>] 2.048 kB +228950524c88: Pushing 2.048 kB +6a8ecde4cc03: Pushing [==> ] 9.901 MB/205.7 MB +5f70bf18a086: Pushing 1.024 kB +737f40e80b7f: Waiting +82b57dbc5385: Waiting +19429b698a22: Waiting +9436069b92a3: Waiting +error parsing HTTP 403 response body: unexpected end of JSON input: """" + + +This error is ambiguous, as it’s not clear whether the 403 is coming from the +GitLab Rails application, the Docker Registry, or something else. In this +case, since we know that since the login succeeded, we probably need to look +at the communication between the client and the Registry. + +The REST API between the Docker client and Registry is described +in the Docker documentation. Usually, one would just +use Wireshark or tcpdump to capture the traffic and see where things went +wrong. 
However, since all communications between Docker clients and servers +are done over HTTPS, it’s a bit difficult to decrypt the traffic quickly even +if you know the private key. What can we do instead? + +One way would be to disable HTTPS by setting up an +insecure Registry. This could introduce a +security hole and is only recommended for local testing. If you have a +production system and can’t or don’t want to do this, there is another way: +use mitmproxy, which stands for Man-in-the-Middle Proxy. + +mitmproxy + + +mitmproxy allows you to place a proxy between your +client and server to inspect all traffic. One wrinkle is that your system +needs to trust the mitmproxy SSL certificates for this to work. + +The following installation instructions assume you are running Ubuntu: + + + +Install mitmproxy. + Run mitmproxy --port 9000 to generate its certificates. +Enter CTRL-C to quit. + + Install the certificate from ~/.mitmproxy to your system: + + +sudo cp ~/.mitmproxy/mitmproxy-ca-cert.pem /usr/local/share/ca-certificates/mitmproxy-ca-cert.crt +sudo update-ca-certificates + + + + +If successful, the output should indicate that a certificate was added: + +Updating certificates in /etc/ssl/certs... 1 added, 0 removed; done. +Running hooks in /etc/ca-certificates/update.d....done. + + +To verify that the certificates are properly installed, run: + +mitmproxy --port 9000 + + +This command runs mitmproxy on port 9000. In another window, run: + +curl --proxy ""http://localhost:9000"" ""https://httpbin.org/status/200"" + + +If everything is set up correctly, information is displayed on the mitmproxy window and +no errors are generated by the curl commands. + +Running the Docker daemon with a proxy + + +For Docker to connect through a proxy, you must start the Docker daemon with the +proper environment variables. The easiest way is to shutdown Docker (for example sudo initctl stop docker) +and then run Docker by hand. As root, run: + +export HTTP_PROXY=""http://localhost:9000"" +export HTTPS_PROXY=""https://localhost:9000"" +docker daemon --debug + + +This command launches the Docker daemon and proxies all connections through mitmproxy. + +Running the Docker client + + +Now that we have mitmproxy and Docker running, we can attempt to sign in and +push a container image. You may need to run as root to do this. For example: + +docker login s3-testing.myregistry.com:5050 +docker push s3-testing.myregistry.com:5050/root/docker-test/docker-image + + +In the example above, we see the following trace on the mitmproxy window: + + + +The above image shows: + + + The initial PUT requests went through fine with a 201 status code. + The 201 redirected the client to the S3 bucket. + The HEAD request to the AWS bucket reported a 403 Unauthorized. + + +What does this mean? This strongly suggests that the S3 user does not have the right +permissions to perform a HEAD request. +The solution: check the IAM permissions again. +Once the right permissions were set, the error goes away. 
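
As a reference point for that permissions check, the credentials the registry uses for S3 are the ones defined in its storage configuration, and the IAM identity behind them typically needs permission to list the bucket and to read, write, and delete objects under the registry prefix (docker/registry/v2/). The following is a minimal sketch of that storage section with placeholder values only; depending on your installation method, this section lives in the registry's config.yml or is generated from the registry settings in gitlab.rb:

storage:
  s3:
    accesskey: EXAMPLEACCESSKEY   # placeholder: access key of the IAM identity used by the registry
    secretkey: examplesecretkey   # placeholder: matching secret key
    region: us-east-1             # placeholder: region of the bucket
    bucket: my-registry-bucket    # placeholder: bucket that stores the registry data

If the IAM identity behind these credentials cannot perform HEAD or GET requests on objects under docker/registry/v2/ in that bucket, pushes fail with errors like the 403 shown above.
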
+ +Missing gitlab-registry.key prevents container repository deletion + + +If you disable your GitLab instance’s container registry and try to remove a project that has +container repositories, the following error occurs: + +Errno::ENOENT: No such file or directory @ rb_sysopen - /var/opt/gitlab/gitlab-rails/etc/gitlab-registry.key + + +In this case, follow these steps: + + + + Temporarily enable the instance-wide setting for the container registry in your gitlab.rb: + + +gitlab_rails['registry_enabled'] = true + + + Save the file and reconfigure GitLab +for the changes to take effect. + Try the removal again. + + +If you still can’t remove the repository using the common methods, you can use the +GitLab Rails console +to remove the project by force: + +# Path to the project you'd like to remove +prj = Project.find_by_full_path() + +# The following will delete the project's container registry, so be sure to double-check the path beforehand! +if prj.has_container_registry_tags? + prj.container_repositories.each { |p| p.destroy } +end + + + +2. Caching in GitLab CI/CD + + + +Caching in GitLab CI/CD + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +A cache is one or more files a job downloads and saves. Subsequent jobs that use +the same cache don’t have to download the files again, so they execute more quickly. + +To learn how to define the cache in your .gitlab-ci.yml file, +see the cache reference. + +How cache is different from artifacts + + +Use cache for dependencies, like packages you download from the internet. +Cache is stored where GitLab Runner is installed and uploaded to S3 if +distributed cache is enabled. + +Use artifacts to pass intermediate build results between stages. +Artifacts are generated by a job, stored in GitLab, and can be downloaded. + +Both artifacts and caches define their paths relative to the project directory, and +can’t link to files outside it. + +Cache + + + + Define cache per job by using the cache keyword. Otherwise it is disabled. + Subsequent pipelines can use the cache. + Subsequent jobs in the same pipeline can use the cache, if the dependencies are identical. + Different projects cannot share the cache. + By default, protected and non-protected branches do not share the cache. However, you can change this behavior. + + +Artifacts + + + + Define artifacts per job. + Subsequent jobs in later stages of the same pipeline can use artifacts. + Different projects cannot share artifacts. + Artifacts expire after 30 days by default. You can define a custom expiration time. + The latest artifacts do not expire if keep latest artifacts is enabled. + Use dependencies to control which jobs fetch the artifacts. + + +Good caching practices + + +To ensure maximum availability of the cache, do one or more of the following: + + + +Tag your runners and use the tag on jobs +that share the cache. + +Use runners that are only available to a particular project. + +Use a key that fits your workflow. For example, +you can configure a different cache for each branch. + + +For runners to work with caches efficiently, you must do one of the following: + + + Use a single runner for all your jobs. + Use multiple runners that have +distributed caching, +where the cache is stored in S3 buckets. Instance runners on GitLab.com behave this way. These runners can be in autoscale mode, +but they don’t have to be. To manage cache objects, +apply lifecycle rules to delete the cache objects after a period of time. 
+Lifecycle rules are available on the object storage server. + Use multiple runners with the same architecture and have these runners +share a common network-mounted directory to store the cache. This directory should use NFS or something similar. +These runners must be in autoscale mode. + + +Use multiple caches + + + +History + + + + + +Introduced in GitLab 13.10. + +Feature flag removed, in GitLab 13.12. + + + + + + +You can have a maximum of four caches: + +test-job: + stage: build + cache: + - key: + files: + - Gemfile.lock + paths: + - vendor/ruby + - key: + files: + - yarn.lock + paths: + - .yarn-cache/ + script: + - bundle config set --local path 'vendor/ruby' + - bundle install + - yarn install --cache-folder .yarn-cache + - echo Run tests... + + +If multiple caches are combined with a fallback cache key, +the global fallback cache is fetched every time a cache is not found. + +Use a fallback cache key + + +Per-cache fallback keys + + + +History + + + + + +Introduced in GitLab 16.0 + + + + + + +Each cache entry supports up to five fallback keys with the fallback_keys keyword. +When a job does not find a cache key, the job attempts to retrieve a fallback cache instead. +Fallback keys are searched in order until a cache is found. If no cache is found, +the job runs without using a cache. For example: + +test-job: + stage: build + cache: + - key: cache-$CI_COMMIT_REF_SLUG + fallback_keys: + - cache-$CI_DEFAULT_BRANCH + - cache-default + paths: + - vendor/ruby + script: + - bundle config set --local path 'vendor/ruby' + - bundle install + - echo Run tests... + + +In this example: + + + The job looks for the cache-$CI_COMMIT_REF_SLUG cache. + If cache-$CI_COMMIT_REF_SLUG is not found, the job looks for cache-$CI_DEFAULT_BRANCH +as a fallback option. + If cache-$CI_DEFAULT_BRANCH is also not found, the job looks for cache-default +as a second fallback option. + If none are found, the job downloads all the Ruby dependencies without using a cache, +but creates a new cache for cache-$CI_COMMIT_REF_SLUG when the job completes. + + +Fallback keys follow the same processing logic as cache:key: + + + If you clear caches manually, per-cache fallback keys are appended +with an index like other cache keys. + If the Use separate caches for protected branches setting is enabled, +per-cache fallback keys are appended with -protected or -non_protected. + + +Global fallback key + + + +History + + + + + +Introduced in GitLab Runner 13.4. + + + + + + +You can use the $CI_COMMIT_REF_SLUG predefined variable +to specify your cache:key. For example, if your +$CI_COMMIT_REF_SLUG is test, you can set a job to download cache that’s tagged with test. + +If a cache with this tag is not found, you can use CACHE_FALLBACK_KEY to +specify a cache to use when none exists. + +In the following example, if the $CI_COMMIT_REF_SLUG is not found, the job uses the key defined +by the CACHE_FALLBACK_KEY variable: + +variables: + CACHE_FALLBACK_KEY: fallback-key + +job1: + script: + - echo + cache: + key: ""$CI_COMMIT_REF_SLUG"" + paths: + - binaries/ + + +The order of caches extraction is: + + + Retrieval attempt for cache:key + + Retrieval attempts for each entry in order in fallback_keys + + Retrieval attempt for the global fallback key in CACHE_FALLBACK_KEY + + + +The cache extraction process stops after the first successful cache is retrieved. + +Disable cache for specific jobs + + +If you define the cache globally, each job uses the +same definition. You can override this behavior for each job. 
+ +To disable it completely for a job, use an empty list: + +job: + cache: [] + + +Inherit global configuration, but override specific settings per job + + +You can override cache settings without overwriting the global cache by using +anchors. For example, if you want to override the +policy for one job: + +default: + cache: &global_cache + key: $CI_COMMIT_REF_SLUG + paths: + - node_modules/ + - public/ + - vendor/ + policy: pull-push + +job: + cache: + # inherit all global cache settings + <<: *global_cache + # override the policy + policy: pull + + +For more information, see cache: policy. + +Common use cases for caches + + +Usually you use caches to avoid downloading content, like dependencies +or libraries, each time you run a job. Node.js packages, +PHP packages, Ruby gems, Python libraries, and others can be cached. + +For examples, see the GitLab CI/CD templates. + +Share caches between jobs in the same branch + + +To have jobs in each branch use the same cache, define a cache with the key: $CI_COMMIT_REF_SLUG: + +cache: + key: $CI_COMMIT_REF_SLUG + + +This configuration prevents you from accidentally overwriting the cache. However, the +first pipeline for a merge request is slow. The next time a commit is pushed to the branch, the +cache is re-used and jobs run faster. + +To enable per-job and per-branch caching: + +cache: + key: ""$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"" + + +To enable per-stage and per-branch caching: + +cache: + key: ""$CI_JOB_STAGE-$CI_COMMIT_REF_SLUG"" + + +Share caches across jobs in different branches + + +To share a cache across all branches and all jobs, use the same key for everything: + +cache: + key: one-key-to-rule-them-all + + +To share a cache between branches, but have a unique cache for each job: + +cache: + key: $CI_JOB_NAME + + +Use a variable to control a job’s cache policy + + + +History + + + + + +Introduced in GitLab 16.1. + + + + + + +To reduce duplication of jobs where the only difference is the pull policy, you can use a CI/CD variable. + +For example: + +conditional-policy: + rules: + - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + variables: + POLICY: pull-push + - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH + variables: + POLICY: pull + stage: build + cache: + key: gems + policy: $POLICY + paths: + - vendor/bundle + script: + - echo ""This job pulls and pushes the cache depending on the branch"" + - echo ""Downloading dependencies..."" + + +In this example, the job’s cache policy is: + + + +pull-push for changes to the default branch. + +pull for changes to other branches. + + +Cache Node.js dependencies + + +If your project uses npm to install Node.js +dependencies, the following example defines a default cache so that all jobs inherit it. +By default, npm stores cache data in the home folder (~/.npm). However, you +can’t cache things outside of the project directory. +Instead, tell npm to use ./.npm, and cache it per-branch: + +default: + image: node:latest + cache: # Cache modules in between jobs + key: $CI_COMMIT_REF_SLUG + paths: + - .npm/ + before_script: + - npm ci --cache .npm --prefer-offline + +test_async: + script: + - node ./specs/start.js ./specs/async.spec.js + + +Compute the cache key from the lock file + + +You can use cache:key:files to compute the cache +key from a lock file like package-lock.json or yarn.lock, and reuse it in many jobs. 
+ +default: + cache: # Cache modules using lock file + key: + files: + - package-lock.json + paths: + - .npm/ + + +If you’re using Yarn, you can use yarn-offline-mirror +to cache the zipped node_modules tarballs. The cache generates more quickly, because +fewer files have to be compressed: + +job: + script: + - echo 'yarn-offline-mirror "".yarn-cache/""' >> .yarnrc + - echo 'yarn-offline-mirror-pruning true' >> .yarnrc + - yarn install --frozen-lockfile --no-progress + cache: + key: + files: + - yarn.lock + paths: + - .yarn-cache/ + + +Cache PHP dependencies + + +If your project uses Composer to install +PHP dependencies, the following example defines a default cache so that +all jobs inherit it. PHP libraries modules are installed in vendor/ and +are cached per-branch: + +default: + image: php:7.2 + cache: # Cache libraries in between jobs + key: $CI_COMMIT_REF_SLUG + paths: + - vendor/ + before_script: + # Install and run Composer + - curl --show-error --silent ""https://getcomposer.org/installer"" | php + - php composer.phar install + +test: + script: + - vendor/bin/phpunit --configuration phpunit.xml --coverage-text --colors=never + + +Cache Python dependencies + + +If your project uses pip to install +Python dependencies, the following example defines a default cache so that +all jobs inherit it. pip’s cache is defined under .cache/pip/ and is cached per-branch: + +default: + image: python:latest + cache: # Pip's cache doesn't store the python packages + paths: # https://pip.pypa.io/en/stable/topics/caching/ + - .cache/pip + before_script: + - python -V # Print out python version for debugging + - pip install virtualenv + - virtualenv venv + - source venv/bin/activate + +variables: # Change pip's cache directory to be inside the project directory since we can only cache local items. + PIP_CACHE_DIR: ""$CI_PROJECT_DIR/.cache/pip"" + +test: + script: + - python setup.py test + - pip install ruff + - ruff --format=gitlab . + + +Cache Ruby dependencies + + +If your project uses Bundler to install +gem dependencies, the following example defines a default cache so that all +jobs inherit it. Gems are installed in vendor/ruby/ and are cached per-branch: + +default: + image: ruby:2.6 + cache: # Cache gems in between builds + key: $CI_COMMIT_REF_SLUG + paths: + - vendor/ruby + before_script: + - ruby -v # Print out ruby version for debugging + - bundle config set --local path 'vendor/ruby' # The location to install the specified gems to + - bundle install -j $(nproc) # Install dependencies into ./vendor/ruby + +rspec: + script: + - rspec spec + + +If you have jobs that need different gems, use the prefix +keyword in the global cache definition. This configuration generates a different +cache for each job. + +For example, a testing job might not need the same gems as a job that deploys to +production: + +default: + cache: + key: + files: + - Gemfile.lock + prefix: $CI_JOB_NAME + paths: + - vendor/ruby + +test_job: + stage: test + before_script: + - bundle config set --local path 'vendor/ruby' + - bundle install --without production + script: + - bundle exec rspec + +deploy_job: + stage: production + before_script: + - bundle config set --local path 'vendor/ruby' # The location to install the specified gems to + - bundle install --without test + script: + - bundle exec deploy + + +Cache Go dependencies + + +If your project uses Go Modules to install +Go dependencies, the following example defines cache in a go-cache template, that +any job can extend. 
Go modules are installed in ${GOPATH}/pkg/mod/ and +are cached for all of the go projects: + +.go-cache: + variables: + GOPATH: $CI_PROJECT_DIR/.go + before_script: + - mkdir -p .go + cache: + paths: + - .go/pkg/mod/ + +test: + image: golang:1.13 + extends: .go-cache + script: + - go test ./... -v -short + + +Availability of the cache + + +Caching is an optimization, but it isn’t guaranteed to always work. You might need +to regenerate cached files in each job that needs them. + +After you define a cache in .gitlab-ci.yml, +the availability of the cache depends on: + + + The runner’s executor type. + Whether different runners are used to pass the cache between jobs. + + +Where the caches are stored + + +All caches defined for a job are archived in a single cache.zip file. +The runner configuration defines where the file is stored. By default, the cache +is stored on the machine where GitLab Runner is installed. The location also depends on the type of executor. + + + + + Runner executor + Default path of the cache + + + + + Shell + Locally, under the gitlab-runner user’s home directory: /home/gitlab-runner/cache////cache.zip. + + + Docker + Locally, under Docker volumes: /var/lib/docker/volumes//_data////cache.zip. + + + +Docker Machine (autoscale runners) + The same as the Docker executor. + + + + +If you use cache and artifacts to store the same path in your jobs, the cache might +be overwritten because caches are restored before artifacts. + +Cache key names + + + +History + + + + + +Introduced in GitLab 15.0. + + + + + + +A suffix is added to the cache key, with the exception of the global fallback cache key. + +As an example, assuming that cache.key is set to $CI_COMMIT_REF_SLUG, and that we have two branches main +and feature, then the following table represents the resulting cache keys: + + + + + Branch name + Cache key + + + + + main + main-protected + + + feature + feature-non_protected + + + + +Use the same cache for all branches + + + +History + + + + + +Introduced in GitLab 15.0. + + + + + + +If you do not want to use cache key names, +you can have all branches (protected and unprotected) use the same cache. + +The cache separation with cache key names is a security feature +and should only be disabled in an environment where all users with Developer role are highly trusted. + +To use the same cache for all branches: + + + On the left sidebar, select Search or go to and find your project. + Select Settings > CI/CD. + Expand General pipelines. + Clear the Use separate caches for protected branches checkbox. + Select Save changes. + + +How archiving and extracting works + + +This example shows two jobs in two consecutive stages: + +stages: + - build + - test + +default: + cache: + key: build-cache + paths: + - vendor/ + before_script: + - echo ""Hello"" + +job A: + stage: build + script: + - mkdir vendor/ + - echo ""build"" > vendor/hello.txt + after_script: + - echo ""World"" + +job B: + stage: test + script: + - cat vendor/hello.txt + + +If one machine has one runner installed, then all jobs for your project +run on the same host: + + + Pipeline starts. + +job A runs. + The cache is extracted (if found). + +before_script is executed. + +script is executed. + +after_script is executed. + +cache runs and the vendor/ directory is zipped into cache.zip. +This file is then saved in the directory based on the +runner’s setting and the cache: key. + +job B runs. + The cache is extracted (if found). + +before_script is executed. + +script is executed. + Pipeline finishes. 
+ + +By using a single runner on a single machine, you don’t have the issue where +job B might execute on a runner different from job A. This setup guarantees the +cache can be reused between stages. It only works if the execution goes from the build stage +to the test stage in the same runner/machine. Otherwise, the cache might not be available. + +During the caching process, there’s also a couple of things to consider: + + + If some other job, with another cache configuration had saved its +cache in the same zip file, it is overwritten. If the S3 based shared cache is +used, the file is additionally uploaded to S3 to an object based on the cache +key. So, two jobs with different paths, but the same cache key, overwrites +their cache. + When extracting the cache from cache.zip, everything in the zip file is +extracted in the job’s working directory (usually the repository which is +pulled down), and the runner doesn’t mind if the archive of job A overwrites +things in the archive of job B. + + +It works this way because the cache created for one runner +often isn’t valid when used by a different one. A different runner may run on a +different architecture (for example, when the cache includes binary files). Also, +because the different steps might be executed by runners running on different +machines, it is a safe default. + +Clearing the cache + + +Runners use cache to speed up the execution +of your jobs by reusing existing data. This can sometimes lead to +inconsistent behavior. + +There are two ways to start with a fresh copy of the cache. + +Clear the cache by changing cache:key + + +Change the value for cache: key in your .gitlab-ci.yml file. +The next time the pipeline runs, the cache is stored in a different location. + +Clear the cache manually + + +You can clear the cache in the GitLab UI: + + + On the left sidebar, select Search or go to and find your project. + Select Build > Pipelines. + In the upper-right corner, select Clear runner caches. + + +On the next commit, your CI/CD jobs use a new cache. + + + note Each time you clear the cache manually, the internal cache name is updated. The name uses the format cache-, and the index increments by one. The old cache is not deleted. You can manually delete these files from the runner storage. + + +Troubleshooting + + +Cache mismatch + + +If you have a cache mismatch, follow these steps to troubleshoot. + + + + + Reason for a cache mismatch + How to fix it + + + + + You use multiple standalone runners (not in autoscale mode) attached to one project without a shared cache. + Use only one runner for your project or use multiple runners with distributed cache enabled. + + + You use runners in autoscale mode without a distributed cache enabled. + Configure the autoscale runner to use a distributed cache. + + + The machine the runner is installed on is low on disk space or, if you’ve set up distributed cache, the S3 bucket where the cache is stored doesn’t have enough space. + Make sure you clear some space to allow new caches to be stored. There’s no automatic way to do this. + + + You use the same key for jobs where they cache different paths. + Use different cache keys so that the cache archive is stored to a different location and doesn’t overwrite wrong caches. + + + You have not enabled the distributed runner caching on your runners. + Set Shared = false and re-provision your runners. 
+ + + + +Cache mismatch example 1 + + +If you have only one runner assigned to your project, the cache +is stored on the runner’s machine by default. + +If two jobs have the same cache key but a different path, the caches can be overwritten. +For example: + +stages: + - build + - test + +job A: + stage: build + script: make build + cache: + key: same-key + paths: + - public/ + +job B: + stage: test + script: make test + cache: + key: same-key + paths: + - vendor/ + + + + +job A runs. + +public/ is cached as cache.zip. + +job B runs. + The previous cache, if any, is unzipped. + +vendor/ is cached as cache.zip and overwrites the previous one. + The next time job A runs it uses the cache of job B which is different +and thus isn’t effective. + + +To fix this issue, use different keys for each job. + +Cache mismatch example 2 + + +In this example, you have more than one runner assigned to your +project, and distributed cache is not enabled. + +The second time the pipeline runs, you want job A and job B to re-use their cache (which in this case +is different): + +stages: + - build + - test + +job A: + stage: build + script: build + cache: + key: keyA + paths: + - vendor/ + +job B: + stage: test + script: test + cache: + key: keyB + paths: + - vendor/ + + +Even if the key is different, the cached files might get “cleaned” before each +stage if the jobs run on different runners in subsequent pipelines. + +Concurrent runners missing local cache + + +If you have configured multiple concurrent runners with the Docker executor, locally cached files might +not be present for concurrently-running jobs as you expect. The names of cache volumes are constructed +uniquely for each runner instance, so files cached by one runner instance are not found in the cache by another runner +instance. + +To share the cache between concurrent runners, you can either: + + + Use the [runners.docker] section of the runners’ config.toml to configure a single mount point on the host that +is mapped to /cache in each container, preventing the runner from creating unique volume names. + Use a distributed cache. + + + +" +what are artifacts in gitlab ?,,"1. Job artifacts + + + +Job artifacts + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +Jobs can output an archive of files and directories. This output is known as a job artifact. + +You can download job artifacts by using the GitLab UI or the API. + + +For an overview of job artifacts, watch the video GitLab CI pipelines, artifacts, and environments. +Or, for an introduction, watch GitLab CI pipeline tutorial for beginners. + +For administrator information about job artifact storage, see administering job artifacts. + +Create job artifacts + + +To create job artifacts, use the artifacts keyword in your .gitlab-ci.yml file: + +pdf: + script: xelatex mycv.tex + artifacts: + paths: + - mycv.pdf + + +In this example, a job named pdf calls the xelatex command to build a PDF file from the +LaTeX source file, mycv.tex. + +The paths keyword determines which files to add to the job artifacts. +All paths to files and directories are relative to the repository where the job was created. + +With wildcards + + +You can use wildcards for paths and directories. For example, to create an artifact +with all the files inside the directories that end with xyz: + +job: + script: echo ""build xyz project"" + artifacts: + paths: + - path/*xyz/* + + +With an expiry + + +The expire_in keyword determines how long +GitLab keeps the job artifacts. 
For example: + +pdf: + script: xelatex mycv.tex + artifacts: + paths: + - mycv.pdf + expire_in: 1 week + + +If expire_in is not defined, the instance-wide setting +is used. + +To prevent artifacts from expiring, you can select Keep from the job details page. +The option is not available when an artifact has no expiry set. + +By default, the latest artifacts are always kept. + +With a dynamically defined name + + +You can use CI/CD variables to dynamically define the +artifacts file’s name. + +For example, to create an archive with a name of the current job: + +job: + artifacts: + name: ""$CI_JOB_NAME"" + paths: + - binaries/ + + +To create an archive with a name of the current branch or tag including only +the binaries directory: + +job: + artifacts: + name: ""$CI_COMMIT_REF_NAME"" + paths: + - binaries/ + + +If your branch-name contains forward slashes +(for example feature/my-feature) use $CI_COMMIT_REF_SLUG +instead of $CI_COMMIT_REF_NAME for proper naming of the artifact. + +With a Windows runner or shell executor + + +If you use Windows Batch to run your shell scripts you must replace $ with %: + +job: + artifacts: + name: ""%CI_JOB_STAGE%-%CI_COMMIT_REF_NAME%"" + paths: + - binaries/ + + +If you use Windows PowerShell to run your shell scripts you must replace $ with $env:: + +job: + artifacts: + name: ""$env:CI_JOB_STAGE-$env:CI_COMMIT_REF_NAME"" + paths: + - binaries/ + + +Without excluded files + + +Use artifacts:exclude to prevent files from +being added to an artifacts archive. + +For example, to store all files in binaries/, but not *.o files located in +subdirectories of binaries/. + +artifacts: + paths: + - binaries/ + exclude: + - binaries/**/*.o + + +Unlike artifacts:paths, exclude paths are not recursive. +To exclude all of the contents of a directory, match them explicitly rather +than matching the directory itself. + +For example, to store all files in binaries/ but nothing located in the temp/ subdirectory: + +artifacts: + paths: + - binaries/ + exclude: + - binaries/temp/**/* + + +With untracked files + + +Use artifacts:untracked to add all Git untracked +files as artifacts (along with the paths defined in artifacts:paths). Untracked +files are those that haven’t been added to the repository but exist in the repository checkout. + +For example, to save all Git untracked files and files in binaries: + +artifacts: + untracked: true + paths: + - binaries/ + + +For example, to save all untracked files but exclude *.txt files: + +artifacts: + untracked: true + exclude: + - ""*.txt"" + + +Prevent a job from fetching artifacts + + +Jobs downloads all artifacts from the completed jobs in previous stages by default. +To prevent a job from downloading any artifacts, set dependencies +to an empty array ([]): + +job: + stage: test + script: make build + dependencies: [] + + +View all job artifacts in a project + + + +History + + + + + +Introduced in GitLab 12.4 with a flag named artifacts_management_page. Disabled by default. + +Improved look in GitLab 15.6. + +Improved performance in GitLab 15.9. + +Generally available in GitLab 16.0. Feature flag artifacts_management_page removed. + + + + + + +You can view all artifacts stored in a project from the Build > Artifacts page. +This list displays all jobs and their associated artifacts. Expand an entry to access +all artifacts associated with a job, including: + + + Artifacts created with the artifacts: keyword. + +Report artifacts. + Job logs and metadata, which are stored internally as separate artifacts. 
+ + +You can download or delete individual artifacts from this list. + +Download job artifacts + + +You can download job artifacts from: + + + Any Pipelines list. On the right of the pipeline, select Download artifacts ( ). + Any Jobs list. On the right of the job, select Download artifacts ( ). + A job’s detail page. On the right of the page, select Download. + A merge request Overview page. On the right of the latest pipeline, select Artifacts ( ). + The Artifacts page. On the right of the job, select Download ( ). + The artifacts browser. On the top of the page, +select Download artifacts archive ( ). + + +Report artifacts can only be downloaded from the Pipelines list +or Artifacts page. + +You can download job artifacts from the latest successful pipeline by using the job artifacts API. +You cannot download artifact reports with the job artifacts API, +unless the report is added as a regular artifact with artifacts:paths. + +From a URL + + +You can download the artifacts archive for a specific job with a publicly accessible +URL for the job artifacts API. + +For example: + + + + To download the latest artifacts of a job named build in the main branch of a project on GitLab.com: + + +https://gitlab.com/api/v4/projects//jobs/artifacts/main/download?job=build + + + + To download the file review/index.html from the latest job named build in the main branch of a project on GitLab.com: + + +https://gitlab.com/api/v4/projects//jobs/artifacts/main/raw/review/index.html?job=build + + + Files returned by this endpoint always have the plain/text content type. + + + +In both examples, replace with a valid project ID. You can find the project ID on the, +project overview page. + +Artifacts for parent and child pipelines +are searched in hierarchical order from parent to child. For example, if both parent and +child pipelines have a job with the same name, the job artifacts from the parent pipeline are returned. + +With a CI/CD job token + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +You can use a CI/CD job token to authenticate with the jobs artifacts API endpoint +and fetch artifacts from a different pipeline. You must specify which job to retrieve artifacts from, +for example: + +build_submodule: + stage: test + script: + - apt update && apt install -y unzip + - curl --location --output artifacts.zip ""https://gitlab.example.com/api/v4/projects/1/jobs/artifacts/main/download?job=test&job_token=$CI_JOB_TOKEN"" + - unzip artifacts.zip + + +Browse the contents of the artifacts archive + + +You can browse the contents of the artifacts from the UI without downloading the artifact locally, +from: + + + Any Jobs list. On the right of the job, select Browse ( ). + A job’s detail page. On the right of the page, select Browse. + The Artifacts page. On the right of the job, select Browse ( ). + + +If GitLab Pages is enabled in the project, you can preview +some artifacts file extensions directly in your browser. If the project is internal or private, you must enable GitLab Pages access control to enable the preview. + +The following extensions are supported: + + + + + File extension + GitLab.com + Linux package with built-in NGINX + + + + + .html + + Yes + + Yes + + + .json + + Yes + + Yes + + + .xml + + Yes + + Yes + + + .txt + + No + + Yes + + + .log + + No + + Yes + + + + +From a URL + + +You can browse the job artifacts of the latest successful pipeline for a specific job +with a publicly accessible URL. 
+ +For example, to browse the latest artifacts of a job named build in the main branch of a project on GitLab.com: + +https://gitlab.com//-/jobs/artifacts/main/browse?job=build + + +Replace with a valid project path, you can find it in the URL for your project. + +Delete job log and artifacts + + + + caution Deleting the job log and artifacts is a destructive action that cannot be reverted. Use with caution. +Deleting certain files, including report artifacts, job logs, and metadata files, affects +GitLab features that use these files as data sources. + + +You can delete a job’s artifacts and log. + +Prerequisites: + + + You must be the owner of the job or a user with at least the Maintainer role for the project. + + +To delete a job: + + + Go to a job’s detail page. + In the upper-right corner of the job’s log, select Erase job log and artifacts ( ). + + +You can also delete individual artifacts from the Artifacts page. + +Bulk delete artifacts + + + +History + + + + + +Introduced in GitLab 15.10 with a flag named ci_job_artifact_bulk_destroy. Disabled by default. + +Feature flag removed in GitLab 16.1. + + + + + + +You can delete multiple artifacts at the same time: + + + On the left sidebar, select Search or go to and find your project. + Select Build > Artifacts. + Select the checkboxes next to the artifacts you want to delete. You can select up to 50 artifacts. + Select Delete selected. + + +Link to job artifacts in the merge request UI + + +Use the artifacts:expose_as keyword to display +a link to job artifacts in the merge request UI. + +For example, for an artifact with a single file: + +test: + script: [""echo 'test' > file.txt""] + artifacts: + expose_as: 'artifact 1' + paths: ['file.txt'] + + +With this configuration, GitLab adds artifact 1 as a link to file.txt to the +View exposed artifact section of the relevant merge request. + +Keep artifacts from most recent successful jobs + + + +History + + + + + +Introduced in GitLab 13.0. + +Feature flag removed in GitLab 13.4. + +Made optional with a CI/CD setting in GitLab 13.8. + Artifacts for blocked or failed pipelines no longer kept indefinitely in GitLab 16.7. + + + + + + +By default artifacts are always kept for successful pipelines for the most recent commit on each ref. +Any expire_in configuration does not apply to the most recent artifacts. + +A pipeline’s artifacts are only deleted according to the expire_in configuration +if a new pipeline runs for the same ref and: + + + Succeeds. + Fails. + Stops running due to being blocked by a manual job. + + +Additionally, artifacts are kept for the ref’s last successful pipeline even if it +is not the latest pipeline. As a result, if a new pipeline run fails, the last successful pipeline’s +artifacts are still kept. + +Keeping the latest artifacts can use a large amount of storage space in projects +with a lot of jobs or large artifacts. If the latest artifacts are not needed in +a project, you can disable this behavior to save space: + + + On the left sidebar, select Search or go to and find your project. + Select Settings > CI/CD. + Expand Artifacts. + Clear the Keep artifacts from most recent successful jobs checkbox. + + +After disabling this setting, all new artifacts expire according to the expire_in configuration. +Artifacts in old pipelines continue to be kept until a new pipeline runs for the same ref. +Then the artifacts in the earlier pipeline for that ref are allowed to expire too. 
+ +You can disable this behavior for all projects on a self-managed instance in the +instance’s CI/CD settings. + + +2. CI/CD YAML syntax reference + + + +CI/CD YAML syntax reference + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +This document lists the configuration options for the GitLab .gitlab-ci.yml file. +This file is where you define the CI/CD jobs that make up your pipeline. + + + If you are already familiar with basic CI/CD concepts, try creating +your own .gitlab-ci.yml file by following a tutorial that demonstrates a simple +or complex pipeline. + For a collection of examples, see GitLab CI/CD examples. + To view a large .gitlab-ci.yml file used in an enterprise, see the +.gitlab-ci.yml file for gitlab. + + +When you are editing your .gitlab-ci.yml file, you can validate it with the +CI Lint tool. + +If you are editing content on this page, follow the instructions for documenting keywords. + +Keywords + + +A GitLab CI/CD pipeline configuration includes: + + + + Global keywords that configure pipeline behavior: + + + + + Keyword + Description + + + + + default + Custom default values for job keywords. + + + include + Import configuration from other YAML files. + + + stages + The names and order of the pipeline stages. + + + variables + Define CI/CD variables for all job in the pipeline. + + + workflow + Control what types of pipeline run. + + + + + + Header keywords + + + + + Keyword + Description + + + + + spec + Define specifications for external configuration files. + + + + + + Jobs configured with job keywords: + + + + + Keyword + Description + + + + + after_script + Override a set of commands that are executed after job. + + + allow_failure + Allow job to fail. A failed job does not cause the pipeline to fail. + + + artifacts + List of files and directories to attach to a job on success. + + + before_script + Override a set of commands that are executed before job. + + + cache + List of files that should be cached between subsequent runs. + + + coverage + Code coverage settings for a given job. + + + dast_configuration + Use configuration from DAST profiles on a job level. + + + dependencies + Restrict which artifacts are passed to a specific job by providing a list of jobs to fetch artifacts from. + + + environment + Name of an environment to which the job deploys. + + + extends + Configuration entries that this job inherits from. + + + identity + Authenticate with third party services using identity federation. + + + image + Use Docker images. + + + inherit + Select which global defaults all jobs inherit. + + + interruptible + Defines if a job can be canceled when made redundant by a newer run. + + + needs + Execute jobs earlier than the stage ordering. + + + pages + Upload the result of a job to use with GitLab Pages. + + + parallel + How many instances of a job should be run in parallel. + + + release + Instructs the runner to generate a release object. + + + resource_group + Limit job concurrency. + + + retry + When and how many times a job can be auto-retried in case of a failure. + + + rules + List of conditions to evaluate and determine selected attributes of a job, and whether or not it’s created. + + + script + Shell script that is executed by a runner. + + + secrets + The CI/CD secrets the job needs. + + + services + Use Docker services images. + + + stage + Defines a job stage. + + + tags + List of tags that are used to select a runner. 
+ + + timeout + Define a custom job-level timeout that takes precedence over the project-wide setting. + + + trigger + Defines a downstream pipeline trigger. + + + variables + Define job variables on a job level. + + + when + When to run job. + + + + + + +Global keywords + + +Some keywords are not defined in a job. These keywords control pipeline behavior +or import additional pipeline configuration. + + +default + + + +History + + + + + Support for id_tokens introduced in GitLab 16.4. + + + + + + +You can set global defaults for some keywords. Each default keyword is copied to every job +that doesn’t already have it defined. If the job already has a keyword defined, that default +is not used. + +Keyword type: Global keyword. + +Possible inputs: These keywords can have custom defaults: + + + after_script + artifacts + before_script + cache + hooks + id_tokens + image + interruptible + retry + services + tags + +timeout, though due to issue 213634 +this keyword has no effect. + + +Example of default: + +default: + image: ruby:3.0 + retry: 2 + +rspec: + script: bundle exec rspec + +rspec 2.7: + image: ruby:2.7 + script: bundle exec rspec + + +In this example: + + + +image: ruby:3.0 and retry: 2 are the default keywords for all jobs in the pipeline. + The rspec job does not have image or retry defined, so it uses the defaults of +image: ruby:3.0 and retry: 2. + The rspec 2.7 job does not have retry defined, but it does have image explicitly defined. +It uses the default retry: 2, but ignores the default image and uses the image: ruby:2.7 +defined in the job. + + +Additional details: + + + Control inheritance of default keywords in jobs with inherit:default. + + + +include + + + +History + + + + + +Moved to GitLab Free in 11.4. + + + + + + +Use include to include external YAML files in your CI/CD configuration. +You can split one long .gitlab-ci.yml file into multiple files to increase readability, +or reduce duplication of the same configuration in multiple places. + +You can also store template files in a central repository and include them in projects. + +The include files are: + + + Merged with those in the .gitlab-ci.yml file. + Always evaluated first and then merged with the content of the .gitlab-ci.yml file, +regardless of the position of the include keyword. + + +The time limit to resolve all files is 30 seconds. + +Keyword type: Global keyword. + +Possible inputs: The include subkeys: + + + include:component + include:local + include:project + include:remote + include:template + + +And optionally: + + + include:inputs + include:rules + + +Additional details: + + + Only certain CI/CD variables can be used +with include keywords. + Use merging to customize and override included CI/CD configurations with local + You can override included configuration by having the same job name or global keyword +in the .gitlab-ci.yml file. The two configurations are merged together, and the +configuration in the .gitlab-ci.yml file takes precedence over the included configuration. + If you rerun a: + + Job, the include files are not fetched again. All jobs in a pipeline use the configuration +fetched when the pipeline was created. Any changes to the source include files +do not affect job reruns. + Pipeline, the include files are fetched again. If they changed after the last +pipeline run, the new pipeline uses the changed configuration. + + + You can have up to 150 includes per pipeline by default, including nested. 
Additionally: + + In GitLab 16.0 and later self-managed users can +change the maximum includes value. + In GitLab 15.10 and later you can have up to 150 includes. +In nested includes, the same file can be included multiple times, but duplicated includes +count towards the limit. + From GitLab 14.9 to GitLab 15.9, you can have up to 100 includes. +The same file can be included multiple times in nested includes, but duplicates are ignored. + In GitLab 14.9 and earlier you can have up to 100 includes, but the same file can not +be included multiple times. + + + + +Related topics: + + + +Use variables with include. + +Use rules with include. + + + +include:component + + +Use include:component to add a CI/CD component to the +pipeline configuration. + +Keyword type: Global keyword. + +Possible inputs: The full address of the CI/CD component, formatted as +//@. + +Example of include:component: + +include: + - component: gitlab.example.com/my-org/security-components/secret-detection@1.0 + + +Related topics: + + + +Use a CI/CD component. + + + +include:local + + +Use include:local to include a file that is in the same repository and branch as the configuration file containing the include keyword. +Use include:local instead of symbolic links. + +Keyword type: Global keyword. + +Possible inputs: + +A full path relative to the root directory (/): + + + The YAML file must have the extension .yml or .yaml. + You can use * and ** wildcards in the file path. + You can use certain CI/CD variables. + + +Example of include:local: + +include: + - local: '/templates/.gitlab-ci-template.yml' + + +You can also use shorter syntax to define the path: + +include: '.gitlab-ci-production.yml' + + +Additional details: + + + The .gitlab-ci.yml file and the local file must be on the same branch. + You can’t include local files through Git submodules paths. + All nested includes are executed in the scope of the project containing the configuration file with the include keyword, not the project running the pipeline. +You can use local, project, remote, or template includes. + + + +include:project + + + +History + + + + + Including multiple files from the same project introduced in GitLab 13.6. Feature flag removed in GitLab 13.8. + + + + + + +To include files from another private project on the same GitLab instance, +use include:project and include:file. + +Keyword type: Global keyword. + +Possible inputs: + + + +include:project: The full GitLab project path. + +include:file A full file path, or array of file paths, relative to the root directory (/). +The YAML files must have the .yml or .yaml extension. + +include:ref: Optional. The ref to retrieve the file from. Defaults to the HEAD of the project +when not specified. + You can use certain CI/CD variables. 
+ + +Example of include:project: + +include: + - project: 'my-group/my-project' + file: '/templates/.gitlab-ci-template.yml' + - project: 'my-group/my-subgroup/my-project-2' + file: + - '/templates/.builds.yml' + - '/templates/.tests.yml' + + +You can also specify a ref: + +include: + - project: 'my-group/my-project' + ref: main # Git branch + file: '/templates/.gitlab-ci-template.yml' + - project: 'my-group/my-project' + ref: v1.0.0 # Git Tag + file: '/templates/.gitlab-ci-template.yml' + - project: 'my-group/my-project' + ref: 787123b47f14b552955ca2786bc9542ae66fee5b # Git SHA + file: '/templates/.gitlab-ci-template.yml' + + +Additional details: + + + All nested includes are executed in the scope of the project containing the configuration file with the nested include keyword. +You can use local (relative to the project containing the configuration file with the include keyword), project, remote, or template includes. + When the pipeline starts, the .gitlab-ci.yml file configuration included by all methods is evaluated. +The configuration is a snapshot in time and persists in the database. GitLab does not reflect any changes to +the referenced .gitlab-ci.yml file configuration until the next pipeline starts. + When you include a YAML file from another private project, the user running the pipeline +must be a member of both projects and have the appropriate permissions to run pipelines. +A not found or access denied error may be displayed if the user does not have access to any of the included files. + Be careful when including another project’s CI/CD configuration file. No pipelines or notifications trigger when CI/CD configuration files change. +From a security perspective, this is similar to pulling a third-party dependency. For the ref, consider: + + Using a specific SHA hash, which should be the most stable option. Use the +full 40-character SHA hash to ensure the desired commit is referenced, because +using a short SHA hash for the ref might be ambiguous. + Applying both protected branch and protected tag rules to +the ref in the other project. Protected tags and branches are more likely to pass through change management before changing. + + + + + +include:remote + + +Use include:remote with a full URL to include a file from a different location. + +Keyword type: Global keyword. + +Possible inputs: + +A public URL accessible by an HTTP/HTTPS GET request: + + + Authentication with the remote URL is not supported. + The YAML file must have the extension .yml or .yaml. + You can use certain CI/CD variables. + + +Example of include:remote: + +include: + - remote: 'https://gitlab.com/example-project/-/raw/main/.gitlab-ci.yml' + + +Additional details: + + + All nested includes are executed without context as a public user, +so you can only include public projects or templates. No variables are available in the include section of nested includes. + Be careful when including another project’s CI/CD configuration file. No pipelines or notifications trigger +when the other project’s files change. From a security perspective, this is similar to +pulling a third-party dependency. If you link to another GitLab project you own, consider the use of both +protected branches and protected tags +to enforce change management rules. + + + +include:template + + +Use include:template to include .gitlab-ci.yml templates. + +Keyword type: Global keyword. + +Possible inputs: + +A CI/CD template: + + + All templates can be viewed in lib/gitlab/ci/templates. 
+Not all templates are designed to be used with include:template, so check template +comments before using one. + You can use certain CI/CD variables. + + +Example of include:template: + +# File sourced from the GitLab template collection +include: + - template: Auto-DevOps.gitlab-ci.yml + + +Multiple include:template files: + +include: + - template: Android-Fastlane.gitlab-ci.yml + - template: Auto-DevOps.gitlab-ci.yml + + +Additional details: + + + All nested includes are executed without context as a public user, +so you can only include public projects or templates. No variables are available in the include section of nested includes. + + + +include:inputs + + + +History + + + + + +Introduced in GitLab 15.11 as a Beta feature. + + + + + + +Use include:inputs to set the values for input parameters when the included configuration +uses spec:inputs and is added to the pipeline. + +Keyword type: Global keyword. + +Possible inputs: A string, numeric value, or boolean. + +Example of include:inputs: + +include: + - local: 'custom_configuration.yml' + inputs: + website: ""My website"" + + +In this example: + + + The configuration contained in custom_configuration.yml is added to the pipeline, +with a website input set to a value of My website for the included configuration. + + +Additional details: + + + If the included configuration file uses spec:inputs:type, +the input value must match the defined type. + If the included configuration file uses spec:inputs:options, +the input value must match one of the listed options. + + +Related topics: + + + +Set input values when using include. + + + +stages + + + +History + + + + + Support for nested array of strings introduced in GitLab 16.9. + + + + + + +Use stages to define stages that contain groups of jobs. Use stage +in a job to configure the job to run in a specific stage. + +If stages is not defined in the .gitlab-ci.yml file, the default pipeline stages are: + + + .pre + build + test + deploy + .post + + +The order of the items in stages defines the execution order for jobs: + + + Jobs in the same stage run in parallel. + Jobs in the next stage run after the jobs from the previous stage complete successfully. + + +If a pipeline contains only jobs in the .pre or .post stages, it does not run. +There must be at least one other job in a different stage. .pre and .post stages +can be used in required pipeline configuration +to define compliance jobs that must run before or after project pipeline jobs. + +Keyword type: Global keyword. + +Example of stages: + +stages: + - build + - test + - deploy + + +In this example: + + + All jobs in build execute in parallel. + If all jobs in build succeed, the test jobs execute in parallel. + If all jobs in test succeed, the deploy jobs execute in parallel. + If all jobs in deploy succeed, the pipeline is marked as passed. + + +If any job fails, the pipeline is marked as failed and jobs in later stages do not +start. Jobs in the current stage are not stopped and continue to run. + +Additional details: + + + If a job does not specify a stage, the job is assigned the test stage. + If a stage is defined but no jobs use it, the stage is not visible in the pipeline, +which can help compliance pipeline configurations: + + Stages can be defined in the compliance configuration but remain hidden if not used. + The defined stages become visible when developers use them in job definitions. + + + + +Related topics: + + + To make a job start earlier and ignore the stage order, use the needs keyword. 
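
To illustrate the needs keyword mentioned in the related topics above, here is a minimal sketch in which the stage and job names are only illustrative. The deploy-docs job starts as soon as build-job succeeds, without waiting for the rest of the test stage:

stages:
  - build
  - test
  - deploy

build-job:
  stage: build
  script: echo "Compiling..."

test-job:
  stage: test
  script: echo "Running tests..."

deploy-docs:
  stage: deploy
  needs: [build-job]  # start as soon as build-job succeeds, ignoring the stage order
  script: echo "Publishing docs..."

Without needs, deploy-docs would wait for every job in the test stage to finish before starting.
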
+ + + +workflow + + + +History + + + + + +Introduced in GitLab 12.5 + + + + + + +Use workflow to control pipeline behavior. + +You can use some predefined CI/CD variables in +workflow configuration, but not variables that are only defined when jobs start. + +Related topics: + + + workflow: rules examples + Switch between branch pipelines and merge request pipelines + + + +workflow:auto_cancel:on_new_commit + + + +History + + + + + +Introduced in GitLab 16.8 with a flag named ci_workflow_auto_cancel_on_new_commit. Disabled by default. + +Enabled on GitLab.com and self-managed in GitLab 16.9. + +Generally available in GitLab 16.10. Feature flag ci_workflow_auto_cancel_on_new_commit removed. + + + + + + +Use workflow:auto_cancel:on_new_commit to configure the behavior of +the auto-cancel redundant pipelines feature. + +Possible inputs: + + + +conservative: Cancel the pipeline, but only if no jobs with interruptible: false have started yet. Default when not defined. + +interruptible: Cancel only jobs with interruptible: true. + +none: Do not auto-cancel any jobs. + + +Example of workflow:auto_cancel:on_new_commit: + +workflow: + auto_cancel: + on_new_commit: interruptible + +job1: + interruptible: true + script: sleep 60 + +job2: + interruptible: false # Default when not defined. + script: sleep 60 + + +In this example: + + + When a new commit is pushed to a branch, GitLab creates a new pipeline and job1 and job2 start. + If a new commit is pushed to the branch before the jobs complete, only job1 is canceled. + + + +workflow:auto_cancel:on_job_failure + + + +History + + + + + +Introduced in GitLab 16.10 with a flag named auto_cancel_pipeline_on_job_failure. Disabled by default. + + + + + + + + On self-managed GitLab, by default this feature is not available. +To enable the feature, an administrator can enable the feature flag named auto_cancel_pipeline_on_job_failure. +On GitLab.com and GitLab Dedicated, this feature is not available. + + +Use workflow:auto_cancel:on_job_failure to configure which jobs should be cancelled as soon as one job fails. + +Possible inputs: + + + +all: Cancel the pipeline and all running jobs as soon as one job fails. + +none: Do not auto-cancel any jobs. + + +Example of workflow:auto_cancel:on_job_failure: + +stages: [stage_a, stage_b] + +workflow: + auto_cancel: + on_job_failure: all + +job1: + stage: stage_a + script: sleep 60 + +job2: + stage: stage_a + script: + - sleep 30 + - exit 1 + +job3: + stage: stage_b + script: + - sleep 30 + + +In this example, if job2 fails, job1 is cancelled if it is still running and job3 does not start. + +Related topics: + + + Auto-cancel the parent pipeline from a downstream pipeline + + + +workflow:name + + + +History + + + + + +Introduced in GitLab 15.5 with a flag named pipeline_name. Disabled by default. + +Enabled on GitLab.com and self-managed in GitLab 15.7. + +Generally available in GitLab 15.8. Feature flag pipeline_name removed. + + + + + + +You can use name in workflow: to define a name for pipelines. + +All pipelines are assigned the defined name. Any leading or trailing spaces in the name are removed. + +Possible inputs: + + + A string. + +CI/CD variables. + A combination of both. + + +Examples of workflow:name: + +A simple pipeline name with a predefined variable: + +workflow: + name: 'Pipeline for branch: $CI_COMMIT_BRANCH' + + +A configuration with different pipeline names depending on the pipeline conditions: + +variables: + PROJECT1_PIPELINE_NAME: 'Default pipeline name' # A default is not required. 
+ +workflow: + name: '$PROJECT1_PIPELINE_NAME' + rules: + - if: '$CI_PIPELINE_SOURCE == ""merge_request_event""' + variables: + PROJECT1_PIPELINE_NAME: 'MR pipeline: $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME' + - if: '$CI_MERGE_REQUEST_LABELS =~ /pipeline:run-in-ruby3/' + variables: + PROJECT1_PIPELINE_NAME: 'Ruby 3 pipeline' + - when: always # Other pipelines can run, but use the default name + + +Additional details: + + + If the name is an empty string, the pipeline is not assigned a name. A name consisting +of only CI/CD variables could evaluate to an empty string if all the variables are also empty. + +workflow:rules:variables become global variables available in all jobs, +including trigger jobs which forward variables to downstream pipelines by default. +If the downstream pipeline uses the same variable, the variable is overwritten +by the upstream variable value. Be sure to either: + + Use a unique variable name in every project’s pipeline configuration, like PROJECT1_PIPELINE_NAME. + Use inherit:variables in the trigger job and list the +exact variables you want to forward to the downstream pipeline. + + + + + +workflow:rules + + +The rules keyword in workflow is similar to rules defined in jobs, +but controls whether or not a whole pipeline is created. + +When no rules evaluate to true, the pipeline does not run. + +Possible inputs: You can use some of the same keywords as job-level rules: + + + +rules: if. + +rules: changes. + +rules: exists. + +when, can only be always or never when used with workflow. + +variables. + + +Example of workflow:rules: + +workflow: + rules: + - if: $CI_COMMIT_TITLE =~ /-draft$/ + when: never + - if: $CI_PIPELINE_SOURCE == ""merge_request_event"" + - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + + +In this example, pipelines run if the commit title (first line of the commit message) does not end with -draft +and the pipeline is for either: + + + A merge request + The default branch. + + +Additional details: + + + If your rules match both branch pipelines (other than the default branch) and merge request pipelines, +duplicate pipelines can occur. + + +Related topics: + + + You can use the workflow:rules templates to import +a preconfigured workflow: rules entry. + +Common if clauses for workflow:rules. + +Use rules to run merge request pipelines. + + + +workflow:rules:variables + + + +History + + + + + +Introduced in GitLab 13.11. + +Feature flag removed in GitLab 14.1. + + + + + + +You can use variables in workflow:rules to define variables for +specific pipeline conditions. + +When the condition matches, the variable is created and can be used by all jobs +in the pipeline. If the variable is already defined at the global level, the workflow +variable takes precedence and overrides the global variable. + +Keyword type: Global keyword. + +Possible inputs: Variable name and value pairs: + + + The name can use only numbers, letters, and underscores (_). + The value must be a string. + + +Example of workflow:rules:variables: + +variables: + DEPLOY_VARIABLE: ""default-deploy"" + +workflow: + rules: + - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH + variables: + DEPLOY_VARIABLE: ""deploy-production"" # Override globally-defined DEPLOY_VARIABLE + - if: $CI_COMMIT_REF_NAME =~ /feature/ + variables: + IS_A_FEATURE: ""true"" # Define a new variable. 
+ - when: always # Run the pipeline in other cases + +job1: + variables: + DEPLOY_VARIABLE: ""job1-default-deploy"" + rules: + - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH + variables: # Override DEPLOY_VARIABLE defined + DEPLOY_VARIABLE: ""job1-deploy-production"" # at the job level. + - when: on_success # Run the job in other cases + script: + - echo ""Run script with $DEPLOY_VARIABLE as an argument"" + - echo ""Run another script if $IS_A_FEATURE exists"" + +job2: + script: + - echo ""Run script with $DEPLOY_VARIABLE as an argument"" + - echo ""Run another script if $IS_A_FEATURE exists"" + + +When the branch is the default branch: + + + job1’s DEPLOY_VARIABLE is job1-deploy-production. + job2’s DEPLOY_VARIABLE is deploy-production. + + +When the branch is feature: + + + job1’s DEPLOY_VARIABLE is job1-default-deploy, and IS_A_FEATURE is true. + job2’s DEPLOY_VARIABLE is default-deploy, and IS_A_FEATURE is true. + + +When the branch is something else: + + + job1’s DEPLOY_VARIABLE is job1-default-deploy. + job2’s DEPLOY_VARIABLE is default-deploy. + + +Additional details: + + + +workflow:rules:variables become global variables available in all jobs, +including trigger jobs which forward variables to downstream pipelines by default. +If the downstream pipeline uses the same variable, the variable is overwritten +by the upstream variable value. Be sure to either: + + Use unique variable names in every project’s pipeline configuration, like PROJECT1_VARIABLE_NAME. + Use inherit:variables in the trigger job and list the +exact variables you want to forward to the downstream pipeline. + + + + + +workflow:rules:auto_cancel + + + +History + + + + + +Introduced in GitLab 16.8 with a flag named ci_workflow_auto_cancel_on_new_commit. Disabled by default. + +Enabled on GitLab.com and self-managed in GitLab 16.9. + +Generally available in GitLab 16.10. Feature flag ci_workflow_auto_cancel_on_new_commit removed. + + + + + + +Use workflow:rules:auto_cancel to configure the behavior of +the workflow:auto_cancel:on_new_commit feature. + +Possible inputs: + + + +on_new_commit: workflow:auto_cancel:on_new_commit + + + +Example of workflow:rules:auto_cancel: + +workflow: + auto_cancel: + on_new_commit: interruptible + rules: + - if: $CI_COMMIT_REF_PROTECTED == 'true' + auto_cancel: + on_new_commit: none + - when: always # Run the pipeline in other cases + +test-job1: + script: sleep 10 + interruptible: false + +test-job2: + script: sleep 10 + interruptible: true + + +In this example, workflow:auto_cancel:on_new_commit +is set to interruptible for all jobs by default. But if a pipeline runs for a protected branch, +the rule overrides the default with on_new_commit: none. For example, if a pipeline +is running for: + + + A non-protected branch and a new commit is pushed, test-job1 continues to run and test-job2 is canceled. + A protected branch and a new commit is pushed, both test-job1 and test-job2 continue to run. + + +Header keywords + + +Some keywords must be defined in a header section of a YAML configuration file. +The header must be at the top of the file, separated from the rest of the configuration +with ---. + + +spec + + + +History + + + + + +Introduced in GitLab 15.11 as a Beta feature. + + + + + + +Add a spec section to the header of a YAML file to configure the behavior of a pipeline +when a configuration is added to the pipeline with the include keyword. 
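The spec keywords below are easiest to read together with the include side. As a minimal, hedged sketch (the file name custom_scan.yml, the scan-job job, and the ./run-scan command are assumptions for illustration), a reusable file declares its inputs in the header, separated from the rest of the configuration with ---:

spec:
  inputs:
    environment:
    scan-level:
      default: 'quick'
---

scan-job:
  stage: test
  script: ./run-scan --level $[[ inputs.scan-level ]] --environment $[[ inputs.environment ]]

A project that reuses this file supplies the input values when including it:

include:
  - local: 'custom_scan.yml'
    inputs:
      environment: staging

Because scan-level has a default, only environment must be supplied. The inputs are interpolated when the configuration is fetched during pipeline creation.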
+ + +spec:inputs + + +You can use spec:inputs to define input parameters for the CI/CD configuration you intend to add +to a pipeline with include. Use include:inputs to define the values to use when the pipeline runs. + +Use the inputs to customize the behavior of the configuration when included in CI/CD configuration. + +Use the interpolation format $[[ input.input-id ]] to reference the values outside of the header section. +Inputs are evaluated and interpolated when the configuration is fetched during pipeline creation, but before the +configuration is merged with the contents of the .gitlab-ci.yml file. + +Keyword type: Header keyword. specs must be declared at the top of the configuration file, +in a header section. + +Possible inputs: A hash of strings representing the expected inputs. + +Example of spec:inputs: + +spec: + inputs: + environment: + job-stage: +--- + +scan-website: + stage: $[[ inputs.job-stage ]] + script: ./scan-website $[[ inputs.environment ]] + + +Additional details: + + + Inputs are mandatory unless you use spec:inputs:default +to set a default value. + Inputs expect strings unless you use spec:inputs:type to set a +different input type. + A string containing an interpolation block must not exceed 1 MB. + The string inside an interpolation block must not exceed 1 KB. + + +Related topics: + + + +Define input parameters with spec:inputs. + + + +spec:inputs:default + + + +History + + + + + +Introduced in GitLab 15.11 as a Beta feature. + + + + + + +Inputs are mandatory when included, unless you set a default value with spec:inputs:default. + +Use default: '' to have no default value. + +Keyword type: Header keyword. specs must be declared at the top of the configuration file, +in a header section. + +Possible inputs: A string representing the default value, or ''. + +Example of spec:inputs:default: + +spec: + inputs: + website: + user: + default: 'test-user' + flags: + default: '' +--- + +# The pipeline configuration would follow... + + +In this example: + + + +website is mandatory and must be defined. + +user is optional. If not defined, the value is test-user. + +flags is optional. If not defined, it has no value. + + +Additional details: + + + The pipeline fails with a validation error when the input: + + Uses both default and options, but the default value +is not one of the listed options. + Uses both default and regex, but the default value does not match the regular expression. + Value does not match the type. + + + + + +spec:inputs:description + + + +History + + + + + +Introduced in GitLab 16.5. + + + + + + +Use description to give a description to a specific input. The description does +not affect the behavior of the input and is only used to help users of the file +understand the input. + +Keyword type: Header keyword. specs must be declared at the top of the configuration file, +in a header section. + +Possible inputs: A string representing the description. + +Example of spec:inputs:description: + +spec: + inputs: + flags: + description: 'Sample description of the `flags` input details.' +--- + +# The pipeline configuration would follow... + + + +spec:inputs:options + + + +History + + + + + +Introduced in GitLab 16.6. + + + + + + +Inputs can use options to specify a list of allowed values for an input. +The limit is 50 options per input. + +Keyword type: Header keyword. specs must be declared at the top of the configuration file, +in a header section. + +Possible inputs: An array of input options. 
+ +Example of spec:inputs:options: + +spec: + inputs: + environment: + options: + - development + - staging + - production +--- + +# The pipeline configuration would follow... + + +In this example: + + + +environment is mandatory and must be defined with one of the values in the list. + + +Additional details: + + + The pipeline fails with a validation error when: + + The input uses both options and default, but the default value +is not one of the listed options. + Any of the input options do not match the type, which can +be either string or number, but not boolean when using options. + + + + + +spec:inputs:regex + + + +History + + + + + +Introduced in GitLab 16.5. + + + + + + +Use spec:inputs:regex to specify a regular expression that the input must match. + +Keyword type: Header keyword. specs must be declared at the top of the configuration file, +in a header section. + +Possible inputs: Must be a regular expression that starts and ends with the / character. + +Example of spec:inputs:regex: + +spec: + inputs: + version: + regex: /^v\d\.\d+(\.\d+)$/ +--- + +# The pipeline configuration would follow... + + +In this example, inputs of v1.0 or v1.2.3 match the regular expression and pass validation. +An input of v1.A.B does not match the regular expression and fails validation. + +Additional details: + + + +inputs:regex can only be used with a type of string, +not number or boolean. + + + +spec:inputs:type + + +By default, inputs expect strings. Use spec:inputs:type to set a different required +type for inputs. + +Keyword type: Header keyword. specs must be declared at the top of the configuration file, +in a header section. + +Possible inputs: Can be one of: + + + +string, to accept string inputs (default when not defined). + +number, to only accept numeric inputs. + +boolean, to only accept true or false inputs. + + +Example of spec:inputs:type: + +spec: + inputs: + job_name: + website: + type: string + port: + type: number + available: + type: boolean +--- + +# The pipeline configuration would follow... + + +Job keywords + + +The following topics explain how to use keywords to configure CI/CD pipelines. + + +after_script + + +Use after_script to define an array of commands that run after a job’s script section, including failed jobs with failure type of script_failure. +after_script commands do not run after other failure types. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: An array including: + + + Single line commands. + Long commands split over multiple lines. + +YAML anchors. + + +CI/CD variables are supported. + +Example of after_script: + +job: + script: + - echo ""An example script section."" + after_script: + - echo ""Execute this command after the `script` section completes."" + + +Additional details: + +Scripts you specify in after_script execute in a new shell, separate from any +before_script or script commands. As a result, they: + + + Have the current working directory set back to the default (according to the variables which define how the runner processes Git requests). + Don’t have access to changes done by commands defined in the before_script or script, +including: + + Command aliases and variables exported in script scripts. + Changes outside of the working tree (depending on the runner executor), like +software installed by a before_script or script script. + + + Have a separate timeout. 
For GitLab Runner 16.4 and later, this defaults to 5 minutes, and can be configured with the +RUNNER_AFTER_SCRIPT_TIMEOUT variable. +In GitLab 16.3 and earlier, the timeout is hard-coded to 5 minutes. + Don’t affect the job’s exit code. If the script section succeeds and the +after_script times out or fails, the job exits with code 0 (Job Succeeded). + + +If a job times out or is cancelled, the after_script commands do not execute. +An issue exists to add support for executing after_script commands for timed-out or cancelled jobs. + +Related topics: + + + +Use after_script with default +to define a default array of commands that should run after all jobs. + You can ignore non-zero exit codes. + +Use color codes with after_script +to make job logs easier to review. + +Create custom collapsible sections +to simplify job log output. + + + +allow_failure + + +Use allow_failure to determine whether a pipeline should continue running when a job fails. + + + To let the pipeline continue running subsequent jobs, use allow_failure: true. + To stop the pipeline from running subsequent jobs, use allow_failure: false. + + +When jobs are allowed to fail (allow_failure: true) an orange warning ( ) +indicates that a job failed. However, the pipeline is successful and the associated commit +is marked as passed with no warnings. + +This same warning is displayed when: + + + All other jobs in the stage are successful. + All other jobs in the pipeline are successful. + + +The default value for allow_failure is: + + + +true for manual jobs. + +false for jobs that use when: manual inside rules. + +false in all other cases. + + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +true or false. + + +Example of allow_failure: + +job1: + stage: test + script: + - execute_script_1 + +job2: + stage: test + script: + - execute_script_2 + allow_failure: true + +job3: + stage: deploy + script: + - deploy_to_staging + environment: staging + + +In this example, job1 and job2 run in parallel: + + + If job1 fails, jobs in the deploy stage do not start. + If job2 fails, jobs in the deploy stage can still start. + + +Additional details: + + + You can use allow_failure as a subkey of rules. + If allow_failure: true is set, the job is always considered successful, and later jobs with when: on_failure don’t start if this job fails. + You can use allow_failure: false with a manual job to create a blocking manual job. +A blocked pipeline does not run any jobs in later stages until the manual job +is started and completes successfully. + + + +allow_failure:exit_codes + + + +History + + + + + +Introduced in GitLab 13.8. + +Feature flag removed in GitLab 13.9. + + + + + + +Use allow_failure:exit_codes to control when a job should be +allowed to fail. The job is allow_failure: true for any of the listed exit codes, +and allow_failure false for any other exit code. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + A single exit code. + An array of exit codes. + + +Example of allow_failure: + +test_job_1: + script: + - echo ""Run a script that results in exit code 1. This job fails."" + - exit 1 + allow_failure: + exit_codes: 137 + +test_job_2: + script: + - echo ""Run a script that results in exit code 137. This job is allowed to fail."" + - exit 137 + allow_failure: + exit_codes: + - 137 + - 255 + + + +artifacts + + +Use artifacts to specify which files to save as job artifacts. 
+Job artifacts are a list of files and directories that are +attached to the job when it succeeds, fails, or always. + +The artifacts are sent to GitLab after the job finishes. They are +available for download in the GitLab UI if the size is smaller than the +maximum artifact size. + +By default, jobs in later stages automatically download all the artifacts created +by jobs in earlier stages. You can control artifact download behavior in jobs with +dependencies. + +When using the needs keyword, jobs can only download +artifacts from the jobs defined in the needs configuration. + +Job artifacts are only collected for successful jobs by default, and +artifacts are restored after caches. + +Read more about artifacts. + + +artifacts:paths + + +Paths are relative to the project directory ($CI_PROJECT_DIR) and can’t directly +link outside it. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + An array of file paths, relative to the project directory. + You can use Wildcards that use glob +patterns and: + + In GitLab Runner 13.0 and later, +doublestar.Glob. + In GitLab Runner 12.10 and earlier, filepath.Match. + + + + +CI/CD variables are supported. + +Example of artifacts:paths: + +job: + artifacts: + paths: + - binaries/ + - .config + + +This example creates an artifact with .config and all the files in the binaries directory. + +Additional details: + + + If not used with artifacts:name, the artifacts file +is named artifacts, which becomes artifacts.zip when downloaded. + + +Related topics: + + + To restrict which jobs a specific job fetches artifacts from, see dependencies. + +Create job artifacts. + + + +artifacts:exclude + + + +History + + + + + +Introduced in GitLab 13.1 + Requires GitLab Runner 13.1 + + + + + + +Use artifacts:exclude to prevent files from being added to an artifacts archive. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + An array of file paths, relative to the project directory. + You can use Wildcards that use glob or +doublestar.PathMatch patterns. + + +Example of artifacts:exclude: + +artifacts: + paths: + - binaries/ + exclude: + - binaries/**/*.o + + +This example stores all files in binaries/, but not *.o files located in +subdirectories of binaries/. + +Additional details: + + + +artifacts:exclude paths are not searched recursively. + Files matched by artifacts:untracked can be excluded using +artifacts:exclude too. + + +Related topics: + + + +Exclude files from job artifacts. + + + +artifacts:expire_in + + + +History + + + + + +Introduced in GitLab 13.0 behind a disabled feature flag, the latest job artifacts are kept regardless of expiry time. + +Made default behavior in GitLab 13.4. + +Introduced in GitLab 13.8, keeping latest job artifacts can be disabled at the project level. + +Introduced in GitLab 13.9, keeping latest job artifacts can be disabled instance-wide. + + + + + + +Use expire_in to specify how long job artifacts are stored before +they expire and are deleted. The expire_in setting does not affect: + + + Artifacts from the latest job, unless keeping the latest job artifacts is disabled +at the project level. +or instance-wide. + + +After their expiry, artifacts are deleted hourly by default (using a cron job), and are not +accessible anymore. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: The expiry time. 
If no unit is provided, the time is in seconds. +Valid values include: + + + '42' + 42 seconds + 3 mins 4 sec + 2 hrs 20 min + 2h20min + 6 mos 1 day + 47 yrs 6 mos and 4d + 3 weeks and 2 days + never + + +Example of artifacts:expire_in: + +job: + artifacts: + expire_in: 1 week + + +Additional details: + + + The expiration time period begins when the artifact is uploaded and stored on GitLab. +If the expiry time is not defined, it defaults to the instance wide setting. + To override the expiration date and protect artifacts from being automatically deleted: + + Select Keep on the job page. + +In GitLab 13.3 and later, set the value of +expire_in to never. + + + If the expiry time is too short, jobs in later stages of a long pipeline might try to fetch +expired artifacts from earlier jobs. If the artifacts are expired, jobs that try to fetch +them fail with a could not retrieve the needed artifacts error. +Set the expiry time to be longer, or use dependencies in later jobs +to ensure they don’t try to fetch expired artifacts. + + + +artifacts:expose_as + + + +History + + + + + +Introduced in GitLab 12.5. + + + + + + +Use the artifacts:expose_as keyword to +expose job artifacts in the merge request UI. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + The name to display in the merge request UI for the artifacts download link. +Must be combined with artifacts:paths. + + +Example of artifacts:expose_as: + +test: + script: [""echo 'test' > file.txt""] + artifacts: + expose_as: 'artifact 1' + paths: ['file.txt'] + + +Additional details: + + + Artifacts are saved, but do not display in the UI if the artifacts:paths values: + + Use CI/CD variables. + Define a directory, but do not end with /. For example, directory/ works with artifacts:expose_as, +but directory does not. + Start with ./. For example, file works with artifacts:expose_as, but ./file does not. + + + A maximum of 10 job artifacts per merge request can be exposed. + Glob patterns are unsupported. + If a directory is specified and there is more than one file in the directory, +the link is to the job artifacts browser. + If GitLab Pages is enabled, GitLab automatically +renders the artifacts when the artifacts is a single file with one of these extensions: + + +.html or .htm + + .txt + .json + .xml + .log + + + + +Related topics: + + + +Expose job artifacts in the merge request UI. + + + +artifacts:name + + +Use the artifacts:name keyword to define the name of the created artifacts +archive. You can specify a unique name for every archive. + +If not defined, the default name is artifacts, which becomes artifacts.zip when downloaded. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + The name of the artifacts archive. CI/CD variables are supported. +Must be combined with artifacts:paths. + + +Example of artifacts:name: + +To create an archive with a name of the current job: + +job: + artifacts: + name: ""job1-artifacts-file"" + paths: + - binaries/ + + +Related topics: + + + +Use CI/CD variables to define the artifacts name. + + + +artifacts:public + + + +History + + + + + +Introduced in GitLab 13.8 with a flag named non_public_artifacts, disabled by default. + +Updated in GitLab 15.10. Artifacts created with artifacts:public before 15.10 are not guaranteed to remain private after this update. + +Generally available in GitLab 16.7. Feature flag non_public_artifacts removed. 
+ + + + + + +Use artifacts:public to determine whether the job artifacts should be +publicly available. + +When artifacts:public is true (default), the artifacts in +public pipelines are available for download by anonymous, guest, and reporter users. + +To deny read access to artifacts in public +pipelines for anonymous, guest, and reporter users, set artifacts:public to false: + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + +true (default if not defined) or false. + + +Example of artifacts:public: + +job: + artifacts: + public: false + + + +artifacts:reports + + +Use artifacts:reports to collect artifacts generated by +included templates in jobs. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + See list of available artifacts reports types. + + +Example of artifacts:reports: + +rspec: + stage: test + script: + - bundle install + - rspec --format RspecJunitFormatter --out rspec.xml + artifacts: + reports: + junit: rspec.xml + + +Additional details: + + + Combining reports in parent pipelines using artifacts from child pipelines is +not supported. Track progress on adding support in this issue. + To be able to browse the report output files, include the artifacts:paths keyword. This uploads and stores the artifact twice. + Artifacts created for artifacts: reports are always uploaded, regardless of the job results (success or failure). +You can use artifacts:expire_in to set an expiration +date for the artifacts. + + + +artifacts:untracked + + +Use artifacts:untracked to add all Git untracked files as artifacts (along +with the paths defined in artifacts:paths). artifacts:untracked ignores configuration +in the repository’s .gitignore, so matching artifacts in .gitignore are included. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + +true or false (default if not defined). + + +Example of artifacts:untracked: + +Save all Git untracked files: + +job: + artifacts: + untracked: true + + +Related topics: + + + +Add untracked files to artifacts. + + + +artifacts:when + + +Use artifacts:when to upload artifacts on job failure or despite the +failure. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + +on_success (default): Upload artifacts only when the job succeeds. + +on_failure: Upload artifacts only when the job fails. + +always: Always upload artifacts (except when jobs time out). For example, when +uploading artifacts +required to troubleshoot failing tests. + + +Example of artifacts:when: + +job: + artifacts: + when: on_failure + + +Additional details: + + + The artifacts created for artifacts:reports are always uploaded, +regardless of the job results (success or failure). artifacts:when does not change this behavior. + + + +before_script + + +Use before_script to define an array of commands that should run before each job’s +script commands, but after artifacts are restored. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: An array including: + + + Single line commands. + Long commands split over multiple lines. + +YAML anchors. + + +CI/CD variables are supported. 
+ +Example of before_script: + +job: + before_script: + - echo ""Execute this command before any 'script:' commands."" + script: + - echo ""This command executes after the job's 'before_script' commands."" + + +Additional details: + + + Scripts you specify in before_script are concatenated with any scripts you specify +in the main script. The combined scripts execute together in a single shell. + Using before_script at the top level, but not in the default section, is deprecated. + + +Related topics: + + + +Use before_script with default +to define a default array of commands that should run before the script commands in all jobs. + You can ignore non-zero exit codes. + +Use color codes with before_script +to make job logs easier to review. + +Create custom collapsible sections +to simplify job log output. + + + +cache + + + +History + + + + + +Introduced in GitLab 15.0, caches are not shared between protected and unprotected branches. + + + + + + +Use cache to specify a list of files and directories to +cache between jobs. You can only use paths that are in the local working copy. + +Caches are: + + + Shared between pipelines and jobs. + By default, not shared between protected and unprotected branches. + Restored before artifacts. + Limited to a maximum of four different caches. + + +You can disable caching for specific jobs, +for example to override: + + + A default cache defined with default. + The configuration for a job added with include. + + +For more information about caches, see Caching in GitLab CI/CD. + + +cache:paths + + +Use the cache:paths keyword to choose which files or directories to cache. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + An array of paths relative to the project directory ($CI_PROJECT_DIR). +You can use wildcards that use glob +patterns: + + In GitLab Runner 13.0 and later, +doublestar.Glob. + In GitLab Runner 12.10 and earlier, +filepath.Match. + + + + +Example of cache:paths: + +Cache all files in binaries that end in .apk and the .config file: + +rspec: + script: + - echo ""This job uses a cache."" + cache: + key: binaries-cache + paths: + - binaries/*.apk + - .config + + +Additional details: + + + The cache:paths keyword includes files even if they are untracked or in your .gitignore file. + + +Related topics: + + + See the common cache use cases for more +cache:paths examples. + + + +cache:key + + +Use the cache:key keyword to give each cache a unique identifying key. All jobs +that use the same cache key use the same cache, including in different pipelines. + +If not set, the default key is default. All jobs with the cache keyword but +no cache:key share the default cache. + +Must be used with cache: paths, or nothing is cached. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + A string. + A predefined CI/CD variable. + A combination of both. + + +Example of cache:key: + +cache-job: + script: + - echo ""This job uses a cache."" + cache: + key: binaries-cache-$CI_COMMIT_REF_SLUG + paths: + - binaries/ + + +Additional details: + + + If you use Windows Batch to run your shell scripts you must replace +$ with %. For example: key: %CI_COMMIT_REF_SLUG% + + + The cache:key value can’t contain: + + + The / character, or the equivalent URI-encoded %2F. + Only the . character (any number), or the equivalent URI-encoded %2E. 
+ + + The cache is shared between jobs, so if you’re using different +paths for different jobs, you should also set a different cache:key. +Otherwise cache content can be overwritten. + + +Related topics: + + + You can specify a fallback cache key +to use if the specified cache:key is not found. + You can use multiple cache keys in a single job. + See the common cache use cases for more +cache:key examples. + + + +cache:key:files + + + +History + + + + + +Introduced in GitLab 12.5. + + + + + + +Use the cache:key:files keyword to generate a new key when one or two specific files +change. cache:key:files lets you reuse some caches, and rebuild them less often, +which speeds up subsequent pipeline runs. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + An array of one or two file paths. + + +CI/CD variables are not supported. + +Example of cache:key:files: + +cache-job: + script: + - echo ""This job uses a cache."" + cache: + key: + files: + - Gemfile.lock + - package.json + paths: + - vendor/ruby + - node_modules + + +This example creates a cache for Ruby and Node.js dependencies. The cache +is tied to the current versions of the Gemfile.lock and package.json files. When one of +these files changes, a new cache key is computed and a new cache is created. Any future +job runs that use the same Gemfile.lock and package.json with cache:key:files +use the new cache, instead of rebuilding the dependencies. + +Additional details: + + + The cache key is a SHA computed from the most recent commits +that changed each listed file. +If neither file is changed in any commits, the fallback key is default. + + + +cache:key:prefix + + + +History + + + + + +Introduced in GitLab 12.5. + + + + + + +Use cache:key:prefix to combine a prefix with the SHA computed for cache:key:files. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + A string + A predefined variables + + A combination of both. + + +Example of cache:key:prefix: + +rspec: + script: + - echo ""This rspec job uses a cache."" + cache: + key: + files: + - Gemfile.lock + prefix: $CI_JOB_NAME + paths: + - vendor/ruby + + +For example, adding a prefix of $CI_JOB_NAME causes the key to look like rspec-feef9576d21ee9b6a32e30c5c79d0a0ceb68d1e5. +If a branch changes Gemfile.lock, that branch has a new SHA checksum for cache:key:files. +A new cache key is generated, and a new cache is created for that key. If Gemfile.lock +is not found, the prefix is added to default, so the key in the example would be rspec-default. + +Additional details: + + + If no file in cache:key:files is changed in any commits, the prefix is added to the default key. + + + +cache:untracked + + +Use untracked: true to cache all files that are untracked in your Git repository. +Untracked files include files that are: + + + Ignored due to .gitignore configuration. + Created, but not added to the checkout with git add. + + +Caching untracked files can create unexpectedly large caches if the job downloads: + + + Dependencies, like gems or node modules, which are usually untracked. + +Artifacts from a different job. Files extracted from the artifacts are untracked by default. + + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + +true or false (default). 
+ + +Example of cache:untracked: + +rspec: + script: test + cache: + untracked: true + + +Additional details: + + + + You can combine cache:untracked with cache:paths to cache all untracked files, as well as files in the configured paths. +Use cache:paths to cache any specific files, including tracked files, or files that are outside of the working directory, +and use cache: untracked to also cache all untracked files. For example: + + +rspec: + script: test + cache: + untracked: true + paths: + - binaries/ + + + In this example, the job caches all untracked files in the repository, as well as all the files in binaries/. +If there are untracked files in binaries/, they are covered by both keywords. + + + + +cache:unprotect + + + +History + + + + + +Introduced in GitLab 15.8. + + + + + + +Use cache:unprotect to set a cache to be shared between protected +and unprotected branches. + + + caution When set to true, users without access to protected branches can read and write to +cache keys used by protected branches. + + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + +true or false (default). + + +Example of cache:unprotect: + +rspec: + script: test + cache: + unprotect: true + + + +cache:when + + + +History + + + + + +Introduced in GitLab 13.5 and GitLab Runner v13.5.0. + + + + + + +Use cache:when to define when to save the cache, based on the status of the job. + +Must be used with cache: paths, or nothing is cached. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + +on_success (default): Save the cache only when the job succeeds. + +on_failure: Save the cache only when the job fails. + +always: Always save the cache. + + +Example of cache:when: + +rspec: + script: rspec + cache: + paths: + - rspec/ + when: 'always' + + +This example stores the cache whether or not the job fails or succeeds. + + +cache:policy + + +To change the upload and download behavior of a cache, use the cache:policy keyword. +By default, the job downloads the cache when the job starts, and uploads changes +to the cache when the job ends. This caching style is the pull-push policy (default). + +To set a job to only download the cache when the job starts, but never upload changes +when the job finishes, use cache:policy:pull. + +To set a job to only upload a cache when the job finishes, but never download the +cache when the job starts, use cache:policy:push. + +Use the pull policy when you have many jobs executing in parallel that use the same cache. +This policy speeds up job execution and reduces load on the cache server. You can +use a job with the push policy to build the cache. + +Must be used with cache: paths, or nothing is cached. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + pull + push + +pull-push (default) + +CI/CD variables. + + +Example of cache:policy: + +prepare-dependencies-job: + stage: build + cache: + key: gems + paths: + - vendor/bundle + policy: push + script: + - echo ""This job only downloads dependencies and builds the cache."" + - echo ""Downloading dependencies..."" + +faster-test-job: + stage: test + cache: + key: gems + paths: + - vendor/bundle + policy: pull + script: + - echo ""This job script uses the cache, but does not update it."" + - echo ""Running tests..."" + + +Related topics: + + + You can use a variable to control a job’s cache policy. 
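Because cache:policy accepts CI/CD variables, you can combine it with rules to switch a single job between push and pull behavior. A minimal sketch of this pattern (the POLICY variable name is an assumption):

conditional-cache-job:
  stage: build
  rules:
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
      variables:
        POLICY: pull-push   # Keep the cache up to date on the default branch.
    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH
      variables:
        POLICY: pull        # Only download the cache on other branches.
  cache:
    key: gems
    paths:
      - vendor/bundle
    policy: $POLICY
  script:
    - echo 'Install dependencies into vendor/bundle here.'

In this sketch, jobs on the default branch rebuild and upload the cache, while jobs on other branches only consume it.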
+ + + +cache:fallback_keys + + +Use cache:fallback_keys to specify a list of keys to try to restore cache from +if there is no cache found for the cache:key. Caches are retrieved in the order specified +in the fallback_keys section. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + An array of cache keys + + +Example of cache:fallback_keys: + +rspec: + script: rspec + cache: + key: gems-$CI_COMMIT_REF_SLUG + paths: + - rspec/ + fallback_keys: + - gems + when: 'always' + + + +coverage + + +Use coverage with a custom regular expression to configure how code coverage +is extracted from the job output. The coverage is shown in the UI if at least one +line in the job output matches the regular expression. + +To extract the code coverage value from the match, GitLab uses +this smaller regular expression: \d+(?:\.\d+)?. + +Possible inputs: + + + An RE2 regular expression. Must start and end with /. Must match the coverage number. +May match surrounding text as well, so you don’t need to use a regular expression character group +to capture the exact number. +Because it uses RE2 syntax, all groups must be non-capturing. + + +Example of coverage: + +job1: + script: rspec + coverage: '/Code coverage: \d+(?:\.\d+)?/' + + +In this example: + + + GitLab checks the job log for a match with the regular expression. A line +like Code coverage: 67.89% of lines covered would match. + GitLab then checks the matched fragment to find a match to \d+(?:\.\d+)?. +The sample matching line above gives a code coverage of 67.89. + + +Additional details: + + + You can find parse examples in Code Coverage. + If there is more than one matched line in the job output, the last line is used +(the first result of reverse search). + If there are multiple matches in a single line, the last match is searched +for the coverage number. + If there are multiple coverage numbers found in the matched fragment, the first number is used. + Leading zeros are removed. + Coverage output from child pipelines +is not recorded or displayed. Check the related issue +for more details. + + + +dast_configuration + + + +Tier: Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + +Introduced in GitLab 14.1. + + + + + + +Use the dast_configuration keyword to specify a site profile and scanner profile to be used in a +CI/CD configuration. Both profiles must first have been created in the project. The job’s stage must +be dast. + +Keyword type: Job keyword. You can use only as part of a job. + +Possible inputs: One each of site_profile and scanner_profile. + + + Use site_profile to specify the site profile to be used in the job. + Use scanner_profile to specify the scanner profile to be used in the job. + + +Example of dast_configuration: + +stages: + - build + - dast + +include: + - template: DAST.gitlab-ci.yml + +dast: + dast_configuration: + site_profile: ""Example Co"" + scanner_profile: ""Quick Passive Test"" + + +In this example, the dast job extends the dast configuration added with the include keyword +to select a specific site profile and scanner profile. + +Additional details: + + + Settings contained in either a site profile or scanner profile take precedence over those +contained in the DAST template. + + +Related topics: + + + +Site profile. + +Scanner profile. + + + +dependencies + + +Use the dependencies keyword to define a list of specific jobs to fetch artifacts +from. The specified jobs must all be in earlier stages. 
You can also set a job to download no artifacts at all. + +When dependencies is not defined in a job, all jobs in earlier stages are considered dependent +and the job fetches all artifacts from those jobs. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + The names of jobs to fetch artifacts from. + An empty array ([]), to configure the job to not download any artifacts. + + +Example of dependencies: + +build osx: + stage: build + script: make build:osx + artifacts: + paths: + - binaries/ + +build linux: + stage: build + script: make build:linux + artifacts: + paths: + - binaries/ + +test osx: + stage: test + script: make test:osx + dependencies: + - build osx + +test linux: + stage: test + script: make test:linux + dependencies: + - build linux + +deploy: + stage: deploy + script: make deploy + environment: production + + +In this example, two jobs have artifacts: build osx and build linux. When test osx is executed, +the artifacts from build osx are downloaded and extracted in the context of the build. +The same thing happens for test linux and artifacts from build linux. + +The deploy job downloads artifacts from all previous jobs because of +the stage precedence. + +Additional details: + + + The job status does not matter. If a job fails or it’s a manual job that isn’t triggered, no error occurs. + If the artifacts of a dependent job are expired or +deleted, then the job fails. + To fetch artifacts from a job in the same stage, you must use needs:artifacts. +You should not combine dependencies with needs in the same job. + + + +environment + + +Use environment to define the environment that a job deploys to. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: The name of the environment the job deploys to, in one of these +formats: + + + Plain text, including letters, digits, spaces, and these characters: -, _, /, $, {, }. + CI/CD variables, including predefined, project, group, instance, or variables defined in the +.gitlab-ci.yml file. You can’t use variables defined in a script section. + + +Example of environment: + +deploy to production: + stage: deploy + script: git push production HEAD:main + environment: production + + +Additional details: + + + If you specify an environment and no environment with that name exists, an environment is +created. + + + +environment:name + + +Set a name for an environment. + +Common environment names are qa, staging, and production, but you can use any name. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: The name of the environment the job deploys to, in one of these +formats: + + + Plain text, including letters, digits, spaces, and these characters: -, _, /, $, {, }. + +CI/CD variables, +including predefined, project, group, instance, or variables defined in the +.gitlab-ci.yml file. You can’t use variables defined in a script section. + + +Example of environment:name: + +deploy to production: + stage: deploy + script: git push production HEAD:main + environment: + name: production + + + +environment:url + + +Set a URL for an environment. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: A single URL, in one of these formats: + + + Plain text, like https://prod.example.com. + +CI/CD variables, +including predefined, project, group, instance, or variables defined in the +.gitlab-ci.yml file. You can’t use variables defined in a script section. 
+ + +Example of environment:url: + +deploy to production: + stage: deploy + script: git push production HEAD:main + environment: + name: production + url: https://prod.example.com + + +Additional details: + + + After the job completes, you can access the URL by selecting a button in the merge request, +environment, or deployment pages. + + + +environment:on_stop + + +Closing (stopping) environments can be achieved with the on_stop keyword +defined under environment. It declares a different job that runs to close the +environment. + +Keyword type: Job keyword. You can use it only as part of a job. + +Additional details: + + + See environment:action for more details and an example. + + + +environment:action + + +Use the action keyword to specify how the job interacts with the environment. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: One of the following keywords: + + + + + Value + Description + + + + + start + Default value. Indicates that the job starts the environment. The deployment is created after the job starts. + + + prepare + Indicates that the job is only preparing the environment. It does not trigger deployments. Read more about preparing environments. + + + stop + Indicates that the job stops an environment. Read more about stopping an environment. + + + verify + Indicates that the job is only verifying the environment. It does not trigger deployments. Read more about verifying environments. + + + access + Indicates that the job is only accessing the environment. It does not trigger deployments. Read more about accessing environments. + + + + +Example of environment:action: + +stop_review_app: + stage: deploy + variables: + GIT_STRATEGY: none + script: make delete-app + when: manual + environment: + name: review/$CI_COMMIT_REF_SLUG + action: stop + + + +environment:auto_stop_in + + + +History + + + + + CI/CD variable support introduced in GitLab 15.4. + + + + + + +The auto_stop_in keyword specifies the lifetime of the environment. When an environment expires, GitLab +automatically stops it. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: A period of time written in natural language. For example, +these are all equivalent: + + + 168 hours + 7 days + one week + never + + +CI/CD variables are supported. + +Example of environment:auto_stop_in: + +review_app: + script: deploy-review-app + environment: + name: review/$CI_COMMIT_REF_SLUG + auto_stop_in: 1 day + + +When the environment for review_app is created, the environment’s lifetime is set to 1 day. +Every time the review app is deployed, that lifetime is also reset to 1 day. + +Related topics: + + + +Environments auto-stop documentation. + + + +environment:kubernetes + + + +History + + + + + +Introduced in GitLab 12.6. + + + + + + +Use the kubernetes keyword to configure deployments to a +Kubernetes cluster that is associated with your project. + +Keyword type: Job keyword. You can use it only as part of a job. + +Example of environment:kubernetes: + +deploy: + stage: deploy + script: make deploy-app + environment: + name: production + kubernetes: + namespace: production + + +This configuration sets up the deploy job to deploy to the production +environment, using the production +Kubernetes namespace. + +Additional details: + + + Kubernetes configuration is not supported for Kubernetes clusters +managed by GitLab. + + +Related topics: + + + +Available settings for kubernetes. 
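Pulling several of the environment keywords above together, a deployment job and its on_stop counterpart are usually defined as a pair. A minimal sketch (the job names and make targets are illustrative assumptions) that combines name, url, auto_stop_in, and on_stop with a stop job like the one shown for environment:action:

deploy_review_app:
  stage: deploy
  script: make deploy-app
  environment:
    name: review/$CI_COMMIT_REF_SLUG
    url: https://$CI_ENVIRONMENT_SLUG.example.com
    on_stop: stop_review_app
    auto_stop_in: 1 week

stop_review_app:
  stage: deploy
  variables:
    GIT_STRATEGY: none
  script: make delete-app
  when: manual
  environment:
    name: review/$CI_COMMIT_REF_SLUG
    action: stop

The on_stop value must match the name of the stop job, and both jobs must use the same environment name. The environment is stopped when the manual job runs, or automatically after the auto_stop_in period expires.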
+ + + +environment:deployment_tier + + + +History + + + + + +Introduced in GitLab 13.10. + + + + + + +Use the deployment_tier keyword to specify the tier of the deployment environment. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: One of the following: + + + production + staging + testing + development + other + + +Example of environment:deployment_tier: + +deploy: + script: echo + environment: + name: customer-portal + deployment_tier: production + + +Additional details: + + + Environments created from this job definition are assigned a tier based on this value. + Existing environments don’t have their tier updated if this value is added later. Existing environments must have their tier updated via the Environments API. + + +Related topics: + + + +Deployment tier of environments. + + +Dynamic environments + + +Use CI/CD variables to dynamically name environments. + +For example: + +deploy as review app: + stage: deploy + script: make deploy + environment: + name: review/$CI_COMMIT_REF_SLUG + url: https://$CI_ENVIRONMENT_SLUG.example.com/ + + +The deploy as review app job is marked as a deployment to dynamically +create the review/$CI_COMMIT_REF_SLUG environment. $CI_COMMIT_REF_SLUG +is a CI/CD variable set by the runner. The +$CI_ENVIRONMENT_SLUG variable is based on the environment name, but suitable +for inclusion in URLs. If the deploy as review app job runs in a branch named +pow, this environment would be accessible with a URL like https://review-pow.example.com/. + +The common use case is to create dynamic environments for branches and use them +as Review Apps. You can see an example that uses Review Apps at +https://gitlab.com/gitlab-examples/review-apps-nginx/. + + +extends + + +Use extends to reuse configuration sections. It’s an alternative to YAML anchors +and is a little more flexible and readable. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + The name of another job in the pipeline. + A list (array) of names of other jobs in the pipeline. + + +Example of extends: + +.tests: + script: rake test + stage: test + only: + refs: + - branches + +rspec: + extends: .tests + script: rake rspec + only: + variables: + - $RSPEC + + +In this example, the rspec job uses the configuration from the .tests template job. +When creating the pipeline, GitLab: + + + Performs a reverse deep merge based on the keys. + Merges the .tests content with the rspec job. + Doesn’t merge the values of the keys. + + +The result is this rspec job: + +rspec: + script: rake rspec + stage: test + only: + refs: + - branches + variables: + - $RSPEC + + +Additional details: + + + In GitLab 12.0 and later, you can use multiple parents for extends. + The extends keyword supports up to eleven levels of inheritance, but you should +avoid using more than three levels. + In the example above, .tests is a hidden job, +but you can extend configuration from regular jobs as well. + + +Related topics: + + + +Reuse configuration sections by using extends. + Use extends to reuse configuration from included configuration files. + + + +hooks + + + +History + + + + + +Introduced in GitLab 15.6 with a flag named ci_hooks_pre_get_sources_script. Disabled by default. + +Generally available in GitLab 15.10. Feature flag ci_hooks_pre_get_sources_script removed. + + + + + + +Use hooks to specify lists of commands to execute on the runner +at certain stages of job execution, like before retrieving the Git repository. + +Keyword type: Job keyword. 
You can use it only as part of a job or in the +default section. + +Possible inputs: + + + A hash of hooks and their commands. Available hooks: pre_get_sources_script. + + + +hooks:pre_get_sources_script + + + +History + + + + + +Introduced in GitLab 15.6 with a flag named ci_hooks_pre_get_sources_script. Disabled by default. + +Generally available in GitLab 15.10. Feature flag ci_hooks_pre_get_sources_script removed. + + + + + + +Use hooks:pre_get_sources_script to specify a list of commands to execute on the runner +before cloning the Git repository and any submodules. +You can use it for example to: + + + Adjust the Git configuration. + Export tracing variables. + + +Possible inputs: An array including: + + + Single line commands. + Long commands split over multiple lines. + +YAML anchors. + + +CI/CD variables are supported. + +Example of hooks:pre_get_sources_script: + +job1: + hooks: + pre_get_sources_script: + - echo 'hello job1 pre_get_sources_script' + script: echo 'hello job1 script' + + +Related topics: + + + GitLab Runner configuration + + + +identity + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com +Status: Beta + + +History + + + + + +Introduced in GitLab 16.9 with a flag named google_cloud_support_feature_flag. This feature is in Beta. + + + + + + + + On GitLab.com, this feature is available for a subset of users. On GitLab Dedicated, this feature is not available. + + +This feature is in Beta. +To join the list of users testing this feature, join the waitlist. + +Use identity to authenticate with third party services using identity federation. + +Keyword type: Job keyword. You can use it only as part of a job or in the default: section. + +Possible inputs: An identifier. Supported providers: + + + +google_cloud: Google Cloud. Must be configured with the Google Cloud IAM integration. + + +Example of identity: + +job_with_workload_identity: + identity: google_cloud + script: + - gcloud compute instances list + + +Related topics: + + + +Workload Identity Federation. + +Google Cloud IAM integration. + + + +id_tokens + + + +History + + + + + +Introduced in GitLab 15.7. + + + + + + +Use id_tokens to create JSON web tokens (JWT) to authenticate with third party services. All +JWTs created this way support OIDC authentication. The required aud sub-keyword is used to configure the aud claim for the JWT. + +Possible inputs: + + + Token names with their aud claims. aud supports: + + A single string. + An array of strings. + +CI/CD variables. + + + + +Example of id_tokens: + +job_with_id_tokens: + id_tokens: + ID_TOKEN_1: + aud: https://vault.example.com + ID_TOKEN_2: + aud: + - https://gcp.com + - https://aws.com + SIGSTORE_ID_TOKEN: + aud: sigstore + script: + - command_to_authenticate_with_vault $ID_TOKEN_1 + - command_to_authenticate_with_aws $ID_TOKEN_2 + - command_to_authenticate_with_gcp $ID_TOKEN_2 + + +Related topics: + + + +Keyless signing with Sigstore. + + + +image + + +Use image to specify a Docker image that the job runs in. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: The name of the image, including the registry path if needed, in one of these formats: + + + + (Same as using with the latest tag) + : + @ + + +CI/CD variables are supported. 
+ +Example of image: + +default: + image: ruby:3.0 + +rspec: + script: bundle exec rspec + +rspec 2.7: + image: registry.example.com/my-group/my-project/ruby:2.7 + script: bundle exec rspec + + +In this example, the ruby:3.0 image is the default for all jobs in the pipeline. +The rspec 2.7 job does not use the default, because it overrides the default with +a job-specific image section. + +Related topics: + + + +Run your CI/CD jobs in Docker containers. + + + +image:name + + +The name of the Docker image that the job runs in. Similar to image used by itself. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: The name of the image, including the registry path if needed, in one of these formats: + + + + (Same as using with the latest tag) + : + @ + + +CI/CD variables are supported. + +Example of image:name: + +test-job: + image: + name: ""registry.example.com/my/image:latest"" + script: echo ""Hello world"" + + +Related topics: + + + +Run your CI/CD jobs in Docker containers. + + + +image:entrypoint + + +Command or script to execute as the container’s entry point. + +When the Docker container is created, the entrypoint is translated to the Docker --entrypoint option. +The syntax is similar to the Dockerfile ENTRYPOINT directive, +where each shell token is a separate string in the array. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + A string. + + +Example of image:entrypoint: + +test-job: + image: + name: super/sql:experimental + entrypoint: [""""] + script: echo ""Hello world"" + + +Related topics: + + + +Override the entrypoint of an image. + + + +image:docker + + + +History + + + + + +Introduced in GitLab 16.7. Requires GitLab Runner 16.7 or later. + +user input option introduced in GitLab 16.8. + + + + + + +Use image:docker to pass options to the Docker executor of a GitLab Runner. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + +A hash of options for the Docker executor, which can include: + + + +platform: Selects the architecture of the image to pull. When not specified, +the default is the same platform as the host runner. + +user: Specify the username or UID to use when running the container. + + +Example of image:docker: + +arm-sql-job: + script: echo ""Run sql tests"" + image: + name: super/sql:experimental + docker: + platform: arm64/v8 + user: dave + + +Additional details: + + + +image:docker:platform maps to the docker pull --platform option. + +image:docker:user maps to the docker run --user option. + + + +image:pull_policy + + + +History + + + + + +Introduced in GitLab 15.1 with a flag named ci_docker_image_pull_policy. Disabled by default. + +Enabled on GitLab.com and self-managed in GitLab 15.2. + +Generally available in GitLab 15.4. Feature flag ci_docker_image_pull_policy removed. + Requires GitLab Runner 15.1 or later. + + + + + + +The pull policy that the runner uses to fetch the Docker image. + +Keyword type: Job keyword. You can use it only as part of a job or in the default section. + +Possible inputs: + + + A single pull policy, or multiple pull policies in an array. +Can be always, if-not-present, or never. 
+ + +Examples of image:pull_policy: + +job1: + script: echo ""A single pull policy."" + image: + name: ruby:3.0 + pull_policy: if-not-present + +job2: + script: echo ""Multiple pull policies."" + image: + name: ruby:3.0 + pull_policy: [always, if-not-present] + + +Additional details: + + + If the runner does not support the defined pull policy, the job fails with an error similar to: +ERROR: Job failed (system failure): the configured PullPolicies ([always]) are not allowed by AllowedPullPolicies ([never]). + + +Related topics: + + + +Run your CI/CD jobs in Docker containers. + +Configure how runners pull images. + +Set multiple pull policies. + + + +inherit + + + +History + + + + + +Introduced in GitLab 12.9. + + + + + + +Use inherit to control inheritance of default keywords and variables. + + +inherit:default + + +Use inherit:default to control the inheritance of default keywords. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +true (default) or false to enable or disable the inheritance of all default keywords. + A list of specific default keywords to inherit. + + +Example of inherit:default: + +default: + retry: 2 + image: ruby:3.0 + interruptible: true + +job1: + script: echo ""This job does not inherit any default keywords."" + inherit: + default: false + +job2: + script: echo ""This job inherits only the two listed default keywords. It does not inherit 'interruptible'."" + inherit: + default: + - retry + - image + + +Additional details: + + + You can also list default keywords to inherit on one line: default: [keyword1, keyword2] + + + + +inherit:variables + + +Use inherit:variables to control the inheritance of global variables keywords. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +true (default) or false to enable or disable the inheritance of all global variables. + A list of specific variables to inherit. + + +Example of inherit:variables: + +variables: + VARIABLE1: ""This is variable 1"" + VARIABLE2: ""This is variable 2"" + VARIABLE3: ""This is variable 3"" + +job1: + script: echo ""This job does not inherit any global variables."" + inherit: + variables: false + +job2: + script: echo ""This job inherits only the two listed global variables. It does not inherit 'VARIABLE3'."" + inherit: + variables: + - VARIABLE1 + - VARIABLE2 + + +Additional details: + + + You can also list global variables to inherit on one line: variables: [VARIABLE1, VARIABLE2] + + + + +interruptible + + + +History + + + + + +Introduced in GitLab 12.3. + Support for trigger jobs introduced in GitLab 16.8. + + + + + + +Use interruptible to configure the auto-cancel redundant pipelines +feature to cancel a job before it completes if a new pipeline on the same ref starts for a newer commit. If the feature +is disabled, the keyword has no effect. + +Running jobs are only cancelled when the jobs are configured with interruptible: true and: + + + No jobs configured with interruptible: false have started at any time. +After a job with interruptible: false starts, the entire pipeline is no longer +considered interruptible. + + If the pipeline triggered a downstream pipeline, but no job with interruptible: false +in the downstream pipeline has started yet, the downstream pipeline is also cancelled. + + + The new pipeline is for a commit with new changes. The Auto-cancel redundant pipelines +feature has no effect if you select Run pipeline in the UI to run a pipeline for the same commit. 
+ + +A job that has not started yet is always considered interruptible: true, regardless of the job’s configuration. +The interruptible configuration is only considered after the job starts. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + +true or false (default). + + +Example of interruptible: + +stages: + - stage1 + - stage2 + - stage3 + +step-1: + stage: stage1 + script: + - echo ""Can be canceled."" + interruptible: true + +step-2: + stage: stage2 + script: + - echo ""Can not be canceled."" + +step-3: + stage: stage3 + script: + - echo ""Because step-2 can not be canceled, this step can never be canceled, even though it's set as interruptible."" + interruptible: true + + +In this example, a new pipeline causes a running pipeline to be: + + + Canceled, if only step-1 is running or pending. + Not canceled, after step-2 starts. + + +Additional details: + + + Only set interruptible: true if the job can be safely canceled after it has started, +like a build job. Deployment jobs usually shouldn’t be cancelled, to prevent partial deployments. + You can add an optional manual job with interruptible: false in the first stage of +a pipeline to allow users to manually prevent a pipeline from being automatically +cancelled. After a user starts the job, the pipeline cannot be canceled by the +Auto-cancel redundant pipelines feature. + When using interruptible with a trigger job: + + The triggered downstream pipeline is never affected by the trigger job’s interruptible configuration. + If workflow:auto_cancel is set to conservative, +the trigger job’s interruptible configuration has no effect. + If workflow:auto_cancel is set to interruptible, +a trigger job with interruptible: true can be automatically cancelled. + + + + + +needs + + + +History + + + + + +Introduced in GitLab 12.2. + In GitLab 12.3, maximum number of jobs in needs array raised from five to 50. + +Introduced in GitLab 12.8, needs: [] lets jobs start immediately. + +Introduced in GitLab 14.2, you can refer to jobs in the same stage as the job you are configuring. + + + + + + +Use needs to execute jobs out-of-order. Relationships between jobs +that use needs can be visualized as a directed acyclic graph. + +You can ignore stage ordering and run some jobs without waiting for others to complete. +Jobs in multiple stages can run concurrently. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + An array of jobs. + An empty array ([]), to set the job to start as soon as the pipeline is created. + + +Example of needs: + +linux:build: + stage: build + script: echo ""Building linux..."" + +mac:build: + stage: build + script: echo ""Building mac..."" + +lint: + stage: test + needs: [] + script: echo ""Linting..."" + +linux:rspec: + stage: test + needs: [""linux:build""] + script: echo ""Running rspec on linux..."" + +mac:rspec: + stage: test + needs: [""mac:build""] + script: echo ""Running rspec on mac..."" + +production: + stage: deploy + script: echo ""Running production..."" + environment: production + + +This example creates four paths of execution: + + + Linter: The lint job runs immediately without waiting for the build stage +to complete because it has no needs (needs: []). + Linux path: The linux:rspec job runs as soon as the linux:build +job finishes, without waiting for mac:build to finish. + macOS path: The mac:rspec jobs runs as soon as the mac:build +job finishes, without waiting for linux:build to finish. 
+ The production job runs as soon as all previous jobs finish: +linux:build, linux:rspec, mac:build, mac:rspec. + + +Additional details: + + + The maximum number of jobs that a single job can have in the needs array is limited: + + For GitLab.com, the limit is 50. For more information, see +issue 350398. + For self-managed instances, the default limit is 50. This limit can be changed. + + + If needs refers to a job that uses the parallel keyword, +it depends on all jobs created in parallel, not just one job. It also downloads +artifacts from all the parallel jobs by default. If the artifacts have the same +name, they overwrite each other and only the last one downloaded is saved. + + To have needs refer to a subset of parallelized jobs (and not all of the parallelized jobs), +use the needs:parallel:matrix keyword. + + + In GitLab 14.1 and later you +can refer to jobs in the same stage as the job you are configuring. This feature is +enabled on GitLab.com and ready for production use. On self-managed GitLab 14.2 and later +this feature is available by default. + In GitLab 14.0 and earlier, you can only refer to jobs in earlier stages. Stages must be +explicitly defined for all jobs that use the needs keyword, or are referenced +in a job’s needs section. + If needs refers to a job that might not be added to +a pipeline because of only, except, or rules, the pipeline might fail to create. Use the needs:optional keyword to resolve a failed pipeline creation. + If a pipeline has jobs with needs: [] and jobs in the .pre stage, they will +all start as soon as the pipeline is created. Jobs with needs: [] start immediately, +and jobs in the .pre stage also start immediately. + + + +needs:artifacts + + + +History + + + + + +Introduced in GitLab 12.6. + + + + + + +When a job uses needs, it no longer downloads all artifacts from previous stages +by default, because jobs with needs can start before earlier stages complete. With +needs you can only download artifacts from the jobs listed in the needs configuration. + +Use artifacts: true (default) or artifacts: false to control when artifacts are +downloaded in jobs that use needs. + +Keyword type: Job keyword. You can use it only as part of a job. Must be used with needs:job. + +Possible inputs: + + + +true (default) or false. + + +Example of needs:artifacts: + +test-job1: + stage: test + needs: + - job: build_job1 + artifacts: true + +test-job2: + stage: test + needs: + - job: build_job2 + artifacts: false + +test-job3: + needs: + - job: build_job1 + artifacts: true + - job: build_job2 + - build_job3 + + +In this example: + + + The test-job1 job downloads the build_job1 artifacts + The test-job2 job does not download the build_job2 artifacts. + The test-job3 job downloads the artifacts from all three build_jobs, because +artifacts is true, or defaults to true, for all three needed jobs. + + +Additional details: + + + You should not combine needs with dependencies in the same job. + + + +needs:project + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + +Introduced in GitLab 12.7. + + + + + + +Use needs:project to download artifacts from up to five jobs in other pipelines. +The artifacts are downloaded from the latest successful specified job for the specified ref. +To specify multiple jobs, add each as separate array items under the needs keyword. + +If there is a pipeline running for the ref, a job with needs:project +does not wait for the pipeline to complete. 
Instead, the artifacts are downloaded +from the latest successful run of the specified job. + +needs:project must be used with job, ref, and artifacts. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +needs:project: A full project path, including namespace and group. + +job: The job to download artifacts from. + +ref: The ref to download artifacts from. + +artifacts: Must be true to download artifacts. + + +Examples of needs:project: + +build_job: + stage: build + script: + - ls -lhR + needs: + - project: namespace/group/project-name + job: build-1 + ref: main + artifacts: true + - project: namespace/group/project-name-2 + job: build-2 + ref: main + artifacts: true + + +In this example, build_job downloads the artifacts from the latest successful build-1 and build-2 jobs +on the main branches in the group/project-name and group/project-name-2 projects. + +In GitLab 13.3 and later, you can use CI/CD variables +in needs:project, for example: + +build_job: + stage: build + script: + - ls -lhR + needs: + - project: $CI_PROJECT_PATH + job: $DEPENDENCY_JOB_NAME + ref: $ARTIFACTS_DOWNLOAD_REF + artifacts: true + + +Additional details: + + + To download artifacts from a different pipeline in the current project, set project +to be the same as the current project, but use a different ref than the current pipeline. +Concurrent pipelines running on the same ref could override the artifacts. + The user running the pipeline must have at least the Reporter role for the group or project, +or the group/project must have public visibility. + You can’t use needs:project in the same job as trigger. + When using needs:project to download artifacts from another pipeline, the job does not wait for +the needed job to complete. Directed acyclic graph +behavior is limited to jobs in the same pipeline. Make sure that the needed job in the other +pipeline completes before the job that needs it tries to download the artifacts. + You can’t download artifacts from jobs that run in parallel. + Support for CI/CD variables in project, job, and ref was +introduced in GitLab 13.3. +Feature flag removed in GitLab 13.4. + + +Related topics: + + + To download artifacts between parent-child pipelines, +use needs:pipeline:job. + + + +needs:pipeline:job + + + +History + + + + + +Introduced in GitLab 13.7. + + + + + + +A child pipeline can download artifacts from a job in +its parent pipeline or another child pipeline in the same parent-child pipeline hierarchy. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +needs:pipeline: A pipeline ID. Must be a pipeline present in the same parent-child pipeline hierarchy. + +job: The job to download artifacts from. + + +Example of needs:pipeline:job: + + + + Parent pipeline (.gitlab-ci.yml): + + +create-artifact: + stage: build + script: echo ""sample artifact"" > artifact.txt + artifacts: + paths: [artifact.txt] + +child-pipeline: + stage: test + trigger: + include: child.yml + strategy: depend + variables: + PARENT_PIPELINE_ID: $CI_PIPELINE_ID + + + + Child pipeline (child.yml): + + +use-artifact: + script: cat artifact.txt + needs: + - pipeline: $PARENT_PIPELINE_ID + job: create-artifact + + + + +In this example, the create-artifact job in the parent pipeline creates some artifacts. +The child-pipeline job triggers a child pipeline, and passes the CI_PIPELINE_ID +variable to the child pipeline as a new PARENT_PIPELINE_ID variable. 
The child pipeline +can use that variable in needs:pipeline to download artifacts from the parent pipeline. + +Additional details: + + + The pipeline attribute does not accept the current pipeline ID ($CI_PIPELINE_ID). +To download artifacts from a job in the current pipeline, use needs:artifacts. + + + +needs:optional + + + +History + + + + + +Introduced in GitLab 13.10. + +Feature flag removed in GitLab 14.0. + + + + + + +To need a job that sometimes does not exist in the pipeline, add optional: true +to the needs configuration. If not defined, optional: false is the default. + +Jobs that use rules, only, or except and that are added with include +might not always be added to a pipeline. GitLab checks the needs relationships before starting a pipeline: + + + If the needs entry has optional: true and the needed job is present in the pipeline, +the job waits for it to complete before starting. + If the needed job is not present, the job can start when all other needs requirements are met. + If the needs section contains only optional jobs, and none are added to the pipeline, +the job starts immediately (the same as an empty needs entry: needs: []). + If a needed job has optional: false, but it was not added to the pipeline, the +pipeline fails to start with an error similar to: 'job1' job needs 'job2' job, but it was not added to the pipeline. + + +Keyword type: Job keyword. You can use it only as part of a job. + +Example of needs:optional: + +build-job: + stage: build + +test-job1: + stage: test + +test-job2: + stage: test + rules: + - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + +deploy-job: + stage: deploy + needs: + - job: test-job2 + optional: true + - job: test-job1 + environment: production + +review-job: + stage: deploy + needs: + - job: test-job2 + optional: true + environment: review + + +In this example: + + + +build-job, test-job1, and test-job2 start in stage order. + When the branch is the default branch, test-job2 is added to the pipeline, so: + + +deploy-job waits for both test-job1 and test-job2 to complete. + +review-job waits for test-job2 to complete. + + + When the branch is not the default branch, test-job2 is not added to the pipeline, so: + + +deploy-job waits for only test-job1 to complete, and does not wait for the missing test-job2. + +review-job has no other needed jobs and starts immediately (at the same time as build-job), +like needs: []. + + + + + +needs:pipeline + + +You can mirror the pipeline status from an upstream pipeline to a job by +using the needs:pipeline keyword. The latest pipeline status from the default branch is +replicated to the job. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + A full project path, including namespace and group. If the +project is in the same group or namespace, you can omit them from the project +keyword. For example: project: group/project-name or project: project-name. + + +Example of needs:pipeline: + +upstream_status: + stage: test + needs: + pipeline: other/project + + +Additional details: + + + If you add the job keyword to needs:pipeline, the job no longer mirrors the +pipeline status. The behavior changes to needs:pipeline:job. + + + +needs:parallel:matrix + + + +History + + + + + +Introduced in GitLab 16.3. + + + + + + +Jobs can use parallel:matrix to run a job multiple times in parallel in a single pipeline, +but with different variable values for each instance of the job. + +Use needs:parallel:matrix to execute jobs out-of-order depending on parallelized jobs. 
+ +Keyword type: Job keyword. You can use it only as part of a job. Must be used with needs:job. + +Possible inputs: An array of hashes of variables: + + + The variables and values must be selected from the variables and values defined in the parallel:matrix job. + + +Example of needs:parallel:matrix: + +linux:build: + stage: build + script: echo ""Building linux..."" + parallel: + matrix: + - PROVIDER: aws + STACK: + - monitoring + - app1 + - app2 + +linux:rspec: + stage: test + needs: + - job: linux:build + parallel: + matrix: + - PROVIDER: aws + STACK: app1 + script: echo ""Running rspec on linux..."" + + +The above example generates the following jobs: + +linux:build: [aws, monitoring] +linux:build: [aws, app1] +linux:build: [aws, app2] +linux:rspec + + +The linux:rspec job runs as soon as the linux:build: [aws, app1] job finishes. + +Related topics: + + + +Specify a parallelized job using needs with multiple parallelized jobs. + + +Additional details: + + + + The order of the matrix variables in needs:parallel:matrix must match the order +of the matrix variables in the needed job. For example, reversing the order of +the variables in the linux:rspec job in the earlier example above would be invalid: + + +linux:rspec: + stage: test + needs: + - job: linux:build + parallel: + matrix: + - STACK: app1 # The variable order does not match `linux:build` and is invalid. + PROVIDER: aws + script: echo ""Running rspec on linux..."" + + + + + +pages + + +Use pages to define a GitLab Pages job that +uploads static content to GitLab. The content is then published as a website. + +You must: + + + Define artifacts with a path to the content directory, which is +public by default. + Use publish if want to use a different content directory. + + +Keyword type: Job name. + +Example of pages: + +pages: + stage: deploy + script: + - mv my-html-content public + artifacts: + paths: + - public + rules: + - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + environment: production + + +This example renames the my-html-content/ directory to public/. +This directory is exported as an artifact and published with GitLab Pages. + + +pages:publish + + + +History + + + + + +Introduced in GitLab 16.1. + + + + + + +Use publish to configure the content directory of a pages job. + +Keyword type: Job keyword. You can use it only as part of a pages job. + +Possible inputs: A path to a directory containing the Pages content. + +Example of publish: + +pages: + stage: deploy + script: + - npx @11ty/eleventy --input=path/to/eleventy/root --output=dist + artifacts: + paths: + - dist + publish: dist + rules: + - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + environment: production + + +This example uses Eleventy to generate a static website and +output the generated HTML files into a the dist/ directory. This directory is exported +as an artifact and published with GitLab Pages. + + +pages:pages.path_prefix + + + +Tier: Premium, Ultimate +Offering: Self-managed +Status: Experiment + + +History + + + + + +Introduced in GitLab 16.7 as an Experiment with a flag named pages_multiple_versions_setting, disabled by default. + + + + + + + + On self-managed GitLab, by default this feature is not available. To make it available, +an administrator can enable the feature flag named +pages_multiple_versions_setting. On GitLab.com and GitLab Dedicated, this feature is not available. This feature is not ready for production use. + + +Use pages.path_prefix to configure a path prefix for multiple deployments of GitLab Pages. 
+ +Keyword type: Job keyword. You can use it only as part of a pages job. + +Possible inputs: + + + A string with valid URL characters. + +CI/CD variables. + A combination of both. + + +Example of pages.path_prefix: + +pages: + stage: deploy + script: + - echo ""Pages accessible through ${CI_PAGES_URL}/${CI_COMMIT_BRANCH}"" + pages: + path_prefix: ""$CI_COMMIT_BRANCH"" + artifacts: + paths: + - public + + +In this example, a different pages deployment is created for each branch. + + +parallel + + + +History + + + + + +Introduced in GitLab 15.9, the maximum value for parallel is increased from 50 to 200. + + + + + + +Use parallel to run a job multiple times in parallel in a single pipeline. + +Multiple runners must exist, or a single runner must be configured to run multiple jobs concurrently. + +Parallel jobs are named sequentially from job_name 1/N to job_name N/N. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + A numeric value from 1 to 200. + + +Example of parallel: + +test: + script: rspec + parallel: 5 + + +This example creates 5 jobs that run in parallel, named test 1/5 to test 5/5. + +Additional details: + + + Every parallel job has a CI_NODE_INDEX and CI_NODE_TOTAL +predefined CI/CD variable set. + A pipeline with jobs that use parallel might: + + Create more jobs running in parallel than available runners. Excess jobs are queued +and marked pending while waiting for an available runner. + Create too many jobs, and the pipeline fails with a job_activity_limit_exceeded error. +The maximum number of jobs that can exist in active pipelines is limited at the instance-level. + + + + +Related topics: + + + +Parallelize large jobs. + + + +parallel:matrix + + + +History + + + + + +Introduced in GitLab 13.3. + The job naming style was improved in GitLab 13.4. + +Introduced in GitLab 15.9, the maximum number of permutations is increased from 50 to 200. + + + + + + +Use parallel:matrix to run a job multiple times in parallel in a single pipeline, +but with different variable values for each instance of the job. + +Multiple runners must exist, or a single runner must be configured to run multiple jobs concurrently. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: An array of hashes of variables: + + + The variable names can use only numbers, letters, and underscores (_). + The values must be either a string, or an array of strings. + The number of permutations cannot exceed 200. + + +Example of parallel:matrix: + +deploystacks: + stage: deploy + script: + - bin/deploy + parallel: + matrix: + - PROVIDER: aws + STACK: + - monitoring + - app1 + - app2 + - PROVIDER: ovh + STACK: [monitoring, backup, app] + - PROVIDER: [gcp, vultr] + STACK: [data, processing] + environment: $PROVIDER/$STACK + + +The example generates 10 parallel deploystacks jobs, each with different values +for PROVIDER and STACK: + +deploystacks: [aws, monitoring] +deploystacks: [aws, app1] +deploystacks: [aws, app2] +deploystacks: [ovh, monitoring] +deploystacks: [ovh, backup] +deploystacks: [ovh, app] +deploystacks: [gcp, data] +deploystacks: [gcp, processing] +deploystacks: [vultr, data] +deploystacks: [vultr, processing] + + +Additional details: + + + +parallel:matrix jobs add the variable values to the job names to differentiate +the jobs from each other, but large values can cause names to exceed limits: + + Job names must be 255 characters or fewer. + When using needs, job names must be 128 characters or fewer. 
+ + + + +Related topics: + + + +Run a one-dimensional matrix of parallel jobs. + +Run a matrix of triggered parallel jobs. + +Select different runner tags for each parallel matrix job. + + +Additional details: + + + + You cannot create multiple matrix configurations with the same variable values but different variable names. +Job names are generated from the variable values, not the variable names, so matrix entries +with identical values generate identical job names that overwrite each other. + + For example, this test configuration would try to create two series of identical jobs, +but the OS2 versions overwrite the OS versions: + + +test: + parallel: + matrix: + - OS: [ubuntu] + PROVIDER: [aws, gcp] + - OS2: [ubuntu] + PROVIDER: [aws, gcp] + + + + + +release + + + +History + + + + + +Introduced in GitLab 13.2. + + + + + + +Use release to create a release. + +The release job must have access to the release-cli, +which must be in the $PATH. + +If you use the Docker executor, +you can use this image from the GitLab container registry: registry.gitlab.com/gitlab-org/release-cli:latest + +If you use the Shell executor or similar, +install release-cli on the server where the runner is registered. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: The release subkeys: + + + tag_name + +tag_message (optional) + +name (optional) + description + +ref (optional) + +milestones (optional) + +released_at (optional) + +assets:links (optional) + + +Example of release keyword: + +release_job: + stage: release + image: registry.gitlab.com/gitlab-org/release-cli:latest + rules: + - if: $CI_COMMIT_TAG # Run this job when a tag is created manually + script: + - echo ""Running the release job."" + release: + tag_name: $CI_COMMIT_TAG + name: 'Release $CI_COMMIT_TAG' + description: 'Release created using the release-cli.' + + +This example creates a release: + + + When you push a Git tag. + When you add a Git tag in the UI at Code > Tags. + + +Additional details: + + + + All release jobs, except trigger jobs, must include the script keyword. A release +job can use the output from script commands. If you don’t need the script, you can use a placeholder: + + +script: + - echo ""release job"" + + + An issue exists to remove this requirement. + + The release section executes after the script keyword and before the after_script. + A release is created only if the job’s main script succeeds. + If the release already exists, it is not updated and the job with the release keyword fails. + + +Related topics: + + + +CI/CD example of the release keyword. + +Create multiple releases in a single pipeline. + +Use a custom SSL CA certificate authority. + + + +release:tag_name + + +Required. The Git tag for the release. + +If the tag does not exist in the project yet, it is created at the same time as the release. +New tags use the SHA associated with the pipeline. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + A tag name. + + +CI/CD variables are supported. + +Example of release:tag_name: + +To create a release when a new tag is added to the project: + + + Use the $CI_COMMIT_TAG CI/CD variable as the tag_name. + Use rules:if to configure the job to run only for new tags. 
+ + +job: + script: echo ""Running the release job for the new tag."" + release: + tag_name: $CI_COMMIT_TAG + description: 'Release description' + rules: + - if: $CI_COMMIT_TAG + + +To create a release and a new tag at the same time, your rules +should not configure the job to run only for new tags. A semantic versioning example: + +job: + script: echo ""Running the release job and creating a new tag."" + release: + tag_name: ${MAJOR}_${MINOR}_${REVISION} + description: 'Release description' + rules: + - if: $CI_PIPELINE_SOURCE == ""schedule"" + + + +release:tag_message + + + +History + + + + + +Introduced in GitLab 15.3. Supported by release-cli v0.12.0 or later. + + + + + + +If the tag does not exist, the newly created tag is annotated with the message specified by tag_message. +If omitted, a lightweight tag is created. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + A text string. + + +Example of release:tag_message: + + release_job: + stage: release + release: + tag_name: $CI_COMMIT_TAG + description: 'Release description' + tag_message: 'Annotated tag message' + + + +release:name + + +The release name. If omitted, it is populated with the value of release: tag_name. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + A text string. + + +Example of release:name: + + release_job: + stage: release + release: + name: 'Release $CI_COMMIT_TAG' + + + +release:description + + +The long description of the release. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + A string with the long description. + The path to a file that contains the description. Introduced in GitLab 13.7. + + The file location must be relative to the project directory ($CI_PROJECT_DIR). + If the file is a symbolic link, it must be in the $CI_PROJECT_DIR. + The ./path/to/file and filename can’t contain spaces. + + + + +Example of release:description: + +job: + release: + tag_name: ${MAJOR}_${MINOR}_${REVISION} + description: './path/to/CHANGELOG.md' + + +Additional details: + + + The description is evaluated by the shell that runs release-cli. +You can use CI/CD variables to define the description, but some shells +use different syntax +to reference variables. Similarly, some shells might require special characters +to be escaped. For example, backticks (`) might need to be escaped with a backslash (\). + + + +release:ref + + +The ref for the release, if the release: tag_name doesn’t exist yet. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + A commit SHA, another tag name, or a branch name. + + + +release:milestones + + +The title of each milestone the release is associated with. + + +release:released_at + + +The date and time when the release is ready. + +Possible inputs: + + + A date enclosed in quotes and expressed in ISO 8601 format. + + +Example of release:released_at: + +released_at: '2021-03-15T08:00:00Z' + + +Additional details: + + + If it is not defined, the current date and time is used. + + + +release:assets:links + + + +History + + + + + +Introduced in GitLab 13.12. + + + + + + +Use release:assets:links to include asset links in the release. + +Requires release-cli version v0.4.0 or later. 
+ +Example of release:assets:links: + +assets: + links: + - name: 'asset1' + url: 'https://example.com/assets/1' + - name: 'asset2' + url: 'https://example.com/assets/2' + filepath: '/pretty/url/1' # optional + link_type: 'other' # optional + + + +resource_group + + + +History + + + + + +Introduced in GitLab 12.7. + + + + + + +Use resource_group to create a resource group that +ensures a job is mutually exclusive across different pipelines for the same project. + +For example, if multiple jobs that belong to the same resource group are queued simultaneously, +only one of the jobs starts. The other jobs wait until the resource_group is free. + +Resource groups behave similar to semaphores in other programming languages. + +You can choose a process mode to strategically control the job concurrency for your deployment preferences. The default process mode is unordered. To change the process mode of a resource group, use the API to send a request to edit an existing resource group. + +You can define multiple resource groups per environment. For example, +when deploying to physical devices, you might have multiple physical devices. Each device +can be deployed to, but only one deployment can occur per device at any given time. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + Only letters, digits, -, _, /, $, {, }, ., and spaces. +It can’t start or end with /. CI/CD variables are supported. + + +Example of resource_group: + +deploy-to-production: + script: deploy + resource_group: production + + +In this example, two deploy-to-production jobs in two separate pipelines can never run at the same time. As a result, +you can ensure that concurrent deployments never happen to the production environment. + +Related topics: + + + +Pipeline-level concurrency control with cross-project/parent-child pipelines. + + + +retry + + +Use retry to configure how many times a job is retried if it fails. +If not defined, defaults to 0 and jobs do not retry. + +When a job fails, the job is processed up to two more times, until it succeeds or +reaches the maximum number of retries. + +By default, all failure types cause the job to be retried. Use retry:when or retry:exit_codes +to select which failures to retry on. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + +0 (default), 1, or 2. + + +Example of retry: + +test: + script: rspec + retry: 2 + +test_advanced: + script: + - echo ""Run a script that results in exit code 137."" + - exit 137 + retry: + max: 2 + when: runner_system_failure + exit_codes: 137 + + +test_advanced will be retried up to 2 times if the exit code is 137 or if it had +a runner system failure. + + +retry:when + + +Use retry:when with retry:max to retry jobs for only specific failure cases. +retry:max is the maximum number of retries, like retry, and can be +0, 1, or 2. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + A single failure type, or an array of one or more failure types: + + + + + + +always: Retry on any failure (default). + +unknown_failure: Retry when the failure reason is unknown. + +script_failure: Retry when: + + The script failed. + The runner failed to pull the Docker image. For docker, docker+machine, kubernetes executors. + + + +api_failure: Retry on API failure. + +stuck_or_timeout_failure: Retry when the job got stuck or timed out. 
+ +runner_system_failure: Retry if there is a runner system failure (for example, job setup failed). + +runner_unsupported: Retry if the runner is unsupported. + +stale_schedule: Retry if a delayed job could not be executed. + +job_execution_timeout: Retry if the script exceeded the maximum execution time set for the job. + +archived_failure: Retry if the job is archived and can’t be run. + +unmet_prerequisites: Retry if the job failed to complete prerequisite tasks. + +scheduler_failure: Retry if the scheduler failed to assign the job to a runner. + +data_integrity_failure: Retry if there is an unknown job problem. + + +Example of retry:when (single failure type): + +test: + script: rspec + retry: + max: 2 + when: runner_system_failure + + +If there is a failure other than a runner system failure, the job is not retried. + +Example of retry:when (array of failure types): + +test: + script: rspec + retry: + max: 2 + when: + - runner_system_failure + - stuck_or_timeout_failure + + + +retry:exit_codes + + + +History + + + + + +Introduced in GitLab 16.10 with a flag named ci_retry_on_exit_codes. Disabled by default. + + + + + + + + On self-managed GitLab, by default this feature is not available. To make it available, +an administrator can enable the feature flag named ci_retry_on_exit_codes. + + +Use retry:exit_codes with retry:max to retry jobs for only specific failure cases. +retry:max is the maximum number of retries, like retry, and can be +0, 1, or 2. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + A single exit code. + An array of exit codes. + + +Example of retry:exit_codes: + +test_job_1: + script: + - echo ""Run a script that results in exit code 1. This job isn't retried."" + - exit 1 + retry: + max: 2 + exit_codes: 137 + +test_job_2: + script: + - echo ""Run a script that results in exit code 137. This job will be retried."" + - exit 137 + retry: + max: 1 + exit_codes: + - 255 + - 137 + + +Related topics: + +You can specify the number of retry attempts for certain stages of job execution +using variables. + + +rules + + + +History + + + + + +Introduced in GitLab 12.3. + + + + + + +Use rules to include or exclude jobs in pipelines. + +Rules are evaluated when the pipeline is created, and evaluated in order +until the first match. When a match is found, the job is either included or excluded from the pipeline, +depending on the configuration. + +You cannot use dotenv variables created in job scripts in rules, because rules are evaluated before any jobs run. + +rules replaces only/except and they can’t be used together +in the same job. If you configure one job to use both keywords, the GitLab returns +a key may not be used with rules error. + +rules accepts an array of rules defined with: + + + if + changes + exists + allow_failure + variables + when + + +You can combine multiple keywords together for complex rules. + +The job is added to the pipeline: + + + If an if, changes, or exists rule matches and also has when: on_success (default), +when: delayed, or when: always. + If a rule is reached that is only when: on_success, when: delayed, or when: always. + + +The job is not added to the pipeline: + + + If no rules match. + If a rule matches and has when: never. + + +You can use !reference tags to reuse rules configuration +in different jobs. + + +rules:if + + +Use rules:if clauses to specify when to add a job to a pipeline: + + + If an if statement is true, add the job to the pipeline. 
+ If an if statement is true, but it’s combined with when: never, do not add the job to the pipeline. + If no if statements are true, do not add the job to the pipeline. + + +if clauses are evaluated based on the values of CI/CD variables +or predefined CI/CD variables, with +some exceptions. + +Keyword type: Job-specific and pipeline-specific. You can use it as part of a job +to configure the job behavior, or with workflow to configure the pipeline behavior. + +Possible inputs: + + + A CI/CD variable expression. + + +Example of rules:if: + +job: + script: echo ""Hello, Rules!"" + rules: + - if: $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME =~ /^feature/ && $CI_MERGE_REQUEST_TARGET_BRANCH_NAME != $CI_DEFAULT_BRANCH + when: never + - if: $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME =~ /^feature/ + when: manual + allow_failure: true + - if: $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME + + +Additional details: + + + If a rule matches and has no when defined, the rule uses the when +defined for the job, which defaults to on_success if not defined. + In GitLab 14.5 and earlier, you can define when once per rule, or once at the job-level, +which applies to all rules. You can’t mix when at the job-level with when in rules. + In GitLab 14.6 and later, you can mix when at the job-level with when in rules. +when configuration in rules takes precedence over when at the job-level. + Unlike variables in script +sections, variables in rules expressions are always formatted as $VARIABLE. + + You can use rules:if with include to conditionally include other configuration files. + + + CI/CD variables on the right side of =~ and !~ expressions are evaluated as regular expressions. + + +Related topics: + + + +Common if expressions for rules. + +Avoid duplicate pipelines. + +Use rules to run merge request pipelines. + + + +rules:changes + + +Use rules:changes to specify when to add a job to a pipeline by checking for changes +to specific files. + + + caution You should use rules: changes only with branch pipelines or merge request pipelines. +You can use rules: changes with other pipeline types, but rules: changes always +evaluates to true for new branch pipelines or when there is no Git push event. Pipelines like tag pipelines, +scheduled pipelines, and manual pipelines, all do not +have a Git push event associated with them. In these cases, use rules: changes: compare_to +to specify the branch to compare against. + + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + +An array including any number of: + + + Paths to files. In GitLab 13.6 and later, file paths can include variables. +A file path array can also be in rules:changes:paths. + Wildcard paths for: + + Single directories, for example path/to/directory/*. + A directory and all its subdirectories, for example path/to/directory/**/*. + + + Wildcard glob paths for all files +with the same extension or multiple extensions, for example *.md or path/to/directory/*.{rb,py,sh}. + Wildcard paths to files in the root directory, or all directories, wrapped in double quotes. +For example ""*.json"" or ""**/*.json"". + + +Example of rules:changes: + +docker build: + script: docker build -t my-image:$CI_COMMIT_REF_SLUG . + rules: + - if: $CI_PIPELINE_SOURCE == ""merge_request_event"" + changes: + - Dockerfile + when: manual + allow_failure: true + + + + If the pipeline is a merge request pipeline, check Dockerfile for changes. 
+ If Dockerfile has changed, add the job to the pipeline as a manual job, and the pipeline +continues running even if the job is not triggered (allow_failure: true). + A maximum of 50 patterns or file paths can be defined per rules:changes section. + If Dockerfile has not changed, do not add job to any pipeline (same as when: never). + +rules:changes:paths is the same as rules:changes without +any subkeys. + + +Additional details: + + + +rules: changes works the same way as only: changes and except: changes. + Glob patterns are interpreted with Ruby’s File.fnmatch +with the flags +File::FNM_PATHNAME | File::FNM_DOTMATCH | File::FNM_EXTGLOB. + You can use when: never to implement a rule similar to except:changes. + +changes resolves to true if any of the matching files are changed (an OR operation). + + +Related topics: + + + +Jobs or pipelines can run unexpectedly when using rules: changes. + + + +rules:changes:paths + + + +History + + + + + +Introduced in GitLab 15.2. + + + + + + +Use rules:changes to specify that a job only be added to a pipeline when specific +files are changed, and use rules:changes:paths to specify the files. + +rules:changes:paths is the same as using rules:changes without +any subkeys. All additional details and related topics are the same. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + An array of file paths. File paths can include variables. + + +Example of rules:changes:paths: + +docker-build-1: + script: docker build -t my-image:$CI_COMMIT_REF_SLUG . + rules: + - if: $CI_PIPELINE_SOURCE == ""merge_request_event"" + changes: + - Dockerfile + +docker-build-2: + script: docker build -t my-image:$CI_COMMIT_REF_SLUG . + rules: + - if: $CI_PIPELINE_SOURCE == ""merge_request_event"" + changes: + paths: + - Dockerfile + + +In this example, both jobs have the same behavior. + + +rules:changes:compare_to + + + +History + + + + + +Introduced in GitLab 15.3 with a flag named ci_rules_changes_compare. Enabled by default. + +Generally available in GitLab 15.5. Feature flag ci_rules_changes_compare removed. + + + + + + +Use rules:changes:compare_to to specify which ref to compare against for changes to the files +listed under rules:changes:paths. + +Keyword type: Job keyword. You can use it only as part of a job, and it must be combined with rules:changes:paths. + +Possible inputs: + + + A branch name, like main, branch1, or refs/heads/branch1. + A tag name, like tag1 or refs/tags/tag1. + A commit SHA, like 2fg31ga14b. + + +Example of rules:changes:compare_to: + +docker build: + script: docker build -t my-image:$CI_COMMIT_REF_SLUG . + rules: + - if: $CI_PIPELINE_SOURCE == ""merge_request_event"" + changes: + paths: + - Dockerfile + compare_to: 'refs/heads/branch1' + + +In this example, the docker build job is only included when the Dockerfile has changed +relative to refs/heads/branch1 and the pipeline source is a merge request event. + + +rules:exists + + + +History + + + + + +Introduced in GitLab 12.4. + CI/CD variable support introduced in GitLab 15.6. + + + + + + +Use exists to run a job when certain files exist in the repository. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + An array of file paths. Paths are relative to the project directory ($CI_PROJECT_DIR) and can’t directly link outside it. File paths can use glob patterns and CI/CD variables. + + +Example of rules:exists: + +job: + script: docker build -t my-image:$CI_COMMIT_REF_SLUG . 
+ rules: + - exists: + - Dockerfile + + +job runs if a Dockerfile exists anywhere in the repository. + +Additional details: + + + Glob patterns are interpreted with Ruby’s File.fnmatch +with the flags +File::FNM_PATHNAME | File::FNM_DOTMATCH | File::FNM_EXTGLOB. + For performance reasons, GitLab performs a maximum of 10,000 checks against +exists patterns or file paths. After the 10,000th check, rules with patterned +globs always match. In other words, the exists rule always assumes a match in +projects with more than 10,000 files, or if there are fewer than 10,000 files but +the exists rules are checked more than 10,000 times. + A maximum of 50 patterns or file paths can be defined per rules:exists section. + +exists resolves to true if any of the listed files are found (an OR operation). + + + +rules:allow_failure + + + +History + + + + + +Introduced in GitLab 12.8. + + + + + + +Use allow_failure: true in rules to allow a job to fail +without stopping the pipeline. + +You can also use allow_failure: true with a manual job. The pipeline continues +running without waiting for the result of the manual job. allow_failure: false +combined with when: manual in rules causes the pipeline to wait for the manual +job to run before continuing. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +true or false. Defaults to false if not defined. + + +Example of rules:allow_failure: + +job: + script: echo ""Hello, Rules!"" + rules: + - if: $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH + when: manual + allow_failure: true + + +If the rule matches, then the job is a manual job with allow_failure: true. + +Additional details: + + + The rule-level rules:allow_failure overrides the job-level allow_failure, +and only applies when the specific rule triggers the job. + + + +rules:needs + + + +History + + + + + +Introduced in GitLab 16.0 with a flag named introduce_rules_with_needs. Disabled by default. + +Generally available in GitLab 16.2. Feature flag introduce_rules_with_needs removed. + + + + + + +Use needs in rules to update a job’s needs for specific conditions. When a condition matches a rule, the job’s needs configuration is completely replaced with the needs in the rule. + +Keyword type: Job-specific. You can use it only as part of a job. + +Possible inputs: + + + An array of job names as strings. + A hash with a job name, optionally with additional attributes. + An empty array ([]), to set the job needs to none when the specific condition is met. + + +Example of rules:needs: + +build-dev: + stage: build + rules: + - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH + script: echo ""Feature branch, so building dev version..."" + +build-prod: + stage: build + rules: + - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH + script: echo ""Default branch, so building prod version..."" + +specs: + stage: test + needs: ['build-dev'] + rules: + - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH + needs: ['build-prod'] + - when: on_success # Run the job in other cases + script: echo ""Running dev specs by default, or prod specs when default branch..."" + + +In this example: + + + If the pipeline runs on a branch that is not the default branch, the specs job needs the build-dev job (default behavior). + If the pipeline runs on the default branch, and therefore the rule matches the condition, the specs job needs the build-prod job instead. + + +Additional details: + + + +needs in rules override any needs defined at the job-level. 
When overridden, the behavior is same as job-level needs. + +needs in rules can accept artifacts and optional. + + + +rules:variables + + + +History + + + + + +Introduced in GitLab 13.7. + +Feature flag removed in GitLab 13.10. + + + + + + +Use variables in rules to define variables for specific conditions. + +Keyword type: Job-specific. You can use it only as part of a job. + +Possible inputs: + + + A hash of variables in the format VARIABLE-NAME: value. + + +Example of rules:variables: + +job: + variables: + DEPLOY_VARIABLE: ""default-deploy"" + rules: + - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH + variables: # Override DEPLOY_VARIABLE defined + DEPLOY_VARIABLE: ""deploy-production"" # at the job level. + - if: $CI_COMMIT_REF_NAME =~ /feature/ + variables: + IS_A_FEATURE: ""true"" # Define a new variable. + script: + - echo ""Run script with $DEPLOY_VARIABLE as an argument"" + - echo ""Run another script if $IS_A_FEATURE exists"" + + + +rules:interruptible + + + +History + + + + + +Introduced in GitLab 16.10. + + + + + + +Use interruptible in rules to update a job’s interruptible value for specific conditions. + +Keyword type: Job-specific. You can use it only as part of a job. + +Possible inputs: + + + +true or false. + + +Example of rules:interruptible: + +job: + script: echo ""Hello, Rules!"" + interruptible: true + rules: + - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH + interruptible: false # Override interruptible defined at the job level. + - when: on_success + + +Additional details: + + + The rule-level rules:interruptible overrides the job-level interruptible, +and only applies when the specific rule triggers the job. + + + +script + + +Use script to specify commands for the runner to execute. + +All jobs except trigger jobs require a script keyword. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: An array including: + + + Single line commands. + Long commands split over multiple lines. + +YAML anchors. + + +CI/CD variables are supported. + +Example of script: + +job1: + script: ""bundle exec rspec"" + +job2: + script: + - uname -a + - bundle exec rspec + + +Additional details: + + + When you use these special characters in script, you must use single quotes (') or double quotes (""). + + +Related topics: + + + You can ignore non-zero exit codes. + +Use color codes with script +to make job logs easier to review. + +Create custom collapsible sections +to simplify job log output. + + + +secrets + + + +Tier: Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + + +History + + + + + +Introduced in GitLab 13.4. + + + + + + +Use secrets to specify CI/CD secrets to: + + + Retrieve from an external secrets provider. + Make available in the job as CI/CD variables +(file type by default). + + + +secrets:vault + + + +History + + + + + +Introduced in GitLab 13.4 and GitLab Runner 13.4. + + + + + + +Use secrets:vault to specify secrets provided by a HashiCorp Vault. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +engine:name: Name of the secrets engine. + +engine:path: Path to the secrets engine. + +path: Path to the secret. + +field: Name of the field where the password is stored. 
+ + +Example of secrets:vault: + +To specify all details explicitly and use the KV-V2 secrets engine: + +job: + secrets: + DATABASE_PASSWORD: # Store the path to the secret in this CI/CD variable + vault: # Translates to secret: `ops/data/production/db`, field: `password` + engine: + name: kv-v2 + path: ops + path: production/db + field: password + + +You can shorten this syntax. With the short syntax, engine:name and engine:path +both default to kv-v2: + +job: + secrets: + DATABASE_PASSWORD: # Store the path to the secret in this CI/CD variable + vault: production/db/password # Translates to secret: `kv-v2/data/production/db`, field: `password` + + +To specify a custom secrets engine path in the short syntax, add a suffix that starts with @: + +job: + secrets: + DATABASE_PASSWORD: # Store the path to the secret in this CI/CD variable + vault: production/db/password@ops # Translates to secret: `ops/data/production/db`, field: `password` + + + +secrets:gcp_secret_manager + + + +History + + + + + +Introduced in GitLab 16.8 and GitLab Runner 16.8. + + + + + + +Use secrets:gcp_secret_manager to specify secrets provided by GCP Secret Manager. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +name: Name of the secret. + +version: Version of the secret. + + +Example of secrets:gcp_secret_manager: + +job: + secrets: + DATABASE_PASSWORD: + gcp_secret_manager: + name: 'test' + version: 2 + + +Related topics: + + + +Use GCP Secret Manager secrets in GitLab CI/CD. + + + +secrets:azure_key_vault + + + +History + + + + + +Introduced in GitLab 16.3 and GitLab Runner 16.3. + + + + + + +Use secrets:azure_key_vault to specify secrets provided by a Azure Key Vault. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +name: Name of the secret. + +version: Version of the secret. + + +Example of secrets:azure_key_vault: + +job: + secrets: + DATABASE_PASSWORD: + azure_key_vault: + name: 'test' + version: 'test' + + +Related topics: + + + +Use Azure Key Vault secrets in GitLab CI/CD. + + + +secrets:file + + + +History + + + + + +Introduced in GitLab 14.1 and GitLab Runner 14.1. + + + + + + +Use secrets:file to configure the secret to be stored as either a +file or variable type CI/CD variable + +By default, the secret is passed to the job as a file type CI/CD variable. The value +of the secret is stored in the file and the variable contains the path to the file. + +If your software can’t use file type CI/CD variables, set file: false to store +the secret value directly in the variable. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + +true (default) or false. + + +Example of secrets:file: + +job: + secrets: + DATABASE_PASSWORD: + vault: production/db/password@ops + file: false + + +Additional details: + + + The file keyword is a setting for the CI/CD variable and must be nested under +the CI/CD variable name, not in the vault section. + + + +secrets:token + + + +History + + + + + +Introduced in GitLab 15.8, controlled by the Limit JSON Web Token (JWT) access setting. + +Made always available and Limit JSON Web Token (JWT) access setting removed in GitLab 16.0. + + + + + + +Use secrets:token to explicitly select a token to use when authenticating with Vault by referencing the token’s CI/CD variable. + +Keyword type: Job keyword. You can use it only as part of a job. 
+ +Possible inputs: + + + The name of an ID token + + +Example of secrets:token: + +job: + id_tokens: + AWS_TOKEN: + aud: https://aws.example.com + VAULT_TOKEN: + aud: https://vault.example.com + secrets: + DB_PASSWORD: + vault: gitlab/production/db + token: $VAULT_TOKEN + + +Additional details: + + + When the token keyword is not set, the first ID token is used to authenticate. + + + +services + + +Use services to specify any additional Docker images that your scripts require to run successfully. The services image is linked +to the image specified in the image keyword. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: The name of the services image, including the registry path if needed, in one of these formats: + + + + (Same as using with the latest tag) + : + @ + + +CI/CD variables are supported, but not for alias. + +Example of services: + +default: + image: + name: ruby:2.6 + entrypoint: [""/bin/bash""] + + services: + - name: my-postgres:11.7 + alias: db-postgres + entrypoint: [""/usr/local/bin/db-postgres""] + command: [""start""] + + before_script: + - bundle install + +test: + script: + - bundle exec rake spec + + +In this example, GitLab launches two containers for the job: + + + A Ruby container that runs the script commands. + A PostgreSQL container. The script commands in the Ruby container can connect to +the PostgreSQL database at the db-postgrest hostname. + + +Related topics: + + + +Available settings for services. + +Define services in the .gitlab-ci.yml file. + +Run your CI/CD jobs in Docker containers. + +Use Docker to build Docker images. + + + +services:docker + + + +History + + + + + +Introduced in GitLab 16.7. Requires GitLab Runner 16.7 or later. + +user input option introduced in GitLab 16.8. + + + + + + +Use services:docker to pass options to the Docker executor of a GitLab Runner. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + +A hash of options for the Docker executor, which can include: + + + +platform: Selects the architecture of the image to pull. When not specified, +the default is the same platform as the host runner. + +user: Specify the username or UID to use when running the container. + + +Example of services:docker: + +arm-sql-job: + script: echo ""Run sql tests in service container"" + image: ruby:2.6 + services: + - name: super/sql:experimental + docker: + platform: arm64/v8 + user: dave + + +Additional details: + + + +services:docker:platform maps to the docker pull --platform option. + +services:docker:user maps to the docker run --user option. + + + +services:pull_policy + + + +History + + + + + +Introduced in GitLab 15.1 with a flag named ci_docker_image_pull_policy. Disabled by default. + +Enabled on GitLab.com and self-managed in GitLab 15.2. + +Generally available in GitLab 15.4. Feature flag ci_docker_image_pull_policy removed. + Requires GitLab Runner 15.1 or later. + + + + + + +The pull policy that the runner uses to fetch the Docker image. + +Keyword type: Job keyword. You can use it only as part of a job or in the default section. + +Possible inputs: + + + A single pull policy, or multiple pull policies in an array. +Can be always, if-not-present, or never. 
+ + +Examples of services:pull_policy: + +job1: + script: echo ""A single pull policy."" + services: + - name: postgres:11.6 + pull_policy: if-not-present + +job2: + script: echo ""Multiple pull policies."" + services: + - name: postgres:11.6 + pull_policy: [always, if-not-present] + + +Additional details: + + + If the runner does not support the defined pull policy, the job fails with an error similar to: +ERROR: Job failed (system failure): the configured PullPolicies ([always]) are not allowed by AllowedPullPolicies ([never]). + + +Related topics: + + + +Run your CI/CD jobs in Docker containers. + +Configure how runners pull images. + +Set multiple pull policies. + + + +stage + + +Use stage to define which stage a job runs in. Jobs in the same +stage can execute in parallel (see Additional details). + +If stage is not defined, the job uses the test stage by default. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: A string, which can be a: + + + +Default stage. + User-defined stages. + + +Example of stage: + +stages: + - build + - test + - deploy + +job1: + stage: build + script: + - echo ""This job compiles code."" + +job2: + stage: test + script: + - echo ""This job tests the compiled code. It runs when the build stage completes."" + +job3: + script: + - echo ""This job also runs in the test stage"". + +job4: + stage: deploy + script: + - echo ""This job deploys the code. It runs when the test stage completes."" + environment: production + + +Additional details: + + + Jobs can run in parallel if they run on different runners. + If you have only one runner, jobs can run in parallel if the runner’s +concurrent setting +is greater than 1. + + + +stage: .pre + + + +History + + + + + +Introduced in GitLab 12.4. + + + + + + +Use the .pre stage to make a job run at the start of a pipeline. .pre is +always the first stage in a pipeline. User-defined stages execute after .pre. +You do not have to define .pre in stages. + +If a pipeline contains only jobs in the .pre or .post stages, it does not run. +There must be at least one other job in a different stage. + +Keyword type: You can only use it with a job’s stage keyword. + +Example of stage: .pre: + +stages: + - build + - test + +job1: + stage: build + script: + - echo ""This job runs in the build stage."" + +first-job: + stage: .pre + script: + - echo ""This job runs in the .pre stage, before all other stages."" + +job2: + stage: test + script: + - echo ""This job runs in the test stage."" + + + +stage: .post + + + +History + + + + + +Introduced in GitLab 12.4. + + + + + + +Use the .post stage to make a job run at the end of a pipeline. .post +is always the last stage in a pipeline. User-defined stages execute before .post. +You do not have to define .post in stages. + +If a pipeline contains only jobs in the .pre or .post stages, it does not run. +There must be at least one other job in a different stage. + +Keyword type: You can only use it with a job’s stage keyword. + +Example of stage: .post: + +stages: + - build + - test + +job1: + stage: build + script: + - echo ""This job runs in the build stage."" + +last-job: + stage: .post + script: + - echo ""This job runs in the .post stage, after all other stages."" + +job2: + stage: test + script: + - echo ""This job runs in the test stage."" + + +Additional details: + + + If a pipeline has jobs with needs: [] and jobs in the .pre stage, they will +all start as soon as the pipeline is created. 
Jobs with needs: [] start immediately, +ignoring any stage configuration. + + + +tags + + + +History + + + + + A limit of 50 tags per job enabled on GitLab.com in GitLab 14.3. + A limit of 50 tags per job enabled on self-managed in GitLab 14.3. + + + + + + +Use tags to select a specific runner from the list of all runners that are +available for the project. + +When you register a runner, you can specify the runner’s tags, for +example ruby, postgres, or development. To pick up and run a job, a runner must +be assigned every tag listed in the job. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: + + + An array of tag names. + CI/CD variables are supported +in GitLab 14.1 and later. + + +Example of tags: + +job: + tags: + - ruby + - postgres + + +In this example, only runners with both the ruby and postgres tags can run the job. + +Additional details: + + + In GitLab 14.3 and later, +the number of tags must be less than 50. + + +Related topics: + + + +Use tags to control which jobs a runner can run. + +Select different runner tags for each parallel matrix job. + + + +timeout + + + +History + + + + + +Introduced in GitLab 12.3. + + + + + + +Use timeout to configure a timeout for a specific job. If the job runs for longer +than the timeout, the job fails. + +The job-level timeout can be longer than the project-level timeout, +but can’t be longer than the runner’s timeout. + +Keyword type: Job keyword. You can use it only as part of a job or in the +default section. + +Possible inputs: A period of time written in natural language. For example, these are all equivalent: + + + 3600 seconds + 60 minutes + one hour + + +Example of timeout: + +build: + script: build.sh + timeout: 3 hours 30 minutes + +test: + script: rspec + timeout: 3h 30m + + + +trigger + + + +History + + + + + Support for resource_group introduced support for resource_group in GitLab 13.9. + Support for environment introduced in GitLab 16.4. + + + + + + +Use trigger to declare that a job is a “trigger job” which starts a +downstream pipeline that is either: + + + +A multi-project pipeline. + +A child pipeline. + + +Trigger jobs can use only a limited set of GitLab CI/CD configuration keywords. +The keywords available for use in trigger jobs are: + + + +allow_failure. + +extends. + +needs, but not needs:project. + +only and except. + +rules. + +stage. + +trigger. + +variables. + +when (only with a value of on_success, on_failure, or always). + +resource_group. + +environment. + + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + For multi-project pipelines, the path to the downstream project. CI/CD variables are supported +in GitLab 15.3 and later, but not job-level persisted variables. +Alternatively, use trigger:project. + For child pipelines, use trigger:include. + + +Example of trigger: + +trigger-multi-project-pipeline: + trigger: my-group/my-project + + +Additional details: + + + You cannot use the API to start when:manual trigger jobs. + In GitLab 13.5 and later, you +can use when:manual in the same job as trigger. In GitLab 13.4 and +earlier, using them together causes the error jobs:#{job-name} when should be on_success, on_failure or always. + You cannot manually specify CI/CD variables +before running a manual trigger job. + +Manual pipeline variables +and scheduled pipeline variables +are not passed to downstream pipelines by default. Use trigger:forward +to forward these variables to downstream pipelines. 
+ +Job-level persisted variables +are not available in trigger jobs. + Environment variables defined in the runner’s config.toml are not available to trigger jobs and are not passed to downstream pipelines. + + +Related topics: + + + +Multi-project pipeline configuration examples. + To run a pipeline for a specific branch, tag, or commit, you can use a trigger token +to authenticate with the pipeline triggers API. +The trigger token is different than the trigger keyword. + + + +trigger:include + + +Use trigger:include to declare that a job is a “trigger job” which starts a +child pipeline. + +Use trigger:include:artifact to trigger a dynamic child pipeline. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + The path to the child pipeline’s configuration file. + + +Example of trigger:include: + +trigger-child-pipeline: + trigger: + include: path/to/child-pipeline.gitlab-ci.yml + + +Related topics: + + + +Child pipeline configuration examples. + + + +trigger:project + + +Use trigger:project to declare that a job is a “trigger job” which starts a +multi-project pipeline. + +By default, the multi-project pipeline triggers for the default branch. Use trigger:branch +to specify a different branch. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + The path to the downstream project. CI/CD variables are supported +in GitLab 15.3 and later, but not job-level persisted variables. + + +Example of trigger:project: + +trigger-multi-project-pipeline: + trigger: + project: my-group/my-project + + +Example of trigger:project for a different branch: + +trigger-multi-project-pipeline: + trigger: + project: my-group/my-project + branch: development + + +Related topics: + + + +Multi-project pipeline configuration examples. + To run a pipeline for a specific branch, tag, or commit, you can also use a trigger token +to authenticate with the pipeline triggers API. +The trigger token is different than the trigger keyword. + + + +trigger:strategy + + +Use trigger:strategy to force the trigger job to wait for the downstream pipeline to complete +before it is marked as success. + +This behavior is different than the default, which is for the trigger job to be marked as +success as soon as the downstream pipeline is created. + +This setting makes your pipeline execution linear rather than parallel. + +Example of trigger:strategy: + +trigger_job: + trigger: + include: path/to/child-pipeline.yml + strategy: depend + + +In this example, jobs from subsequent stages wait for the triggered pipeline to +successfully complete before starting. + +Additional details: + + + +Optional manual jobs in the downstream pipeline +do not affect the status of the downstream pipeline or the upstream trigger job. +The downstream pipeline can complete successfully without running any optional manual jobs. + +Blocking manual jobs in the downstream pipeline +must run before the trigger job is marked as successful or failed. The trigger job +shows pending ( ) if the downstream pipeline status is +waiting for manual action ( ) due to manual jobs. By default, +jobs in later stages do not start until the trigger job completes. + If the downstream pipeline has a failed job, but the job uses allow_failure: true, +the downstream pipeline is considered successful and the trigger job shows success. + + + +trigger:forward + + + +History + + + + + +Introduced in GitLab 14.9 with a flag named ci_trigger_forward_variables. Disabled by default. 
+ +Enabled on GitLab.com and self-managed in GitLab 14.10. + +Generally available in GitLab 15.1. Feature flag ci_trigger_forward_variables removed. + + + + + + +Use trigger:forward to specify what to forward to the downstream pipeline. You can control +what is forwarded to both parent-child pipelines +and multi-project pipelines. + +Possible inputs: + + + +yaml_variables: true (default), or false. When true, variables defined +in the trigger job are passed to downstream pipelines. + +pipeline_variables: true or false (default). When true, manual pipeline variables and scheduled pipeline variables +are passed to downstream pipelines. + + +Example of trigger:forward: + +Run this pipeline manually, with +the CI/CD variable MYVAR = my value: + +variables: # default variables for each job + VAR: value + +# Default behavior: +# - VAR is passed to the child +# - MYVAR is not passed to the child +child1: + trigger: + include: .child-pipeline.yml + +# Forward pipeline variables: +# - VAR is passed to the child +# - MYVAR is passed to the child +child2: + trigger: + include: .child-pipeline.yml + forward: + pipeline_variables: true + +# Do not forward YAML variables: +# - VAR is not passed to the child +# - MYVAR is not passed to the child +child3: + trigger: + include: .child-pipeline.yml + forward: + yaml_variables: false + + +Additional details: + + + CI/CD variables forwarded to downstream pipelines with trigger:forward have the +highest precedence. If a variable +with the same name is defined in the downstream pipeline, that variable is overwritten +by the forwarded variable. + + + +variables + + +Use variables to define CI/CD variables for jobs. + +Keyword type: Global and job keyword. You can use it at the global level, +and also at the job level. + +If you define variables as a global keyword, it behaves like default variables +for all jobs. Each variable is copied to every job configuration when the pipeline is created. +If the job already has that variable defined, the job-level variable takes precedence. + +Variables defined at the global-level cannot be used as inputs for other global keywords +like include. These variables can only +be used at the job-level, in script, before_script, and after_script sections, +as well as inputs in some job keywords like rules. + +Possible inputs: Variable name and value pairs: + + + The name can use only numbers, letters, and underscores (_). In some shells, +the first character must be a letter. + The value must be a string. + + +CI/CD variables are supported. + +Examples of variables: + +variables: + DEPLOY_SITE: ""https://example.com/"" + +deploy_job: + stage: deploy + script: + - deploy-script --url $DEPLOY_SITE --path ""/"" + environment: production + +deploy_review_job: + stage: deploy + variables: + REVIEW_PATH: ""/review"" + script: + - deploy-review-script --url $DEPLOY_SITE --path $REVIEW_PATH + environment: production + + +Additional details: + + + All YAML-defined variables are also set to any linked Docker service containers. + YAML-defined variables are meant for non-sensitive project configuration. Store sensitive information +in protected variables or CI/CD secrets. + +Manual pipeline variables +and scheduled pipeline variables +are not passed to downstream pipelines by default. Use trigger:forward +to forward these variables to downstream pipelines. + + +Related topics: + + + +Predefined variables are variables the runner +automatically creates and makes available in the job. + You can configure runner behavior with variables. 
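The precedence described above, where a job-level variable overrides a global variable of the same name, can be illustrated with a minimal sketch. The variable and job names below are illustrative only and are not taken from the reference:

variables:
  TARGET: ""staging""                  # global value, copied into every job

use-global-value:
  script:
    - echo ""Deploying to $TARGET""    # prints ""Deploying to staging""

override-global-value:
  variables:
    TARGET: ""production""             # job-level value takes precedence
  script:
    - echo ""Deploying to $TARGET""    # prints ""Deploying to production""
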
+ + + +variables:description + + + +History + + + + + +Introduced in GitLab 13.7. + + + + + + +Use the description keyword to define a description for a pipeline-level (global) variable. +The description displays with the prefilled variable name when running a pipeline manually. + +Keyword type: Global keyword. You cannot use it for job-level variables. + +Possible inputs: + + + A string. + + +Example of variables:description: + +variables: + DEPLOY_NOTE: + description: ""The deployment note. Explain the reason for this deployment."" + + +Additional details: + + + When used without value, the variable exists in pipelines that were not triggered manually, +and the default value is an empty string (''). + + + +variables:value + + + +History + + + + + +Introduced in GitLab 13.7. + + + + + + +Use the value keyword to define a pipeline-level (global) variable’s value. When used with +variables: description, the variable value is prefilled when running a pipeline manually. + +Keyword type: Global keyword. You cannot use it for job-level variables. + +Possible inputs: + + + A string. + + +Example of variables:value: + +variables: + DEPLOY_ENVIRONMENT: + value: ""staging"" + description: ""The deployment target. Change this variable to 'canary' or 'production' if needed."" + + +Additional details: + + + If used without variables: description, the behavior is +the same as variables. + + + +variables:options + + + +History + + + + + +Introduced in GitLab 15.7. + + + + + + +Use variables:options to define an array of values that are selectable in the UI when running a pipeline manually. + +Must be used with variables: value, and the string defined for value: + + + Must also be one of the strings in the options array. + Is the default selection. + + +If there is no description, +this keyword has no effect. + +Keyword type: Global keyword. You cannot use it for job-level variables. + +Possible inputs: + + + An array of strings. + + +Example of variables:options: + +variables: + DEPLOY_ENVIRONMENT: + value: ""staging"" + options: + - ""production"" + - ""staging"" + - ""canary"" + description: ""The deployment target. Set to 'staging' by default."" + + + +variables:expand + + + +History + + + + + +Introduced in GitLab 15.6 with a flag named ci_raw_variables_in_yaml_config. Disabled by default. + +Enabled on GitLab.com in GitLab 15.6. + +Enabled on self-managed in GitLab 15.7. + +Generally available in GitLab 15.8. Feature flag ci_raw_variables_in_yaml_config removed. + + + + + + +Use the expand keyword to configure a variable to be expandable or not. + +Keyword type: Global and job keyword. You can use it at the global level, and also at the job level. + +Possible inputs: + + + +true (default): The variable is expandable. + +false: The variable is not expandable. + + +Example of variables:expand: + +variables: + VAR1: value1 + VAR2: value2 $VAR1 + VAR3: + value: value3 $VAR1 + expand: false + + + + The result of VAR2 is value2 value1. + The result of VAR3 is value3 $VAR1. + + +Additional details: + + + The expand keyword can only be used with the global and job-level variables keywords. +You can’t use it with rules:variables or workflow:rules:variables. + + + +when + + +Use when to configure the conditions for when jobs run. If not defined in a job, +the default value is when: on_success. + +Keyword type: Job keyword. You can use it as part of a job. when: always and when: never can also be used in workflow:rules. 
+ +Possible inputs: + + + +on_success (default): Run the job only when no jobs in earlier stages fail +or have allow_failure: true. + +on_failure: Run the job only when at least one job in an earlier stage fails. A job in an earlier stage +with allow_failure: true is always considered successful. + +never: Don’t run the job regardless of the status of jobs in earlier stages. +Can only be used in a rules section or workflow: rules. + +always: Run the job regardless of the status of jobs in earlier stages. Can also be used in workflow:rules. + +manual: Run the job only when triggered manually. + +delayed: Delay the execution of a job +for a specified duration. + + +Example of when: + +stages: + - build + - cleanup_build + - test + - deploy + - cleanup + +build_job: + stage: build + script: + - make build + +cleanup_build_job: + stage: cleanup_build + script: + - cleanup build when failed + when: on_failure + +test_job: + stage: test + script: + - make test + +deploy_job: + stage: deploy + script: + - make deploy + when: manual + environment: production + +cleanup_job: + stage: cleanup + script: + - cleanup after jobs + when: always + + +In this example, the script: + + + Executes cleanup_build_job only when build_job fails. + Always executes cleanup_job as the last step in pipeline regardless of +success or failure. + Executes deploy_job when you run it manually in the GitLab UI. + + +Additional details: + + + In GitLab 13.5 and later, you +can use when:manual in the same job as trigger. In GitLab 13.4 and +earlier, using them together causes the error jobs:#{job-name} when should be on_success, on_failure or always. + The default behavior of allow_failure changes to true with when: manual. +However, if you use when: manual with rules, allow_failure defaults +to false. + + +Related topics: + + + +when can be used with rules for more dynamic job control. + +when can be used with workflow to control when a pipeline can start. + + +Deprecated keywords + + +The following keywords are deprecated. + + + note These keywords are still usable to ensure backwards compatibility, +but could be scheduled for removal in a future major milestone. + + +Globally-defined image, services, cache, before_script, after_script + + +Defining image, services, cache, before_script, and after_script globally is deprecated. +Using these keywords at the top level is still possible to ensure backwards compatibility, +but could be scheduled for removal in a future milestone. + +Use default instead. For example: + +default: + image: ruby:3.0 + services: + - docker:dind + cache: + paths: [vendor/] + before_script: + - bundle config set path vendor/bundle + - bundle install + after_script: + - rm -rf tmp/ + + + +only / except + + + + note +only and except are deprecated and not being actively developed. These keywords +are still usable to ensure backwards compatibility, but could be scheduled for removal +in a future milestone. To control when to add jobs to pipelines, use rules instead. + + +You can use only and except to control when to add jobs to pipelines. + + + Use only to define when a job runs. + Use except to define when a job does not run. + + +See specify when jobs run with only and except +for more details and examples. + + +only:refs / except:refs + + + + note +only:refs and except:refs are deprecated and not being actively developed. These keywords +are still usable to ensure backwards compatibility, but could be scheduled for removal +in a future milestone. 
To use refs, regular expressions, or variables to control +when to add jobs to pipelines, use rules:if instead. + + +You can use the only:refs and except:refs keywords to control when to add jobs to a +pipeline based on branch names or pipeline types. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: An array including any number of: + + + Branch names, for example main or my-feature-branch. + +Regular expressions +that match against branch names, for example /^feature-.*/. + + The following keywords: + + + + + Value + Description + + + + + api + For pipelines triggered by the pipelines API. + + + branches + When the Git reference for a pipeline is a branch. + + + chat + For pipelines created by using a GitLab ChatOps command. + + + external + When you use CI services other than GitLab. + + + external_pull_requests + When an external pull request on GitHub is created or updated (See Pipelines for external pull requests). + + + merge_requests + For pipelines created when a merge request is created or updated. Enables merge request pipelines, merged results pipelines, and merge trains. + + + pipelines + For multi-project pipelines created by using the API with CI_JOB_TOKEN, or the trigger keyword. + + + pushes + For pipelines triggered by a git push event, including for branches and tags. + + + schedules + For scheduled pipelines. + + + tags + When the Git reference for a pipeline is a tag. + + + triggers + For pipelines created by using a trigger token. + + + web + For pipelines created by selecting Run pipeline in the GitLab UI, from the project’s Build > Pipelines section. + + + + + + +Example of only:refs and except:refs: + +job1: + script: echo + only: + - main + - /^issue-.*$/ + - merge_requests + +job2: + script: echo + except: + - main + - /^stable-branch.*$/ + - schedules + + +Additional details: + + + Scheduled pipelines run on specific branches, so jobs configured with only: branches +run on scheduled pipelines too. Add except: schedules to prevent jobs with only: branches +from running on scheduled pipelines. + + only or except used without any other keywords are equivalent to only: refs +or except: refs. For example, the following two jobs configurations have the same +behavior: + + +job1: + script: echo + only: + - branches + +job2: + script: echo + only: + refs: + - branches + + + + If a job does not use only, except, or rules, then only is set to branches +and tags by default. + + For example, job1 and job2 are equivalent: + + +job1: + script: echo ""test"" + +job2: + script: echo ""test"" + only: + - branches + - tags + + + + + +only:variables / except:variables + + + + note +only:variables and except:variables are deprecated and not being actively developed. +These keywords are still usable to ensure backwards compatibility, but could be scheduled +for removal in a future milestone. To use refs, regular expressions, or variables +to control when to add jobs to pipelines, use rules:if instead. + + +You can use the only:variables or except:variables keywords to control when to add jobs +to a pipeline, based on the status of CI/CD variables. + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: + + + An array of CI/CD variable expressions. + + +Example of only:variables: + +deploy: + script: cap staging deploy + only: + variables: + - $RELEASE == ""staging"" + - $STAGING + + +Related topics: + + + +only:variables and except:variables examples. 
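Because the note above recommends rules:if in place of only:variables, the deploy example could be expressed with rules instead. The following is a sketch of an equivalent configuration, not text from the original reference:

deploy:
  script: cap staging deploy
  rules:
    - if: $RELEASE == ""staging""
    - if: $STAGING

Each - if: entry is evaluated in order, and the job is added to the pipeline when the first one matches, which mirrors the OR behavior of the only:variables array.
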
+ + + +only:changes / except:changes + + +only:variables and except:variables + + + note +only:changes and except:changes are deprecated and not being actively developed. +These keywords are still usable to ensure backwards compatibility, but could be scheduled +for removal in a future milestone. To use changed files to control when to add a job to a pipeline, +use rules:changes instead. + + +Use the changes keyword with only to run a job, or with except to skip a job, +when a Git push event modifies a file. + +Use changes in pipelines with the following refs: + + + branches + external_pull_requests + +merge_requests (see additional details about using only:changes with merge request pipelines) + + +Keyword type: Job keyword. You can use it only as part of a job. + +Possible inputs: An array including any number of: + + + Paths to files. + Wildcard paths for: + + Single directories, for example path/to/directory/*. + A directory and all its subdirectories, for example path/to/directory/**/*. + + + Wildcard glob paths for all files +with the same extension or multiple extensions, for example *.md or path/to/directory/*.{rb,py,sh}. + Wildcard paths to files in the root directory, or all directories, wrapped in double quotes. +For example ""*.json"" or ""**/*.json"". + + +Example of only:changes: + +docker build: + script: docker build -t my-image:$CI_COMMIT_REF_SLUG . + only: + refs: + - branches + changes: + - Dockerfile + - docker/scripts/* + - dockerfiles/**/* + - more_scripts/*.{rb,py,sh} + - ""**/*.json"" + + +Additional details: + + + +changes resolves to true if any of the matching files are changed (an OR operation). + Glob patterns are interpreted with Ruby’s File.fnmatch +with the flags +File::FNM_PATHNAME | File::FNM_DOTMATCH | File::FNM_EXTGLOB. + If you use refs other than branches, external_pull_requests, or merge_requests, +changes can’t determine if a given file is new or old and always returns true. + If you use only: changes with other refs, jobs ignore the changes and always run. + If you use except: changes with other refs, jobs ignore the changes and never run. + + +Related topics: + + + +only: changes and except: changes examples. + If you use changes with only allow merge requests to be merged if the pipeline succeeds, +you should also use only:merge_requests. + +Jobs or pipelines can run unexpectedly when using only: changes. + + + +only:kubernetes / except:kubernetes + + + + note +only:kubernetes and except:kubernetes are deprecated and not being actively developed. +These keywords are still usable to ensure backwards compatibility, but could be scheduled +for removal in a future milestone. To control if jobs are added to the pipeline when +the Kubernetes service is active in the project, use rules:if with the +CI_KUBERNETES_ACTIVE predefined CI/CD variable instead. + + +Use only:kubernetes or except:kubernetes to control if jobs are added to the pipeline +when the Kubernetes service is active in the project. + +Keyword type: Job-specific. You can use it only as part of a job. + +Possible inputs: + + + The kubernetes strategy accepts only the active keyword. + + +Example of only:kubernetes: + +deploy: + only: + kubernetes: active + + +In this example, the deploy job runs only when the Kubernetes service is active +in the project. + + +" +what are the key differences between github actions and gitlab ci/cd?,,"1. 
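The note above points to rules:if with the CI_KUBERNETES_ACTIVE predefined CI/CD variable as the non-deprecated way to achieve the same result. A minimal sketch of that approach, with a placeholder script line that is not part of the original example:

deploy:
  script: echo ""Deploying because the Kubernetes integration is active""
  rules:
    - if: $CI_KUBERNETES_ACTIVE == ""true""

As with only:kubernetes, the deploy job is added to the pipeline only when the Kubernetes integration is active in the project.
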
Migrating from GitHub Actions + + + +Migrating from GitHub Actions + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +If you’re migrating from GitHub Actions to GitLab CI/CD, you are able to create CI/CD +pipelines that replicate and enhance your GitHub Action workflows. + +Key Similarities and Differences + + +GitHub Actions and GitLab CI/CD are both used to generate pipelines to automate building, testing, +and deploying your code. Both share similarities including: + + + CI/CD functionality has direct access to the code stored in the project repository. + Pipeline configurations written in YAML and stored in the project repository. + Pipelines are configurable and can run in different stages. + Jobs can each use a different container image. + + +Additionally, there are some important differences between the two: + + + GitHub has a marketplace for downloading 3rd-party actions, which might require additional support or licenses. + Self-managed GitLab instances support both horizontal and vertical scaling, while +GitHub Enterprise Server only supports vertical scaling. + GitLab maintains and supports all features in house, and some 3rd-party integrations +are accessible through templates. + GitLab provides a built-in container registry. + GitLab has native Kubernetes deployment support. + GitLab provides granular security policies. + + +Comparison of features and concepts + + +Many GitHub features and concepts have equivalents in GitLab that offer the same +functionality. + +Configuration file + + +GitHub Actions can be configured with a workflow YAML file. +GitLab CI/CD uses a .gitlab-ci.yml YAML file by default. + +For example, in a GitHub Actions workflow file: + +on: [push] +jobs: + hello: + runs-on: ubuntu-latest + steps: + - run: echo ""Hello World"" + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +stages: + - hello + +hello: + stage: hello + script: + - echo ""Hello World"" + + +GitHub Actions workflow syntax + + +A GitHub Actions configuration is defined in a workflow YAML file using specific keywords. +GitLab CI/CD has similar functionality, also usually configured with YAML keywords. + + + + + GitHub + GitLab + Explanation + + + + + env + variables + +env defines the variables set in a workflow, job, or step. GitLab uses variables to define CI/CD variables at the global or job level. Variables can also be added in the UI. + + + jobs + stages + +jobs groups together all the jobs that run in the workflow. GitLab uses stages to group jobs together. + + + on + Not applicable + +on defines when a workflow is triggered. GitLab is integrated tightly with Git, so SCM polling options for triggers are not needed, but can be configured per job if required. + + + run + Not applicable + The command to execute in the job. GitLab uses a YAML array under the script keyword, one entry for each command to execute. + + + runs-on + tags + +runs-on defines the GitHub runner that a job must run on. GitLab uses tags to select a runner. + + + steps + script + +steps groups together all the steps that run in a job. GitLab uses script to group together all the commands run in a job. + + + uses + include + +uses defines what GitHub Action to be added to a step. GitLab uses include to add configuration from other files to a job. + + + + +Common configurations + + +This section goes over commonly used CI/CD configurations, showing how they can be converted +from GitHub Actions to GitLab CI/CD. 
+ +GitHub Action workflows +generate automated CI/CD jobs that are triggered when certain event take place, for example +pushing a new commit. A GitHub Action workflow is a YAML file defined in the .github/workflows +directory located in the root of the repository. The GitLab equivalent is the +.gitlab-ci.yml configuration file which also resides +in the repository’s root directory. + +Jobs + + +Jobs are a set of commands that run in a set sequence to achieve a particular result, +for example building a container or deploying to production. + +For example, this GitHub Actions workflow builds a container then deploys it to production. +The jobs runs sequentially, because the deploy job depends on the build job: + +on: [push] +jobs: + build: + runs-on: ubuntu-latest + container: golang:alpine + steps: + - run: apk update + - run: go build -o bin/hello + - uses: actions/upload-artifact@v3 + with: + name: hello + path: bin/hello + retention-days: 7 + deploy: + if: contains( github.ref, 'staging') + runs-on: ubuntu-latest + container: golang:alpine + steps: + - uses: actions/download-artifact@v3 + with: + name: hello + - run: echo ""Deploying to Staging"" + - run: scp bin/hello remoteuser@remotehost:/remote/directory + + +This example: + + + Uses the golang:alpine container image. + Runs a job for building code. + + Stores build executable as artifact. + + + Runs a second job to deploy to staging, which also: + + Requires the build job to succeed before running. + Requires the commit target branch staging. + Uses the build executable artifact. + + + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +default: + image: golang:alpine + +stages: + - build + - deploy + +build-job: + stage: build + script: + - apk update + - go build -o bin/hello + artifacts: + paths: + - bin/hello + expire_in: 1 week + +deploy-job: + stage: deploy + script: + - echo ""Deploying to Staging"" + - scp bin/hello remoteuser@remotehost:/remote/directory + rules: + - if: $CI_COMMIT_BRANCH == 'staging' + + +Parallel + + +In both GitHub and GitLab, Jobs run in parallel by default. + +For example, in a GitHub Actions workflow file: + +on: [push] +jobs: + python-version: + runs-on: ubuntu-latest + container: python:latest + steps: + - run: python --version + java-version: + if: contains( github.ref, 'staging') + runs-on: ubuntu-latest + container: openjdk:latest + steps: + - run: java -version + + +This example runs a Python job and a Java job in parallel, using different container images. +The Java job only runs when the staging branch is changed. + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +python-version: + image: python:latest + script: + - python --version + +java-version: + image: openjdk:latest + rules: + - if: $CI_COMMIT_BRANCH == 'staging' + script: + - java -version + + +In this case, no extra configuration is needed to make the jobs run in parallel. +Jobs run in parallel by default, each on a different runner assuming there are enough runners +for all the jobs. The Java job is set to only run when the staging branch is changed. + +Matrix + + +In both GitLab and GitHub you can use a matrix to run a job multiple times in parallel in a single pipeline, +but with different variable values for each instance of the job. 
+ +For example, in a GitHub Actions workflow file: + +on: [push] +jobs: + build: + runs-on: ubuntu-latest + steps: + - run: echo ""Building $PLATFORM for $ARCH"" + strategy: + matrix: + platform: [linux, mac, windows] + arch: [x64, x86] + test: + runs-on: ubuntu-latest + steps: + - run: echo ""Testing $PLATFORM for $ARCH"" + strategy: + matrix: + platform: [linux, mac, windows] + arch: [x64, x86] + deploy: + runs-on: ubuntu-latest + steps: + - run: echo ""Deploying $PLATFORM for $ARCH"" + strategy: + matrix: + platform: [linux, mac, windows] + arch: [x64, x86] + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +stages: + - build + - test + - deploy + +.parallel-hidden-job: + parallel: + matrix: + - PLATFORM: [linux, mac, windows] + ARCH: [x64, x86] + +build-job: + extends: .parallel-hidden-job + stage: build + script: + - echo ""Building $PLATFORM for $ARCH"" + +test-job: + extends: .parallel-hidden-job + stage: test + script: + - echo ""Testing $PLATFORM for $ARCH"" + +deploy-job: + extends: .parallel-hidden-job + stage: deploy + script: + - echo ""Deploying $PLATFORM for $ARCH"" + + +Trigger + + +GitHub Actions requires you to add a trigger for your workflow. GitLab is integrated tightly with Git, +so SCM polling options for triggers are not needed, but can be configured per job if required. + +Sample GitHub Actions configuration: + +on: + push: + branches: + - main + + +The equivalent GitLab CI/CD configuration would be: + +rules: + - if: '$CI_COMMIT_BRANCH == main' + + +Pipelines can also be scheduled by using Cron syntax. + +Container Images + + +With GitLab you can run your CI/CD jobs in separate, isolated Docker containers +by using the image keyword. + +For example, in a GitHub Actions workflow file: + +jobs: + update: + runs-on: ubuntu-latest + container: alpine:latest + steps: + - run: apk update + + +In this example the apk update command runs in an alpine:latest container. + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +update-job: + image: alpine:latest + script: + - apk update + + +GitLab provides every project a container registry +for hosting container images. Container images can be built and stored directly from +GitLab CI/CD pipelines. + +For example: + +stages: + - build + +build-image: + stage: build + variables: + IMAGE: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA + before_script: + - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY + script: + - docker build -t $IMAGE . + - docker push $IMAGE + + +Variables + + +In GitLab, we use the variables keyword to define different CI/CD variables at runtime. +Use variables when you need to reuse configuration data in a pipeline. You can define +variables globally or per job. + +For example, in a GitHub Actions workflow file: + +env: + NAME: ""fern"" + +jobs: + english: + runs-on: ubuntu-latest + env: + Greeting: ""hello"" + steps: + - run: echo ""$GREETING $NAME"" + spanish: + runs-on: ubuntu-latest + env: + Greeting: ""hola"" + steps: + - run: echo ""$GREETING $NAME"" + + +In this example, variables provide different outputs for the jobs. + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +default: + image: ubuntu-latest + +variables: + NAME: ""fern"" + +english: + variables: + GREETING: ""hello"" + script: + - echo ""$GREETING $NAME"" + +spanish: + variables: + GREETING: ""hola"" + script: + - echo ""$GREETING $NAME"" + + +Variables can also be set up through the GitLab UI, under CI/CD settings, where you can +protect or mask +the variables. 
Masked variables are hidden in job logs, while protected variables +can only be accessed in pipelines for protected branches or tags. + +For example, in a GitHub Actions workflow file: + +jobs: + login: + runs-on: ubuntu-latest + env: + AWS_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY }} + steps: + - run: my-login-script.sh ""$AWS_ACCESS_KEY"" + + +If the AWS_ACCESS_KEY variable is defined in the GitLab project settings, the equivalent +GitLab CI/CD .gitlab-ci.yml file would be: + +login: + script: + - my-login-script.sh $AWS_ACCESS_KEY + + +Additionally, GitHub Actions +and GitLab CI/CD provide built-in variables +which contain data relevant to the pipeline and repository. + +Conditionals + + +When a new pipeline starts, GitLab checks the pipeline configuration to determine +which jobs should run in that pipeline. You can use the rules keyword +to configure jobs to run depending on conditions like the status of variables, or the pipeline type. + +For example, in a GitHub Actions workflow file: + +jobs: + deploy_staging: + if: contains( github.ref, 'staging') + runs-on: ubuntu-latest + steps: + - run: echo ""Deploy to staging server"" + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +deploy_staging: + stage: deploy + script: + - echo ""Deploy to staging server"" + rules: + - if: '$CI_COMMIT_BRANCH == staging' + + +Runners + + +Runners are the services that execute jobs. If you are using GitLab.com, you can use the +instance runner fleet to run jobs without provisioning your own self-managed runners. + +Some key details about runners: + + + Runners can be configured to be shared across an instance, +a group, or dedicated to a single project. + You can use the tags keyword +for finer control, and associate runners with specific jobs. For example, you can use a tag for jobs that +require dedicated, more powerful, or specific hardware. + GitLab has autoscaling for runners. +Use autoscaling to provision runners only when needed and scale down when not needed. + + +For example, in a GitHub Actions workflow file: + +linux_job: + runs-on: ubuntu-latest + steps: + - run: echo ""Hello, $USER"" + +windows_job: + runs-on: windows-latest + steps: + - run: echo ""Hello, %USERNAME%"" + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +linux_job: + stage: build + tags: + - linux-runners + script: + - echo ""Hello, $USER"" + +windows_job: + stage: build + tags: + - windows-runners + script: + - echo ""Hello, %USERNAME%"" + + +Artifacts + + +In GitLab, any job can use the artifacts keyword to define a set +of artifacts to be stored when a job completes. Artifacts are files +that can be used in later jobs. + +For example, in a GitHub Actions workflow file: + +on: [push] +jobs: + generate_cat: + steps: + - run: touch cat.txt + - run: echo ""meow"" > cat.txt + - uses: actions/upload-artifact@v3 + with: + name: cat + path: cat.txt + retention-days: 7 + use_cat: + needs: [generate_cat] + steps: + - uses: actions/download-artifact@v3 + with: + name: cat + - run: cat cat.txt + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +stage: + - generate + - use + +generate_cat: + stage: generate + script: + - touch cat.txt + - echo ""meow"" > cat.txt + artifacts: + paths: + - cat.txt + expire_in: 1 week + +use_cat: + stage: use + script: + - cat cat.txt + + +Caching + + +A cache is created when a job downloads one or more files and +saves them for faster access in the future. Subsequent jobs that use the same cache don’t have to download the files again, +so they execute more quickly. 
The cache is stored on the runner and uploaded to S3 if +distributed cache is enabled. + +For example, in a GitHub Actions workflow file: + +jobs: + build: + runs-on: ubuntu-latest + steps: + - run: echo ""This job uses a cache."" + - uses: actions/cache@v3 + with: + path: binaries/ + key: binaries-cache-$CI_COMMIT_REF_SLUG + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +cache-job: + script: + - echo ""This job uses a cache."" + cache: + key: binaries-cache-$CI_COMMIT_REF_SLUG + paths: + - binaries/ + + +Templates + + +In GitHub an Action is a set of complex tasks that need to be frequently repeated and is saved +to enable reuse without redefining a CI/CD pipeline. In GitLab the equivalent to an action would +be a the include keyword, which allows you to add CI/CD pipelines from other files, +including template files built into GitLab. + +Sample GitHub Actions configuration: + +- uses: hashicorp/setup-terraform@v2.0.3 + + +The equivalent GitLab CI/CD configuration would be: + +include: + - template: Terraform.gitlab-ci.yml + + +In these examples, the setup-terraform GitHub action and the Terraform.gitlab-ci.yml GitLab template +are not exact matches. These two examples are just to show how complex configuration can be reused. + +Security Scanning features + + +GitLab provides a variety of security scanners +out-of-the-box to detect vulnerabilities in all parts of the SLDC. You can add these features +to your GitLab CI/CD pipeline by using templates. + +for example to add SAST scanning to your pipeline, add the following to your .gitlab-ci.yml: + +include: + - template: Jobs/SAST.gitlab-ci.yml + + +You can customize the behavior of security scanners by using CI/CD variables, for example +with the SAST scanners. + +Secrets Management + + +Privileged information, often referred to as “secrets”, is sensitive information +or credentials you need in your CI/CD workflow. You might use secrets to unlock protected resources +or sensitive information in tools, applications, containers, and cloud-native environments. + +For secrets management in GitLab, you can use one of the supported integrations +for an external service. These services securely store secrets outside of your GitLab project, +though you must have a subscription for the service: + + + +HashiCorp Vault. + +Azure Key Vault. + + +GitLab also supports OIDC authentication +for other third party services that support OIDC. + +Additionally, you can make credentials available to jobs by storing them in CI/CD variables, though secrets +stored in plain text are susceptible to accidental exposure. You should always store sensitive information +in masked and protected +variables, which mitigates some of the risk. + +Also, never store secrets as variables in your .gitlab-ci.yml file, which is public to all +users with access to the project. Storing sensitive information in variables should +only be done in the project, group, or instance settings. + +Review the security guidelines to improve +the safety of your CI/CD variables. + +Planning and Performing a Migration + + +The following list of recommended steps was created after observing organizations +that were able to quickly complete this migration. + +Create a Migration Plan + + +Before starting a migration you should create a migration plan to make preparations for the migration. + +Prerequisites + + +Before doing any migration work, you should first: + + + Get familiar with GitLab. + + Read about the key GitLab CI/CD features. 
+ Follow tutorials to create your first GitLab pipeline and more complex pipelines that build, test, and deploys a static site. + Review the CI/CD YAML syntax reference. + + + Set up and configure GitLab. + Test your GitLab instance. + + Ensure runners are available, either by using shared GitLab.com runners or installing new runners. + + + + +Migration Steps + + + + Migrate Projects from GitHub to GitLab: + + (Recommended) You can use the GitHub Importer +to automate mass imports from external SCM providers. + You can import repositories by URL. + + + Create a .gitlab-ci.yml in each project. + Migrate GitHub Actions jobs to GitLab CI/CD jobs and configure them to show results directly in merge requests. + Migrate deployment jobs by using cloud deployment templates, +environments, and the GitLab agent for Kubernetes. + Check if any CI/CD configuration can be reused across different projects, then create +and share CI/CD templates + + Check the pipeline efficiency documentation +to learn how to make your GitLab CI/CD pipelines faster and more efficient. + + +Additional Resources + + + + Video: How to migrate from GitHub to GitLab including Actions + Blog: GitHub to GitLab migration the easy way + + +If you have questions that are not answered here, the GitLab community forum can be a great resource. + + +2. Get started with GitLab CI/CD + + + +Get started with GitLab CI/CD + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +CI/CD is a continuous method of software development, where you continuously build, +test, deploy, and monitor iterative code changes. + +This iterative process helps reduce the chance that you develop new code based on +buggy or failed previous versions. GitLab CI/CD can catch bugs early in the development cycle, +and help ensure that all the code deployed to production complies with your established code standards. + +Common terms + + +If you’re new to GitLab CI/CD, start by reviewing some of the commonly used terms. + +The .gitlab-ci.yml file + + +To use GitLab CI/CD, you start with a .gitlab-ci.yml file at the root of your project +which contains the configuration for your CI/CD pipeline. This file follows the YAML format +and has its own syntax. + +You can name this file anything you want, but .gitlab-ci.yml is the most common name. + +Get started: + + + +Create your first .gitlab-ci.yml file. + View all the possible keywords that you can use in the .gitlab-ci.yml file in +the CI/CD YAML syntax reference. + Use the pipeline editor to edit or visualize +your CI/CD configuration. + + +Runners + + +Runners are the agents that run your jobs. These agents can run on physical machines or virtual instances. +In your .gitlab-ci.yml file, you can specify a container image you want to use when running the job. +The runner loads the image, clones your project and runs the job either locally or in the container. + +If you use GitLab.com, runners on Linux, Windows, and macOS are already available for use. And you can register your own +runners on GitLab.com if you’d like. + +If you don’t use GitLab.com, you can: + + + Register runners or use runners already registered for your self-managed instance. + Create a runner on your local machine. + + +Get started: + + + +Create a runner on your local machine. + +Learn more about runners. + + +Pipelines + + +Pipelines are made up of jobs and stages: + + + +Jobs define what you want to do. For example, test code changes, or deploy +to a staging environment. + Jobs are grouped into stages. 
Each stage contains at least one job. +Typical stages might be build, test, and deploy. + + +Get started: + + + +Learn more about pipelines. + + +CI/CD variables + + +CI/CD variables help you customize jobs by making values defined elsewhere accessible to jobs. +They can be hard-coded in your .gitlab-ci.yml file, project settings, or dynamically generated. + +Get started: + + + +Learn more about CI/CD variables. + +Learn about dynamically generated predefined variables. + + +CI/CD components + + +A CI/CD component is a reusable single pipeline configuration unit. Use them to compose an entire pipeline configuration or a small part of a larger pipeline. + +Get started: + + + +Learn more about CI/CD components. + + +Videos + + + + + GitLab CI/CD demo. + + GitLab CI/CD and the Web IDE. + Webcast: Mastering continuous software development. + + +Related topics + + + + +Five teams that made the switch to GitLab CI/CD. + +Make the case for CI/CD in your organization. + Learn how Verizon reduced rebuilds from 30 days to under 8 hours with GitLab. + Use the GitLab Workflow VS Code extension to +validate your configuration +and view your pipeline status. + + + +" +how can i migrate from github actions to gitlab ci?,,"1. Migrating from GitHub Actions + + + +Migrating from GitHub Actions + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +If you’re migrating from GitHub Actions to GitLab CI/CD, you are able to create CI/CD +pipelines that replicate and enhance your GitHub Action workflows. + +Key Similarities and Differences + + +GitHub Actions and GitLab CI/CD are both used to generate pipelines to automate building, testing, +and deploying your code. Both share similarities including: + + + CI/CD functionality has direct access to the code stored in the project repository. + Pipeline configurations written in YAML and stored in the project repository. + Pipelines are configurable and can run in different stages. + Jobs can each use a different container image. + + +Additionally, there are some important differences between the two: + + + GitHub has a marketplace for downloading 3rd-party actions, which might require additional support or licenses. + Self-managed GitLab instances support both horizontal and vertical scaling, while +GitHub Enterprise Server only supports vertical scaling. + GitLab maintains and supports all features in house, and some 3rd-party integrations +are accessible through templates. + GitLab provides a built-in container registry. + GitLab has native Kubernetes deployment support. + GitLab provides granular security policies. + + +Comparison of features and concepts + + +Many GitHub features and concepts have equivalents in GitLab that offer the same +functionality. + +Configuration file + + +GitHub Actions can be configured with a workflow YAML file. +GitLab CI/CD uses a .gitlab-ci.yml YAML file by default. + +For example, in a GitHub Actions workflow file: + +on: [push] +jobs: + hello: + runs-on: ubuntu-latest + steps: + - run: echo ""Hello World"" + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +stages: + - hello + +hello: + stage: hello + script: + - echo ""Hello World"" + + +GitHub Actions workflow syntax + + +A GitHub Actions configuration is defined in a workflow YAML file using specific keywords. +GitLab CI/CD has similar functionality, also usually configured with YAML keywords. + + + + + GitHub + GitLab + Explanation + + + + + env + variables + +env defines the variables set in a workflow, job, or step. 
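The Pipelines section above describes jobs grouped into stages such as build, test, and deploy. A minimal .gitlab-ci.yml sketch along those lines, with illustrative job names and placeholder commands:

stages:
  - build
  - test
  - deploy

build-job:
  stage: build
  script:
    - echo ""Compile the application""

test-job:
  stage: test
  script:
    - echo ""Run the test suite""

deploy-job:
  stage: deploy
  script:
    - echo ""Deploy to the target environment""
  environment: production

Jobs in the same stage can run in parallel, and each stage starts only after the jobs in the previous stage complete.
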
GitLab uses variables to define CI/CD variables at the global or job level. Variables can also be added in the UI. + + + jobs + stages + +jobs groups together all the jobs that run in the workflow. GitLab uses stages to group jobs together. + + + on + Not applicable + +on defines when a workflow is triggered. GitLab is integrated tightly with Git, so SCM polling options for triggers are not needed, but can be configured per job if required. + + + run + Not applicable + The command to execute in the job. GitLab uses a YAML array under the script keyword, one entry for each command to execute. + + + runs-on + tags + +runs-on defines the GitHub runner that a job must run on. GitLab uses tags to select a runner. + + + steps + script + +steps groups together all the steps that run in a job. GitLab uses script to group together all the commands run in a job. + + + uses + include + +uses defines what GitHub Action to be added to a step. GitLab uses include to add configuration from other files to a job. + + + + +Common configurations + + +This section goes over commonly used CI/CD configurations, showing how they can be converted +from GitHub Actions to GitLab CI/CD. + +GitHub Action workflows +generate automated CI/CD jobs that are triggered when certain event take place, for example +pushing a new commit. A GitHub Action workflow is a YAML file defined in the .github/workflows +directory located in the root of the repository. The GitLab equivalent is the +.gitlab-ci.yml configuration file which also resides +in the repository’s root directory. + +Jobs + + +Jobs are a set of commands that run in a set sequence to achieve a particular result, +for example building a container or deploying to production. + +For example, this GitHub Actions workflow builds a container then deploys it to production. +The jobs runs sequentially, because the deploy job depends on the build job: + +on: [push] +jobs: + build: + runs-on: ubuntu-latest + container: golang:alpine + steps: + - run: apk update + - run: go build -o bin/hello + - uses: actions/upload-artifact@v3 + with: + name: hello + path: bin/hello + retention-days: 7 + deploy: + if: contains( github.ref, 'staging') + runs-on: ubuntu-latest + container: golang:alpine + steps: + - uses: actions/download-artifact@v3 + with: + name: hello + - run: echo ""Deploying to Staging"" + - run: scp bin/hello remoteuser@remotehost:/remote/directory + + +This example: + + + Uses the golang:alpine container image. + Runs a job for building code. + + Stores build executable as artifact. + + + Runs a second job to deploy to staging, which also: + + Requires the build job to succeed before running. + Requires the commit target branch staging. + Uses the build executable artifact. + + + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +default: + image: golang:alpine + +stages: + - build + - deploy + +build-job: + stage: build + script: + - apk update + - go build -o bin/hello + artifacts: + paths: + - bin/hello + expire_in: 1 week + +deploy-job: + stage: deploy + script: + - echo ""Deploying to Staging"" + - scp bin/hello remoteuser@remotehost:/remote/directory + rules: + - if: $CI_COMMIT_BRANCH == 'staging' + + +Parallel + + +In both GitHub and GitLab, Jobs run in parallel by default. 
+ +For example, in a GitHub Actions workflow file: + +on: [push] +jobs: + python-version: + runs-on: ubuntu-latest + container: python:latest + steps: + - run: python --version + java-version: + if: contains( github.ref, 'staging') + runs-on: ubuntu-latest + container: openjdk:latest + steps: + - run: java -version + + +This example runs a Python job and a Java job in parallel, using different container images. +The Java job only runs when the staging branch is changed. + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +python-version: + image: python:latest + script: + - python --version + +java-version: + image: openjdk:latest + rules: + - if: $CI_COMMIT_BRANCH == 'staging' + script: + - java -version + + +In this case, no extra configuration is needed to make the jobs run in parallel. +Jobs run in parallel by default, each on a different runner assuming there are enough runners +for all the jobs. The Java job is set to only run when the staging branch is changed. + +Matrix + + +In both GitLab and GitHub you can use a matrix to run a job multiple times in parallel in a single pipeline, +but with different variable values for each instance of the job. + +For example, in a GitHub Actions workflow file: + +on: [push] +jobs: + build: + runs-on: ubuntu-latest + steps: + - run: echo ""Building $PLATFORM for $ARCH"" + strategy: + matrix: + platform: [linux, mac, windows] + arch: [x64, x86] + test: + runs-on: ubuntu-latest + steps: + - run: echo ""Testing $PLATFORM for $ARCH"" + strategy: + matrix: + platform: [linux, mac, windows] + arch: [x64, x86] + deploy: + runs-on: ubuntu-latest + steps: + - run: echo ""Deploying $PLATFORM for $ARCH"" + strategy: + matrix: + platform: [linux, mac, windows] + arch: [x64, x86] + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +stages: + - build + - test + - deploy + +.parallel-hidden-job: + parallel: + matrix: + - PLATFORM: [linux, mac, windows] + ARCH: [x64, x86] + +build-job: + extends: .parallel-hidden-job + stage: build + script: + - echo ""Building $PLATFORM for $ARCH"" + +test-job: + extends: .parallel-hidden-job + stage: test + script: + - echo ""Testing $PLATFORM for $ARCH"" + +deploy-job: + extends: .parallel-hidden-job + stage: deploy + script: + - echo ""Deploying $PLATFORM for $ARCH"" + + +Trigger + + +GitHub Actions requires you to add a trigger for your workflow. GitLab is integrated tightly with Git, +so SCM polling options for triggers are not needed, but can be configured per job if required. + +Sample GitHub Actions configuration: + +on: + push: + branches: + - main + + +The equivalent GitLab CI/CD configuration would be: + +rules: + - if: '$CI_COMMIT_BRANCH == main' + + +Pipelines can also be scheduled by using Cron syntax. + +Container Images + + +With GitLab you can run your CI/CD jobs in separate, isolated Docker containers +by using the image keyword. + +For example, in a GitHub Actions workflow file: + +jobs: + update: + runs-on: ubuntu-latest + container: alpine:latest + steps: + - run: apk update + + +In this example the apk update command runs in an alpine:latest container. + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +update-job: + image: alpine:latest + script: + - apk update + + +GitLab provides every project a container registry +for hosting container images. Container images can be built and stored directly from +GitLab CI/CD pipelines. 
 + +For example: + +stages: + - build + +build-image: + stage: build + variables: + IMAGE: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA + before_script: + - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY + script: + - docker build -t $IMAGE . + - docker push $IMAGE + + +Variables + + +In GitLab, we use the variables keyword to define different CI/CD variables at runtime. +Use variables when you need to reuse configuration data in a pipeline. You can define +variables globally or per job. + +For example, in a GitHub Actions workflow file: + +env: + NAME: ""fern"" + +jobs: + english: + runs-on: ubuntu-latest + env: + GREETING: ""hello"" + steps: + - run: echo ""$GREETING $NAME"" + spanish: + runs-on: ubuntu-latest + env: + GREETING: ""hola"" + steps: + - run: echo ""$GREETING $NAME"" + + +In this example, variables provide different outputs for the jobs. + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +default: + image: ubuntu:latest + +variables: + NAME: ""fern"" + +english: + variables: + GREETING: ""hello"" + script: + - echo ""$GREETING $NAME"" + +spanish: + variables: + GREETING: ""hola"" + script: + - echo ""$GREETING $NAME"" + + +Variables can also be set up through the GitLab UI, under CI/CD settings, where you can +protect or mask +the variables. Masked variables are hidden in job logs, while protected variables +can only be accessed in pipelines for protected branches or tags. + +For example, in a GitHub Actions workflow file: + +jobs: + login: + runs-on: ubuntu-latest + env: + AWS_ACCESS_KEY: ${{ secrets.AWS_ACCESS_KEY }} + steps: + - run: my-login-script.sh ""$AWS_ACCESS_KEY"" + + +If the AWS_ACCESS_KEY variable is defined in the GitLab project settings, the equivalent +GitLab CI/CD .gitlab-ci.yml file would be: + +login: + script: + - my-login-script.sh $AWS_ACCESS_KEY + + +Additionally, GitHub Actions +and GitLab CI/CD provide built-in variables +which contain data relevant to the pipeline and repository. + +Conditionals + + +When a new pipeline starts, GitLab checks the pipeline configuration to determine +which jobs should run in that pipeline. You can use the rules keyword +to configure jobs to run depending on conditions like the status of variables, or the pipeline type. + +For example, in a GitHub Actions workflow file: + +jobs: + deploy_staging: + if: contains( github.ref, 'staging') + runs-on: ubuntu-latest + steps: + - run: echo ""Deploy to staging server"" + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +deploy_staging: + stage: deploy + script: + - echo ""Deploy to staging server"" + rules: + - if: $CI_COMMIT_BRANCH == 'staging' + + +Runners + + +Runners are the services that execute jobs. If you are using GitLab.com, you can use the +instance runner fleet to run jobs without provisioning your own self-managed runners. + +Some key details about runners: + + + Runners can be configured to be shared across an instance, +a group, or dedicated to a single project. + You can use the tags keyword +for finer control, and associate runners with specific jobs. For example, you can use a tag for jobs that +require dedicated, more powerful, or specific hardware. + GitLab has autoscaling for runners. +Use autoscaling to provision runners only when needed and scale down when not needed.
 + + +For example, in a GitHub Actions workflow file: + +linux_job: + runs-on: ubuntu-latest + steps: + - run: echo ""Hello, $USER"" + +windows_job: + runs-on: windows-latest + steps: + - run: echo ""Hello, %USERNAME%"" + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +linux_job: + stage: build + tags: + - linux-runners + script: + - echo ""Hello, $USER"" + +windows_job: + stage: build + tags: + - windows-runners + script: + - echo ""Hello, %USERNAME%"" + + +Artifacts + + +In GitLab, any job can use the artifacts keyword to define a set +of artifacts to be stored when a job completes. Artifacts are files +that can be used in later jobs. + +For example, in a GitHub Actions workflow file: + +on: [push] +jobs: + generate_cat: + steps: + - run: touch cat.txt + - run: echo ""meow"" > cat.txt + - uses: actions/upload-artifact@v3 + with: + name: cat + path: cat.txt + retention-days: 7 + use_cat: + needs: [generate_cat] + steps: + - uses: actions/download-artifact@v3 + with: + name: cat + - run: cat cat.txt + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +stages: + - generate + - use + +generate_cat: + stage: generate + script: + - touch cat.txt + - echo ""meow"" > cat.txt + artifacts: + paths: + - cat.txt + expire_in: 1 week + +use_cat: + stage: use + script: + - cat cat.txt + + +Caching + + +A cache is created when a job downloads one or more files and +saves them for faster access in the future. Subsequent jobs that use the same cache don’t have to download the files again, +so they execute more quickly. The cache is stored on the runner and uploaded to S3 if +distributed cache is enabled. + +For example, in a GitHub Actions workflow file: + +jobs: + build: + runs-on: ubuntu-latest + steps: + - run: echo ""This job uses a cache."" + - uses: actions/cache@v3 + with: + path: binaries/ + key: binaries-cache-$CI_COMMIT_REF_SLUG + + +The equivalent GitLab CI/CD .gitlab-ci.yml file would be: + +cache-job: + script: + - echo ""This job uses a cache."" + cache: + key: binaries-cache-$CI_COMMIT_REF_SLUG + paths: + - binaries/ + + +Templates + + +In GitHub, an Action is a set of complex, frequently repeated tasks that is saved +so it can be reused without redefining it in every CI/CD pipeline. In GitLab, the equivalent of an action is the include keyword, which allows you to add CI/CD configuration from other files, +including template files built into GitLab. + +Sample GitHub Actions configuration: + +- uses: hashicorp/setup-terraform@v2.0.3 + + +The equivalent GitLab CI/CD configuration would be: + +include: + - template: Terraform.gitlab-ci.yml + + +In these examples, the setup-terraform GitHub action and the Terraform.gitlab-ci.yml GitLab template +are not exact matches. These two examples are just to show how complex configuration can be reused. + +Security Scanning features + + +GitLab provides a variety of security scanners +out-of-the-box to detect vulnerabilities in all parts of the SDLC. You can add these features +to your GitLab CI/CD pipeline by using templates. + +For example, to add SAST scanning to your pipeline, add the following to your .gitlab-ci.yml: + +include: + - template: Jobs/SAST.gitlab-ci.yml + + +You can customize the behavior of security scanners by using CI/CD variables, for example +with the SAST scanners. + +Secrets Management + + +Privileged information, often referred to as “secrets”, is sensitive information +or credentials you need in your CI/CD workflow.
You might use secrets to unlock protected resources +or sensitive information in tools, applications, containers, and cloud-native environments. + +For secrets management in GitLab, you can use one of the supported integrations +for an external service. These services securely store secrets outside of your GitLab project, +though you must have a subscription for the service: + + + +HashiCorp Vault. + +Azure Key Vault. + + +GitLab also supports OIDC authentication +for other third party services that support OIDC. + +Additionally, you can make credentials available to jobs by storing them in CI/CD variables, though secrets +stored in plain text are susceptible to accidental exposure. You should always store sensitive information +in masked and protected +variables, which mitigates some of the risk. + +Also, never store secrets as variables in your .gitlab-ci.yml file, which is public to all +users with access to the project. Storing sensitive information in variables should +only be done in the project, group, or instance settings. + +Review the security guidelines to improve +the safety of your CI/CD variables. + +Planning and Performing a Migration + + +The following list of recommended steps was created after observing organizations +that were able to quickly complete this migration. + +Create a Migration Plan + + +Before starting a migration you should create a migration plan to make preparations for the migration. + +Prerequisites + + +Before doing any migration work, you should first: + + + Get familiar with GitLab. + + Read about the key GitLab CI/CD features. + Follow tutorials to create your first GitLab pipeline and more complex pipelines that build, test, and deploys a static site. + Review the CI/CD YAML syntax reference. + + + Set up and configure GitLab. + Test your GitLab instance. + + Ensure runners are available, either by using shared GitLab.com runners or installing new runners. + + + + +Migration Steps + + + + Migrate Projects from GitHub to GitLab: + + (Recommended) You can use the GitHub Importer +to automate mass imports from external SCM providers. + You can import repositories by URL. + + + Create a .gitlab-ci.yml in each project. + Migrate GitHub Actions jobs to GitLab CI/CD jobs and configure them to show results directly in merge requests. + Migrate deployment jobs by using cloud deployment templates, +environments, and the GitLab agent for Kubernetes. + Check if any CI/CD configuration can be reused across different projects, then create +and share CI/CD templates + + Check the pipeline efficiency documentation +to learn how to make your GitLab CI/CD pipelines faster and more efficient. + + +Additional Resources + + + + Video: How to migrate from GitHub to GitLab including Actions + Blog: GitHub to GitLab migration the easy way + + +If you have questions that are not answered here, the GitLab community forum can be a great resource. + + +2. Get started with GitLab CI/CD + + + +Get started with GitLab CI/CD + + + +Tier: Free, Premium, Ultimate +Offering: GitLab.com, Self-managed, GitLab Dedicated + +CI/CD is a continuous method of software development, where you continuously build, +test, deploy, and monitor iterative code changes. + +This iterative process helps reduce the chance that you develop new code based on +buggy or failed previous versions. GitLab CI/CD can catch bugs early in the development cycle, +and help ensure that all the code deployed to production complies with your established code standards. 
+ +Common terms + + +If you’re new to GitLab CI/CD, start by reviewing some of the commonly used terms. + +The .gitlab-ci.yml file + + +To use GitLab CI/CD, you start with a .gitlab-ci.yml file at the root of your project +which contains the configuration for your CI/CD pipeline. This file follows the YAML format +and has its own syntax. + +You can name this file anything you want, but .gitlab-ci.yml is the most common name. + +Get started: + + + +Create your first .gitlab-ci.yml file. + View all the possible keywords that you can use in the .gitlab-ci.yml file in +the CI/CD YAML syntax reference. + Use the pipeline editor to edit or visualize +your CI/CD configuration. + + +Runners + + +Runners are the agents that run your jobs. These agents can run on physical machines or virtual instances. +In your .gitlab-ci.yml file, you can specify a container image you want to use when running the job. +The runner loads the image, clones your project and runs the job either locally or in the container. + +If you use GitLab.com, runners on Linux, Windows, and macOS are already available for use. And you can register your own +runners on GitLab.com if you’d like. + +If you don’t use GitLab.com, you can: + + + Register runners or use runners already registered for your self-managed instance. + Create a runner on your local machine. + + +Get started: + + + +Create a runner on your local machine. + +Learn more about runners. + + +Pipelines + + +Pipelines are made up of jobs and stages: + + + +Jobs define what you want to do. For example, test code changes, or deploy +to a staging environment. + Jobs are grouped into stages. Each stage contains at least one job. +Typical stages might be build, test, and deploy. + + +Get started: + + + +Learn more about pipelines. + + +CI/CD variables + + +CI/CD variables help you customize jobs by making values defined elsewhere accessible to jobs. +They can be hard-coded in your .gitlab-ci.yml file, project settings, or dynamically generated. + +Get started: + + + +Learn more about CI/CD variables. + +Learn about dynamically generated predefined variables. + + +CI/CD components + + +A CI/CD component is a reusable single pipeline configuration unit. Use them to compose an entire pipeline configuration or a small part of a larger pipeline. + +Get started: + + + +Learn more about CI/CD components. + + +Videos + + + + + GitLab CI/CD demo. + + GitLab CI/CD and the Web IDE. + Webcast: Mastering continuous software development. + + +Related topics + + + + +Five teams that made the switch to GitLab CI/CD. + +Make the case for CI/CD in your organization. + Learn how Verizon reduced rebuilds from 30 days to under 8 hours with GitLab. + Use the GitLab Workflow VS Code extension to +validate your configuration +and view your pipeline status. + + + +" +how do i push an existing folder into a newly created gitlab repository?,,"1. Use Git to add a file to a repository + + + +Use Git to add a file to a repository + + +To add a new file from the command line: + + + Open a terminal. + + Change directories until you are in your project’s folder. + + +cd my-project + + + Choose a Git branch to work in. + + To create a branch: git checkout -b + + To switch to an existing branch: git checkout + + + + Copy the file you want to add into the directory where you want to add it. + Confirm that your file is in the directory: + + Windows: dir + + All other operating systems: ls + + + + The filename should be displayed. 
 + + + Check the status of the file: + + +git status + + + The filename should be in red. The file is in your file system, but Git isn’t tracking it yet. + + + Tell Git to track the file: + + +git add <file_name> + + + + Check the status of the file again: + + +git status + + + The filename should be green. The file is tracked locally by Git, but +has not been committed and pushed. + + + Commit the file to your local copy of the project’s Git repository: + + +git commit -m ""Describe the reason for your commit here"" + + + + Push your changes from your copy of the repository to GitLab. +In this command, origin refers to the remote copy of the repository. +Replace <branch> with the name of your branch: + + +git push origin <branch> + + + + Git prepares, compresses, and sends the data. Lines from the remote repository +start with remote:: + + +Enumerating objects: 9, done. +Counting objects: 100% (9/9), done. +Delta compression using up to 10 threads +Compressing objects: 100% (5/5), done. +Writing objects: 100% (5/5), 1.84 KiB | 1.84 MiB/s, done. +Total 5 (delta 3), reused 0 (delta 0), pack-reused 0 +remote: +remote: To create a merge request for <branch>, visit: +remote: https://gitlab.com/gitlab-org/gitlab/-/merge_requests/new?merge_request%5Bsource_branch%5D=<branch> +remote: +To https://gitlab.com/gitlab-org/gitlab.git + * [new branch] <branch> -> <branch> +branch '<branch>' set up to track 'origin/<branch>'. + + + + +Your file is copied from your local copy of the repository to the remote +repository. + +To create a merge request, copy the link sent back from the remote +repository and paste it into a browser window. + +Add a file to the last commit + + +git add <file_name> +git commit --amend + + +Append --no-edit to the commit command if you do not want to edit the commit +message. + +Related topics + + + + Add file from the UI + Add file from the Web IDE + + + +2. Command line Git + + + +Command line Git + + +Git is an open-source distributed version control system. GitLab is built +on top of Git. + +You can do many Git operations directly in GitLab. However, the command line is required for advanced tasks, +like fixing complex merge conflicts or rolling back commits. + +If you’re new to Git and want to learn by working in your own project, +learn how to make your first commit. + +For a quick reference of Git commands, download a Git Cheat Sheet. + +For more information about the advantages of working with Git and GitLab: + + + + Watch the GitLab Source Code Management Walkthrough video. + Learn how GitLab became the backbone of the Worldline development environment. + + +To help you visualize what you’re doing locally, you can install a +Git GUI app. + +Choose a terminal + + +To execute Git commands on your computer, you must open a terminal (also known as command +prompt, command shell, and command line). Here are some options: + + + For macOS users: + + Built-in Terminal. Press ⌘ command + space and type terminal. + +iTerm2. You can integrate it with Zsh and Oh My Zsh for color highlighting and other advanced features. + + + For Windows users: + + Built-in command line. On the Windows taskbar, select the search icon and type cmd. + +PowerShell. + Git Bash. It is built into Git for Windows. + + + For Linux users: + + Built-in Linux Terminal. + + + + +Confirm Git is installed + + +You can determine if Git is already installed on your computer by opening a terminal +and running this command: + +git --version + + +If Git is installed, the output is: + +git version X.Y.Z + + +If your computer doesn’t recognize git as a command, you must install Git.
+ +Configure Git + + +To start using Git from your computer, you must enter your credentials +to identify yourself as the author of your work. The full name and email address +should match the ones you use in GitLab. + + + + In your shell, add your full name: + + +git config --global user.name ""John Doe"" + + + + Add your email address: + + +git config --global user.email ""your_email_address@example.com"" + + + + To check the configuration, run: + + +git config --global --list + + + The --global option tells Git to always use this information for anything you do on your system. +If you omit --global or use --local, the configuration applies only to the current +repository. + + + +You can read more on how Git manages configurations in the +Git configuration documentation. + +Choose a repository + + +Before you begin, choose the repository you want to work in. You can use any project you have permission to +access on GitLab.com or any other GitLab instance. + +To use the repository in the examples on this page: + + + Go to https://gitlab.com/gitlab-tests/sample-project/. + In the upper-right corner, select Fork. + Choose a namespace for your fork. + + +The project becomes available at https://gitlab.com//sample-project/. + +You can fork any project you have access to. + +Clone a repository + + +When you clone a repository, the files from the remote repository are downloaded to your computer, +and a connection is created. + +This connection requires you to add credentials. You can either use SSH or HTTPS. SSH is recommended. + +Clone with SSH + + +Clone with SSH when you want to authenticate only one time. + + + Authenticate with GitLab by following the instructions in the SSH documentation. + On the left sidebar, select Search or go to and find the project you want to clone. + On the project’s overview page, in the upper-right corner, select Code, then copy the URL for Clone with SSH. + Open a terminal and go to the directory where you want to clone the files. +Git automatically creates a folder with the repository name and downloads the files there. + + Run this command: + + +git clone git@gitlab.com:gitlab-tests/sample-project.git + + + + To view the files, go to the new directory: + + +cd sample-project + + + + +You can also +clone a repository and open it directly in Visual Studio Code. + +Clone with HTTPS + + +Clone with HTTPS when you want to authenticate each time you perform an operation +between your computer and GitLab. +OAuth credential helpers can decrease the number of times you must manually authenticate, making HTTPS a seamless experience. + + + On the left sidebar, select Search or go to and find the project you want to clone. + On the project’s overview page, in the upper-right corner, select Code, then copy the URL for Clone with HTTPS. + Open a terminal and go to the directory where you want to clone the files. + + Run the following command. Git automatically creates a folder with the repository name and downloads the files there. + + +git clone https://gitlab.com/gitlab-tests/sample-project.git + + + + GitLab requests your username and password. + + If you have enabled two-factor authentication (2FA) on your account, you cannot use your account password. Instead, you can do one of the following: + + + +Clone using a token with read_repository or write_repository permissions. + Install an OAuth credential helper. + + + If you have not enabled 2FA, use your account password. 
+ + + To view the files, go to the new directory: + + +cd sample-project + + + + + + note On Windows, if you enter your password incorrectly multiple times and an Access denied message appears, +add your namespace (username or group) to the path: +git clone https://namespace@gitlab.com/gitlab-org/gitlab.git. + + +Clone using a token + + +Clone with HTTPS using a token if: + + + You want to use 2FA. + You want to have a revocable set of credentials scoped to one or more repositories. + + +You can use any of these tokens to authenticate when cloning over HTTPS: + + + +Personal access tokens. + +Deploy tokens. + +Project access tokens. + +Group access tokens. + + +git clone https://:@gitlab.example.com/tanuki/awesome_project.git + + +Convert a local directory into a repository + + +You can initialize a local folder so Git tracks it as a repository. + + + Open the terminal in the directory you’d like to convert. + + Run this command: + + +git init + + + A .git folder is created in your directory. This folder contains Git +records and configuration files. You should not edit these files +directly. + + Add the path to your remote repository +so Git can upload your files into the correct project. + + +Add a remote + + +You add a “remote” to tell Git which remote repository in GitLab is tied +to the specific local folder on your computer. +The remote tells Git where to push or pull from. + +To add a remote to your local copy: + + + In GitLab, create a project to hold your files. + Visit this project’s homepage, scroll down to Push an existing folder, and copy the command that starts with git remote add. + + On your computer, open the terminal in the directory you’ve initialized, paste the command you copied, and press enter: + + +git remote add origin git@gitlab.com:username/projectpath.git + + + + +After you’ve done that, you can stage your files and upload them to GitLab. + +View your remote repositories + + +To view your remote repositories, type: + +git remote -v + + +The -v flag stands for verbose. + +Download the latest changes in the project + + +To work on an up-to-date copy of the project, you pull to get all the changes made by users +since the last time you cloned or pulled the project. Replace +with the name of your default branch +to get the main branch code, or replace it with the branch name of the branch +you are currently working in. + +git pull + + +When you clone a repository, REMOTE is typically origin. This is where the +repository was cloned from, and it indicates the SSH or HTTPS URL of the repository +on the remote server. is usually the name of your +default branch, but it may be any +existing branch. You can create additional named remotes and branches as necessary. + +You can learn more on how Git manages remote repositories in the +Git Remote documentation. + +Add another URL to a remote + + +Add another URL to a remote, so both remotes get updated on each push: + +git remote set-url --add + + +Display changes to Git references + + +A Git reference is a name that points to a specific commit, or to another reference. +The reference HEAD is special. It usually points to a reference which points to the tip +of the current working branch: + +$ git show HEAD +commit ab123c (HEAD -> main, origin/main, origin/HEAD) + + +When a reference is changed in the local repository, Git records the change +in its reference logs. You can display the contents of the reference logs +if you need to find the old values of a reference. 
For example, you might want +to display the changes to HEAD in order to undo a change. + +To display the list of changes to HEAD: + +git reflog + + +Check the Git history of a file + + +The basic command to check the Git history of a file: + +git log + + +If you get this error message: + +fatal: ambiguous argument : unknown revision or path not in the working tree. +Use '--' to separate paths from revisions, like this: + + +Use this to check the Git history of the file: + +git log -- + + +Check the content of each change to a file + + +gitk + + +Branches + + +A branch is a copy of the files in the repository at the time you create the branch. +You can work in your branch without affecting other branches. When +you’re ready to add your changes to the main codebase, you can merge your branch into +the default branch, for example, main. + +Use branches when you: + + + Want to add code to a project but you’re not sure if it works properly. + Are collaborating on the project with others, and don’t want your work to get mixed up. + + +A new branch is often called feature branch to differentiate from the +default branch. + +Create a branch + + +To create a feature branch: + +git checkout -b + + +GitLab enforces branch naming rules +to prevent problems, and provides +branch naming patterns +to streamline merge request creation. + +Switch to a branch + + +All work in Git is done in a branch. +You can switch between branches to see the state of the files and work in that branch. + +To switch to an existing branch: + +git checkout + + +For example, to change to the main branch: + +git checkout main + + +View differences + + +To view the differences between your local unstaged changes and the latest version +that you cloned or pulled: + +git diff + + +View the files that have changes + + +When you add, change, or delete files or folders, Git knows about the changes. +To check which files have been changed: + +git status + + +Add and commit local changes + + +When you type git status, locally changed files are shown in red. These changes may +be new, modified, or deleted files or folders. + + + + To stage a file for commit: + + +git add + + + + Repeat step 1 for each file or folder you want to add. +Or, to stage all files in the current directory and subdirectory, type git add .. + + + Confirm that the files have been added to staging: + + +git status + + + The files should be displayed in green text. + + + To commit the staged files: + + +git commit -m ""COMMENT TO DESCRIBE THE INTENTION OF THE COMMIT"" + + + + +Stage and commit all changes + + +As a shortcut, you can add all local changes to staging and commit them with one command: + +git commit -a -m ""COMMENT TO DESCRIBE THE INTENTION OF THE COMMIT"" + + +Send changes to GitLab + + +To push all local changes to the remote repository: + +git push + + +For example, to push your local commits to the main branch of the origin remote: + +git push origin main + + +Sometimes Git does not allow you to push to a repository. Instead, +you must force an update. + +Delete all changes in the branch + + +To discard all changes to tracked files: + +git checkout . + + +This action removes changes to files, not the files themselves. +Untracked (new) files do not change. 
+ +Unstage all changes that have been added to the staging area + + +To unstage (remove) all files that have not been committed: + +git reset + + +Undo most recent commit + + +To undo the most recent commit: + +git reset HEAD~1 + + +This action leaves the changed files and folders unstaged in your local repository. + + + caution A Git commit should not be reversed if you already pushed it +to the remote repository. Although you can undo a commit, the best option is to avoid +the situation altogether by working carefully. + + +You can learn more about the different ways Git can undo changes in the +Git Undoing Things documentation. + +Merge a branch with default branch + + +When you are ready to add your changes to +the default branch, you merge the feature branch into it: + +git checkout +git merge + + +In GitLab, you typically use a merge request to merge your changes, instead of using the command line. + +To create a merge request from a fork to an upstream repository, see the +forking workflow. + +Reuse recorded resolutions + + +To reuse recorded resolutions: + +git rerere + + +To enable rerere functionality: + +git config --global rerere.enabled true + + +Advanced use of Git through the command line + + +For an introduction of more advanced Git techniques, see Git rebase, force-push, and merge conflicts. + +Synchronize changes in a forked repository with the upstream + + +To create a copy of a repository in your namespace, you fork it. +Changes made to your copy of the repository are not automatically synchronized with the original. +To keep the project in sync with the original project, you need to pull from the original repository. + +You must create a link to the remote repository to pull +changes from the original repository. It is common to call this remote repository the upstream. + +You can now use the upstream as a to pull new updates +from the original repository, and use the origin +to push local changes and create merge requests. 
+ + +" \ No newline at end of file diff --git a/evaluation_scripts/chat/evaluate_docs.py b/evaluation_scripts/chat/evaluate_docs.py new file mode 100644 index 00000000..b2a29a9d --- /dev/null +++ b/evaluation_scripts/chat/evaluate_docs.py @@ -0,0 +1,60 @@ +import os +import requests +from dotenv import load_dotenv +from langsmith import traceable, wrappers +from langchain_openai import ChatOpenAI +from langchain_core.prompts import ChatPromptTemplate +from langchain_core.output_parsers import StrOutputParser +from langsmith.evaluation import evaluate, LangChainStringEvaluator + +# Load environment variables from .env file +load_dotenv() + +@traceable +def get_chat_answer(question, context): + base_url = os.getenv("GITLAB_BASE_URL") + url = f"{base_url}/api/v4/chat/completions" + headers = { + "Content-Type": "application/json", + "PRIVATE-TOKEN": os.getenv("GITLAB_PRIVATE_TOKEN"), + } + payload = { + "content": f"{context} {question}", + "with_clean_history": True + } + response = requests.post(url, json=payload, headers=headers) + if response.status_code == 201: + return response.json().get("choices", [{}])[0].get("message", {}).get("content", "") + else: + raise Exception(f"Error: {response.status_code} - {response.text}") + +def main(): + prompt = ChatPromptTemplate.from_messages([ + ("system", "Please provide a concise answer to the user question considering the given context."), + ("user", "{context}\n\n{question}") + ]) + + chat_model = ChatOpenAI() + output_parser = StrOutputParser() + + chain = prompt | chat_model | output_parser + + data = "duo_chat_documentation_questions_context" + + evaluators = [ + LangChainStringEvaluator("cot_qa"), + LangChainStringEvaluator("labeled_criteria", config={"criteria": "conciseness"}), + LangChainStringEvaluator("labeled_criteria", config={"criteria": "helpfulness"}) + ] + + results = evaluate( + lambda inputs: get_chat_answer(inputs['question'], inputs['context']), + data=data, + evaluators=evaluators, + experiment_prefix="duo_chat_documentation_questions_context", + ) + + print(results) + +if __name__ == "__main__": + main() \ No newline at end of file -- GitLab From 4a780de966a88578a0846789687081de641db4af Mon Sep 17 00:00:00 2001 From: David O'Regan Date: Tue, 18 Jun 2024 10:01:00 +0200 Subject: [PATCH 2/3] feat: add docs set and docs eval --- .DS_Store | Bin 0 -> 6148 bytes evaluation_scripts/.DS_Store | Bin 0 -> 6148 bytes evaluation_scripts/chat/evaluate_docs.py | 52 +++++++++++------------ 3 files changed, 24 insertions(+), 28 deletions(-) create mode 100644 .DS_Store create mode 100644 evaluation_scripts/.DS_Store diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..06e8a4ca594f20148175e47f6220604199ce3493 GIT binary patch literal 6148 zcmZQzU|@7AO)+F(5MW?n;9!8z3~dZp0Z1N%F(jFwB8(vOz-E*(Br=pRWHRJ4K zQ6RlS45d(Uij$M_^K(EN2pKvmKN0&G1V%#u76Qx=h5)GkcV%F})&Ga6 z8YM?VU^E1VWe6~`xCFa6fh%R~{sYyup!zfcs^$Px86&7(hKPZbfU0(Ib<6}ARFr@! 
f1BrvQgJ^Jd%*en1uFXdq1F#SprAI@6z99erSxXrl literal 0 HcmV?d00001 diff --git a/evaluation_scripts/.DS_Store b/evaluation_scripts/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..ec1fee639ef65c97651f697574c4225fde6c3ccb GIT binary patch literal 6148 zcmZQzU|@7AO)+F(5MW?n;9!8z45|#6fRTZLfrTNNA%h{2p(HJ(I5{alKL;epz`(!+ zRSx1Zg3LgZhf0r9qaiRF0;3@?8UmvsFd71bC$b;@W6jE2DA2mw&} zpaAMCFfcGUKlfnG}h#HVqkQ$Iy5Dn7GzzAZ256I6Ne8$b#4Mw z4#Wmk#f+dj8KMVV6~h_Vbq}tJ86j;55Fey{v>^Zspiz1>1O^}k0Iwes AnE(I) literal 0 HcmV?d00001 diff --git a/evaluation_scripts/chat/evaluate_docs.py b/evaluation_scripts/chat/evaluate_docs.py index b2a29a9d..585b1fdf 100644 --- a/evaluation_scripts/chat/evaluate_docs.py +++ b/evaluation_scripts/chat/evaluate_docs.py @@ -1,11 +1,10 @@ import os import requests +import langsmith from dotenv import load_dotenv -from langsmith import traceable, wrappers -from langchain_openai import ChatOpenAI -from langchain_core.prompts import ChatPromptTemplate -from langchain_core.output_parsers import StrOutputParser -from langsmith.evaluation import evaluate, LangChainStringEvaluator +from langsmith import traceable +from langchain.schema import output_parser +from langsmith.evaluation import evaluate, LangChainStringEvaluator, LangChainContextualEvaluator # Load environment variables from .env file load_dotenv() @@ -19,42 +18,39 @@ def get_chat_answer(question, context): "PRIVATE-TOKEN": os.getenv("GITLAB_PRIVATE_TOKEN"), } payload = { - "content": f"{context} {question}", + "content": f"Question: {question}\n\nContext: {context}", "with_clean_history": True } response = requests.post(url, json=payload, headers=headers) if response.status_code == 201: - return response.json().get("choices", [{}])[0].get("message", {}).get("content", "") + return response.json() else: raise Exception(f"Error: {response.status_code} - {response.text}") def main(): - prompt = ChatPromptTemplate.from_messages([ - ("system", "Please provide a concise answer to the user question considering the given context."), - ("user", "{context}\n\n{question}") - ]) - - chat_model = ChatOpenAI() - output_parser = StrOutputParser() - - chain = prompt | chat_model | output_parser - - data = "duo_chat_documentation_questions_context" + # Initialize evaluators + exact_match_evaluator = LangChainStringEvaluator("exact_match") + relevance_evaluator = LangChainContextualEvaluator("relevance") + completeness_evaluator = LangChainContextualEvaluator("completeness") + correctness_evaluator = LangChainContextualEvaluator("correctness") + # Combine evaluators into a list evaluators = [ - LangChainStringEvaluator("cot_qa"), - LangChainStringEvaluator("labeled_criteria", config={"criteria": "conciseness"}), - LangChainStringEvaluator("labeled_criteria", config={"criteria": "helpfulness"}) + exact_match_evaluator, + relevance_evaluator, + completeness_evaluator, + correctness_evaluator ] - - results = evaluate( + + # Evaluate using the combined evaluators + chain_results = evaluate( lambda inputs: get_chat_answer(inputs['question'], inputs['context']), - data=data, - evaluators=evaluators, - experiment_prefix="duo_chat_documentation_questions_context", + data="duo_chat_documentation_questions_context", # Dataset name + evaluators=evaluators, # Use the combined evaluators + experiment_prefix="Enhanced Duo Chat Documentation Evaluation", ) - - print(results) + + print(chain_results) if __name__ == "__main__": main() \ No newline at end of file -- GitLab From a9ae2a5f85fd4e68a294082d5ef764dc9cdb598f Mon Sep 17 00:00:00 2001 
From: David O'Regan Date: Tue, 18 Jun 2024 14:03:59 +0200 Subject: [PATCH 3/3] feat: add docs set and docs eval --- evaluation_scripts/chat/evaluate_docs.py | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/evaluation_scripts/chat/evaluate_docs.py b/evaluation_scripts/chat/evaluate_docs.py index 585b1fdf..f083c66f 100644 --- a/evaluation_scripts/chat/evaluate_docs.py +++ b/evaluation_scripts/chat/evaluate_docs.py @@ -4,7 +4,7 @@ import langsmith from dotenv import load_dotenv from langsmith import traceable from langchain.schema import output_parser -from langsmith.evaluation import evaluate, LangChainStringEvaluator, LangChainContextualEvaluator +from langsmith.evaluation import evaluate, LangChainStringEvaluator # Load environment variables from .env file load_dotenv() @@ -28,28 +28,33 @@ def get_chat_answer(question, context): raise Exception(f"Error: {response.status_code} - {response.text}") def main(): + # Retrieve the dataset name from environment variable + dataset_name = os.getenv("DATASET_NAME", "duo_chat_documentation_questions_context") + # Initialize evaluators exact_match_evaluator = LangChainStringEvaluator("exact_match") - relevance_evaluator = LangChainContextualEvaluator("relevance") - completeness_evaluator = LangChainContextualEvaluator("completeness") - correctness_evaluator = LangChainContextualEvaluator("correctness") - + context_qa_evaluator = LangChainStringEvaluator("context_qa") + relevance_evaluator = LangChainStringEvaluator("criteria", config={"criteria": "relevance"}) + completeness_evaluator = LangChainStringEvaluator("criteria", config={"criteria": "completeness"}) + correctness_evaluator = LangChainStringEvaluator("criteria", config={"criteria": "correctness"}) + # Combine evaluators into a list evaluators = [ exact_match_evaluator, + context_qa_evaluator, relevance_evaluator, completeness_evaluator, correctness_evaluator ] - + # Evaluate using the combined evaluators chain_results = evaluate( lambda inputs: get_chat_answer(inputs['question'], inputs['context']), - data="duo_chat_documentation_questions_context", # Dataset name + data=dataset_name, # Use the dataset name from the environment variable evaluators=evaluators, # Use the combined evaluators experiment_prefix="Enhanced Duo Chat Documentation Evaluation", ) - + print(chain_results) if __name__ == "__main__": -- GitLab
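One possible way to exercise the new evaluation script is a scheduled GitLab CI/CD job. The following is a minimal, untested sketch, not part of this patch: the job name, Python image tag, and dependency list are assumptions, and it presumes GITLAB_BASE_URL, GITLAB_PRIVATE_TOKEN, LANGCHAIN_API_KEY, and OPENAI_API_KEY are configured as masked CI/CD variables in the project settings.

evaluate-duo-chat-docs:
  image: python:3.11
  rules:
    # Run only from a scheduled pipeline to avoid spending evaluation credits on every push.
    - if: $CI_PIPELINE_SOURCE == 'schedule'
  variables:
    # The final revision of evaluate_docs.py reads the dataset name from this variable.
    DATASET_NAME: duo_chat_documentation_questions_context
  script:
    # Assumed dependency set for the imports used in evaluate_docs.py.
    - pip install requests python-dotenv langsmith langchain langchain-openai
    - python evaluation_scripts/chat/evaluate_docs.py

The rules clause and variable names are only a sketch of how the script could be wired into CI; adjust the package versions and secret names to match the environment where the evaluation actually runs.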