diff --git a/docs/_releasenotes/1808.feature.1.nix-helm-fluxcd b/docs/_releasenotes/1808.feature.1.nix-helm-fluxcd
new file mode 100644
index 0000000000000000000000000000000000000000..e79ac5d603ed7954175573c18d15e813c220465e
--- /dev/null
+++ b/docs/_releasenotes/1808.feature.1.nix-helm-fluxcd
@@ -0,0 +1 @@
+Arbitrary Helm values can now be set for FluxCD
diff --git a/docs/user/reference/options/yk8s.k8s-service-layer.fluxcd.rst b/docs/user/reference/options/yk8s.k8s-service-layer.fluxcd.rst
index deff9244cb16f738bf562b3c3402158feb1e11f7..ae42720b665ce4e3f781eac9f5e13f33497a0404 100644
--- a/docs/user/reference/options/yk8s.k8s-service-layer.fluxcd.rst
+++ b/docs/user/reference/options/yk8s.k8s-service-layer.fluxcd.rst
@@ -35,11 +35,34 @@ Whether to enable Flux management.
 https://gitlab.com/alasca.cloud/tarook/tarook/-/tree/devel/nix/yk8s/k8s-supplements/fluxcd.nix


-.. _configuration-options.yk8s.k8s-service-layer.fluxcd.helm_repo_url:
+.. _configuration-options.yk8s.k8s-service-layer.fluxcd.helm.chart_ref:

-``yk8s.k8s-service-layer.fluxcd.helm_repo_url``
-###############################################
+``yk8s.k8s-service-layer.fluxcd.helm.chart_ref``
+################################################
+The chart reference (relative to the repository) of the fluxcd Helm chart.
+
+
+**Type:**::
+
+  RFC3986 relative URL path
+
+
+**Default:**::
+
+  "flux2"
+
+
+**Declared by**
+https://gitlab.com/alasca.cloud/tarook/tarook/-/tree/devel/nix/yk8s/k8s-supplements/fluxcd.nix
+
+
+.. _configuration-options.yk8s.k8s-service-layer.fluxcd.helm.chart_repo_url:
+
+``yk8s.k8s-service-layer.fluxcd.helm.chart_repo_url``
+#####################################################
+
+The URL to the Helm repository for the fluxcd Helm chart.


 **Type:**::

@@ -56,36 +79,63 @@ https://gitlab.com/alasca.cloud/tarook/tarook/-/tree/devel/nix/yk8s/k8s-suppleme
 https://gitlab.com/alasca.cloud/tarook/tarook/-/tree/devel/nix/yk8s/k8s-supplements/fluxcd.nix


-.. _configuration-options.yk8s.k8s-service-layer.fluxcd.install:
+.. _configuration-options.yk8s.k8s-service-layer.fluxcd.helm.chart_version:

-``yk8s.k8s-service-layer.fluxcd.install``
-#########################################
+``yk8s.k8s-service-layer.fluxcd.helm.chart_version``
+####################################################

-If enabled, choose whether to install or uninstall fluxcd2. IF SET TO
-FALSE, FLUXCD2 WILL BE DELETED WITHOUT CHECKING FOR DISRUPTION.
+Version of the fluxcd Helm chart to be used.
+
+If the version should be unpinned, set it to ``null``.


 **Type:**::

-  boolean
+  null or Helm chart version (Semantic version 2 string or OCI image tag)


 **Default:**::

-  true
+  "2.15.0"
+
+
+**Example:**::
+
+  "1.2.3"
+
+
+**Declared by**
+https://gitlab.com/alasca.cloud/tarook/tarook/-/tree/devel/nix/yk8s/k8s-supplements/fluxcd.nix
+
+
+.. _configuration-options.yk8s.k8s-service-layer.fluxcd.helm.release_name:
+
+``yk8s.k8s-service-layer.fluxcd.helm.release_name``
+###################################################
+
+The release name inside the cluster for fluxcd.
+
+
+**Type:**::
+
+  non-empty string
+
+
+**Default:**::
+
+  "flux2"


 **Declared by**
 https://gitlab.com/alasca.cloud/tarook/tarook/-/tree/devel/nix/yk8s/k8s-supplements/fluxcd.nix


-.. _configuration-options.yk8s.k8s-service-layer.fluxcd.namespace:
+.. _configuration-options.yk8s.k8s-service-layer.fluxcd.helm.release_namespace:

-``yk8s.k8s-service-layer.fluxcd.namespace``
-###########################################
+``yk8s.k8s-service-layer.fluxcd.helm.release_namespace``
+########################################################

-Namespace to deploy the flux-system in (will be created if it does not exist, but
-never deleted).
+The namespace in which to install fluxcd.


 **Type:**::
@@ -102,45 +152,72 @@ never deleted).
 https://gitlab.com/alasca.cloud/tarook/tarook/-/tree/devel/nix/yk8s/k8s-supplements/fluxcd.nix


-.. _configuration-options.yk8s.k8s-service-layer.fluxcd.scheduling_key:
+.. _configuration-options.yk8s.k8s-service-layer.fluxcd.helm.values:

-``yk8s.k8s-service-layer.fluxcd.scheduling_key``
-################################################
+``yk8s.k8s-service-layer.fluxcd.helm.values``
+#############################################

-Scheduling key for the flux instance and its resources. Has no
-default.
+Helm values for the fluxcd Helm chart.
+
+Some values are set by default through Tarook, but arbitrary additional values can be supplied.
+For a full list of possible values, see
+https://github.com/fluxcd-community/helm-charts/blob/main/charts/flux2/values.yaml


 **Type:**::

-  null or Kubernetes label
+  JSON value


 **Default:**::

-  null
+  { }


 **Declared by**
 https://gitlab.com/alasca.cloud/tarook/tarook/-/tree/devel/nix/yk8s/k8s-supplements/fluxcd.nix


-.. _configuration-options.yk8s.k8s-service-layer.fluxcd.version:
+.. _configuration-options.yk8s.k8s-service-layer.fluxcd.install:

-``yk8s.k8s-service-layer.fluxcd.version``
+``yk8s.k8s-service-layer.fluxcd.install``
 #########################################

-Helm chart version of FluxCD to be deployed.
+If enabled, choose whether to install or uninstall fluxcd2. IF SET TO
+FALSE, FLUXCD2 WILL BE DELETED WITHOUT CHECKING FOR DISRUPTION.


 **Type:**::

-  OCI image tag
+  boolean


 **Default:**::

-  "2.15.0"
+  true
+
+
+**Declared by**
+https://gitlab.com/alasca.cloud/tarook/tarook/-/tree/devel/nix/yk8s/k8s-supplements/fluxcd.nix
+
+
+.. _configuration-options.yk8s.k8s-service-layer.fluxcd.scheduling_key:
+
+``yk8s.k8s-service-layer.fluxcd.scheduling_key``
+################################################
+
+Scheduling key for the flux instance and its resources. Has no
+default.
+
+
+**Type:**::
+
+  null or Kubernetes label
+
+
+**Default:**::
+
+  null


 **Declared by**
 https://gitlab.com/alasca.cloud/tarook/tarook/-/tree/devel/nix/yk8s/k8s-supplements/fluxcd.nix
diff --git a/k8s-supplements/ansible/roles/fluxcd2_v2/tasks/main.yaml b/k8s-supplements/ansible/roles/fluxcd2_v2/tasks/main.yaml
index 9cc5295f21d84edf5feca746314fe0b4068bfe2a..cbf1c65e4194d2890c27af6f22b1245a6b53519b 100644
--- a/k8s-supplements/ansible/roles/fluxcd2_v2/tasks/main.yaml
+++ b/k8s-supplements/ansible/roles/fluxcd2_v2/tasks/main.yaml
@@ -6,7 +6,7 @@
       apiVersion: v1
       kind: Namespace
       metadata:
-        name: "{{ fluxcd_namespace }}"
+        name: "{{ fluxcd_helm_release_namespace }}"
         labels:
           app.kubernetes.io/instance: flux-system
           app.kubernetes.io/part-of: flux
@@ -22,14 +22,12 @@
   delay: "{{ error_delay }}"

 - name: Install fluxcdv2
-  vars:
-    scheduling_key: "{{ fluxcd_scheduling_key | default(None) }}"
   kubernetes.core.helm:
-    chart_repo_url: "{{ fluxcd_helm_repo_url }}"
-    chart_ref: flux2
-    release_namespace: "{{ fluxcd_namespace }}"
-    release_name: flux2
+    chart_repo_url: "{{ fluxcd_helm_chart_repo_url }}"
+    chart_ref: "{{ fluxcd_helm_chart_ref }}"
+    release_namespace: "{{ fluxcd_helm_release_namespace }}"
+    release_name: "{{ fluxcd_helm_release_name }}"
     release_state: "{{ fluxcd_install | ternary('present', 'absent') }}"
-    values: "{{ lookup('template', 'values.yaml.j2') | from_yaml }}"
-    chart_version: "{{ fluxcd_version }}"
+    values: "{{ fluxcd_helm_values | to_json }}"
+    chart_version: "{{ fluxcd_helm_chart_version }}"
 ...
diff --git a/k8s-supplements/ansible/roles/fluxcd2_v2/templates/values.yaml.j2 b/k8s-supplements/ansible/roles/fluxcd2_v2/templates/values.yaml.j2
deleted file mode 100644
index a2ece8ae109419726b9db882d1db0237b56d7b75..0000000000000000000000000000000000000000
--- a/k8s-supplements/ansible/roles/fluxcd2_v2/templates/values.yaml.j2
+++ /dev/null
@@ -1,30 +0,0 @@
-helmController:
-  priorityClassName: system-cluster-critical
-  affinity: {{ lookup('template', 'roles/config/common_defaults_v1/templates/affinity.json.j2') | to_json }}
-  tolerations: {{ lookup('template', 'roles/config/common_defaults_v1/templates/tolerations.json.j2') | to_json }}
-imageAutomationController:
-  priorityClassName: system-cluster-critical
-  affinity: {{ lookup('template', 'roles/config/common_defaults_v1/templates/affinity.json.j2') | to_json }}
-  tolerations: {{ lookup('template', 'roles/config/common_defaults_v1/templates/tolerations.json.j2') | to_json }}
-imageReflectionController:
-  priorityClassName: system-cluster-critical
-  affinity: {{ lookup('template', 'roles/config/common_defaults_v1/templates/affinity.json.j2') | to_json }}
-  tolerations: {{ lookup('template', 'roles/config/common_defaults_v1/templates/tolerations.json.j2') | to_json }}
-kustomizeController:
-  priorityClassName: system-cluster-critical
-  affinity: {{ lookup('template', 'roles/config/common_defaults_v1/templates/affinity.json.j2') | to_json }}
-  tolerations: {{ lookup('template', 'roles/config/common_defaults_v1/templates/tolerations.json.j2') | to_json }}
-notificationController:
-  priorityClassName: system-cluster-critical
-  affinity: {{ lookup('template', 'roles/config/common_defaults_v1/templates/affinity.json.j2') | to_json }}
-  tolerations: {{ lookup('template', 'roles/config/common_defaults_v1/templates/tolerations.json.j2') | to_json }}
-sourceController:
-  priorityClassName: system-cluster-critical
-  affinity: {{ lookup('template', 'roles/config/common_defaults_v1/templates/affinity.json.j2') | to_json }}
-  tolerations: {{ lookup('template', 'roles/config/common_defaults_v1/templates/tolerations.json.j2') | to_json }}
-prometheus:
-  podMonitor:
-    create: {{ k8s_monitoring_enabled | bool }}
-    additionalLabels:
-      app.kubernetes.io/component: monitoring
-      release: prometheus-stack # That's the default podmonitor selector of our prometheus
diff --git a/nix/yk8s/k8s-supplements/fluxcd.nix b/nix/yk8s/k8s-supplements/fluxcd.nix
index e9252b3075721fe26aa3d6237c7f7f6daeeade45..e41695861805317a77a4b36336f91f75b0ea387b 100644
--- a/nix/yk8s/k8s-supplements/fluxcd.nix
+++ b/nix/yk8s/k8s-supplements/fluxcd.nix
@@ -6,9 +6,10 @@
 }: let
   cfg = config.yk8s.k8s-service-layer.fluxcd;
   modules-lib = import ../lib/modules.nix {inherit lib;};
-  inherit (modules-lib) mkRemovedOptionModule;
+  inherit (modules-lib) mkRemovedOptionModule mkRenamedOptionModule;
   inherit (lib) mkEnableOption mkOption types;
   inherit (yk8s-lib) mkTopSection mkGroupVarsFile;
+  inherit (yk8s-lib.k8s) mkAffinity mkTolerations;
   inherit
     (yk8s-lib.types)
     helmChartRepoUrl
@@ -16,9 +17,13 @@
     k8sNamespaceName
     ociImageTag
     ;
+  inherit (yk8s-lib.options) mkHelmReleaseOptions;
 in {
   imports = [
     (mkRemovedOptionModule ["k8s-service-layer" "fluxcd" "legacy"] "Support for the legacy FluxCD installation has been dropped.\nYou must switch to an older release and migrate if you have not yet.")
+    (mkRenamedOptionModule ["k8s-service-layer" "fluxcd" "helm_repo_url"] ["k8s-service-layer" "fluxcd" "helm" "chart_repo_url"])
+    (mkRenamedOptionModule ["k8s-service-layer" "fluxcd" "version"] ["k8s-service-layer" "fluxcd" "helm" "chart_version"])
+    (mkRenamedOptionModule ["k8s-service-layer" "fluxcd" "namespace"] ["k8s-service-layer" "fluxcd" "helm" "release_namespace"])
   ];
   options.yk8s.k8s-service-layer.fluxcd = mkTopSection {
     _docs.preface = ''
@@ -37,25 +42,15 @@ in {
       type = types.bool;
       default = true;
     };
-    helm_repo_url = mkOption {
-      type = helmChartRepoUrl;
-      default = "https://fluxcd-community.github.io/helm-charts";
-    };
-    version = mkOption {
-      description = ''
-        Helm chart version of FluxCD to be deployed.
-      '';
-      type = ociImageTag;
+    helm = mkHelmReleaseOptions {
+      descriptionName = "fluxcd";
+      defaultRepoUrl = "https://fluxcd-community.github.io/helm-charts";
+      defaultChartRef = "flux2";
       # renovate: datasource=helm depName=flux2 registryUrl=https://fluxcd-community.github.io/helm-charts
-      default = "2.15.0";
-    };
-    namespace = mkOption {
-      description = ''
-        Namespace to deploy the flux-system in (will be created if it does not exist, but
-        never deleted).
-      '';
-      type = k8sNamespaceName;
-      default = "k8s-svc-flux-system";
+      defaultChartVersion = "2.15.0";
+      defaultReleaseNamespace = "k8s-svc-flux-system";
+      defaultReleaseName = "flux2";
+      valuesDocUrl = "https://github.com/fluxcd-community/helm-charts/blob/main/charts/flux2/values.yaml";
     };
     scheduling_key = mkOption {
       description = ''
@@ -66,10 +61,46 @@ in {
       default = null;
     };
   };
+  config.yk8s.k8s-service-layer.fluxcd.helm.values = let
+    affinity = mkAffinity {inherit (cfg) scheduling_key;};
+    tolerations = mkTolerations {inherit (cfg) scheduling_key;};
+    priorityClassName = "system-cluster-critical";
+  in {
+    helmController = {
+      inherit affinity tolerations priorityClassName;
+    };
+    imageAutomationController = {
+      inherit affinity tolerations priorityClassName;
+    };
+    imageReflectionController = {
+      inherit affinity tolerations priorityClassName;
+    };
+    kustomizeController = {
+      inherit affinity tolerations priorityClassName;
+    };
+    notificationController = {
+      inherit affinity tolerations priorityClassName;
+    };
+    sourceController = {
+      inherit affinity tolerations priorityClassName;
+    };
+    prometheus = {
+      podMonitor = {
+        create = config.yk8s.kubernetes.monitoring.enabled;
+        additionalLabels = {
+          "app.kubernetes.io/component" = "monitoring";
+          release = "prometheus-stack";
+        };
+      };
+    };
+  };
   config.yk8s._inventory_packages = [
     (mkGroupVarsFile {
       inherit cfg;
       ansible_prefix = "fluxcd_";
+      unflat = [
+        ["helm"]
+      ];
       inventory_path = "all/fluxcd.yaml";
     })
   ];
diff --git a/nix/yk8s/k8s-supplements/ingress.nix b/nix/yk8s/k8s-supplements/ingress.nix
index 9e1c845d1a6945d896885fce31f095d2dd343d81..af3ca4e46262bd67e49a5c1ea06d5b9829c7206a 100644
--- a/nix/yk8s/k8s-supplements/ingress.nix
+++ b/nix/yk8s/k8s-supplements/ingress.nix
@@ -9,7 +9,7 @@
   inherit (modules-lib) mkRenamedOptionModule mkResourceOptionModule;
   inherit (lib) mkEnableOption mkOption types;
   inherit (yk8s-lib) mkTopSection mkGroupVarsFile;
-  inherit (yk8s-lib.k8s) mkAffinities mkTolerations;
+  inherit (yk8s-lib.k8s) mkAffinity mkTolerations;
   inherit (yk8s-lib.options) mkHelmReleaseOptions;
   inherit
     (yk8s-lib.types)
@@ -139,7 +139,7 @@ in {
   };
   config.yk8s.k8s-service-layer.ingress.helm.values = let
     inherit (config.yk8s.infra) ipv4_enabled ipv6_enabled;
-    affinity = mkAffinities {inherit (cfg) scheduling_key;};
+    affinity = mkAffinity {inherit (cfg) scheduling_key;};
     tolerations = mkTolerations {inherit (cfg) scheduling_key;};
   in {
     defaultBackend = {inherit affinity tolerations;};
diff --git a/nix/yk8s/lib/k8s.nix b/nix/yk8s/lib/k8s.nix
index 2edb66be40518c459fd1332c0a9f2f195273a712..bcf16b9a4204e0499ec802fe92359d6133b42b3a 100644
--- a/nix/yk8s/lib/k8s.nix
+++ b/nix/yk8s/lib/k8s.nix
@@ -13,7 +13,7 @@
       }
     ];

-  mkAffinities = {
+  mkAffinity = {
     scheduling_key ? null,
     pod_affinity_key ? null,
     pod_affinity_operator ? "Exists",
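
The change set above moves all FluxCD chart parameters under ``k8s-service-layer.fluxcd.helm`` and replaces the hard-coded ``values.yaml.j2`` template with the ``helm.values`` option, whose defaults are now computed in ``fluxcd.nix``. The following is a minimal sketch of how a cluster configuration could use the new option. The option paths and the ``prometheus.podMonitor.create`` key are taken from this change set; the file name, the scheduling key value, and the assumption that the fragment is merged into the yk8s configuration as an ordinary module are illustrative only:

  # fluxcd-overrides.nix -- hypothetical file name for a user-side module fragment.
  # Sketch only: assumes this attribute set is merged into the yk8s configuration
  # like any other module; the option paths come from fluxcd.nix in this change set.
  {
    yk8s.k8s-service-layer.fluxcd = {
      # Example scheduling key (a Kubernetes label) used for the controllers'
      # affinity and tolerations; the value is illustrative.
      scheduling_key = "example.com/fluxcd";

      helm = {
        # Chart version pin; "2.15.0" is the default introduced by this change.
        chart_version = "2.15.0";

        # Arbitrary chart values. Any key from the upstream flux2 values.yaml
        # (linked as valuesDocUrl above) can be used; podMonitor.create is shown
        # because this change set also sets it in the Tarook defaults. How a user
        # definition combines with those defaults follows the option type's merge
        # behaviour, which is not visible in this diff.
        values = {
          prometheus.podMonitor.create = false;
        };
      };
    };
  }

On the Ansible side, ``mkGroupVarsFile`` with ``ansible_prefix = "fluxcd_"`` and ``unflat = [["helm"]]`` renders these settings as the ``fluxcd_helm_*`` group variables that ``tasks/main.yaml`` now passes to ``kubernetes.core.helm``, which is why the deleted template is no longer needed.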