diff --git a/.gitlab/ci/build.gitlab-ci.yml b/.gitlab/ci/build.gitlab-ci.yml index e7056f806c77fa55e9c5ce24b8d56a7807bdfa06..7b4b18bab5f6d3bb33803d195a3f876125e6f0a6 100644 --- a/.gitlab/ci/build.gitlab-ci.yml +++ b/.gitlab/ci/build.gitlab-ci.yml @@ -75,7 +75,7 @@ build-cta-rpm: XRD_SSI_PATH: "xrootd-ssi-protobuf-interface" script: - | - if [ -z "${CI_COMMIT_TAG}" ]; then + if [[ -z "${CI_COMMIT_TAG}" ]]; then export VCS_VERSION=$CTA_BUILD_ID export CTA_VERSION=$BUILD_CTA_VERSION else @@ -85,18 +85,18 @@ build-cta-rpm: fi - export XRD_SSI_VERSION=$(git --git-dir=$XRD_SSI_PATH/.git --work-tree=$XRD_SSI_PATH describe --tags --abbrev=0) - | - if [ -z "$XRD_SSI_VERSION" ]; then + if [[ -z "$XRD_SSI_VERSION" ]]; then echo "Error: failed to find tag for submodule $XRD_SSI_PATH" exit 1 fi - BUILD_OPTIONS="" # For now we automatically skip the unit tests for the postgres scheduler. - | - if [ "${SKIP_UNIT_TESTS}" == "TRUE" ]; then + if [[ "${SKIP_UNIT_TESTS}" == "TRUE" ]]; then BUILD_OPTIONS+=" --skip-unit-tests" fi - | - if [ "$USE_INTERNAL_REPOS" == "TRUE" ]; then + if [[ "$USE_INTERNAL_REPOS" == "TRUE" ]]; then BUILD_OPTIONS+=" --use-internal-repos" fi - ./continuousintegration/build/build_rpm.sh --build-dir build_rpm diff --git a/.gitlab/ci/lint.gitlab-ci.yml b/.gitlab/ci/lint.gitlab-ci.yml index b8039cf3e78d90c0e1135f09daaaec9cd3ede0a6..d5182010a9ac4a7963e6622ecb44f56c2a3222a0 100644 --- a/.gitlab/ci/lint.gitlab-ci.yml +++ b/.gitlab/ci/lint.gitlab-ci.yml @@ -52,17 +52,17 @@ cppcheck: - exit 1 # The jobs that extend this job should override this script: - | - if [ ! -f "$FORMAT_DIFF" ]; then + if [[ ! 
-f "$FORMAT_DIFF" ]]; then echo "FORMAT_DIFF file not found: $FORMAT_DIFF" exit 1 fi - | - if [ $(wc -l < $FORMAT_DIFF) -le 1 ]; then + if [[ $(wc -l < $FORMAT_DIFF) -le 1 ]]; then echo "No formatting issues found" exit 0 fi - | - if [ $(wc -l < $FORMAT_DIFF) -le 1000 ]; then + if [[ $(wc -l < $FORMAT_DIFF) -le 1000 ]]; then bat --style=plain --color=always $FORMAT_DIFF else echo "Format diff too large. Please check job artifacts" @@ -115,7 +115,7 @@ format-apply: - git config user.name $GITLAB_USER_LOGIN script: - | - if [ $(ls $DIFF_DIRECTORY | wc -l) -lt 1 ]; then + if [[ $(ls $DIFF_DIRECTORY | wc -l) -lt 1 ]]; then echo "No formatting diffs found to apply" exit 0; fi diff --git a/.gitlab/ci/prepare.gitlab-ci.yml b/.gitlab/ci/prepare.gitlab-ci.yml index a8dbee8642f5b2bc29057ec6b489cfae29574a11..7e8ce1149dc4bdc6ec96e5daf8947e8fb19d4f01 100644 --- a/.gitlab/ci/prepare.gitlab-ci.yml +++ b/.gitlab/ci/prepare.gitlab-ci.yml @@ -10,7 +10,7 @@ modify-project-json: # Replace XRootD - trimmed_xrootd_version=$(echo "$CUSTOM_XROOTD_VERSION" | xargs) - | - if [ -n "$trimmed_xrootd_version" ]; then + if [[ -n "$trimmed_xrootd_version" ]]; then echo "Updating XRootD version to: $trimmed_xrootd_version." tmp_file=$(mktemp) jq --arg new_xrd_version "$trimmed_xrootd_version" \ @@ -23,7 +23,7 @@ modify-project-json: # Replace EOS versionlock - trimmed_eos_version=$(echo "$CUSTOM_EOS_VERSION" | xargs) - | - if [ -n "$trimmed_eos_version" ]; then + if [[ -n "$trimmed_eos_version" ]]; then eos_image_tag="${trimmed_eos_version}" echo "Updating EOS version to: $trimmed_eos_version." 
tmp_file=$(mktemp) @@ -39,12 +39,12 @@ modify-project-json: - trimmed_eos_image_tag=$(echo "$CUSTOM_EOS_IMAGE_TAG" | xargs) # If not explicitly provided, extract it from the EOS version by removing the packaging number and appending the platform - | - if [ -z "$trimmed_eos_image_tag" ] && [ -n "$trimmed_eos_version" ]; then + if [[ -z "$trimmed_eos_image_tag" ]] && [[ -n "$trimmed_eos_version" ]]; then base_eos_version="${trimmed_eos_version%-*}" trimmed_eos_image_tag="${base_eos_version}.${PLATFORM}" fi - | - if [ -n "$trimmed_eos_image_tag" ]; then + if [[ -n "$trimmed_eos_image_tag" ]]; then echo "Updating EOS image tag to: $trimmed_eos_image_tag." tmp_file=$(mktemp) jq --arg new_eos_image_tag "$trimmed_eos_image_tag" \ diff --git a/.gitlab/ci/release.gitlab-ci.yml b/.gitlab/ci/release.gitlab-ci.yml index 4d022c2b464585c4c6fe2756c90b67915603ac00..a29e29e2b2e89f25a2ef47cbcd83b7c44b460ce0 100644 --- a/.gitlab/ci/release.gitlab-ci.yml +++ b/.gitlab/ci/release.gitlab-ci.yml @@ -18,14 +18,14 @@ changelog-preview: RELEASE_VERSION: "" before_script: - | - if [ -z "${RELEASE_VERSION}" ]; then + if [[ -z "${RELEASE_VERSION}" ]]; then echo "Release version cannot be empty. Please set the RELEASE_VERSION variable manually in the CI." exit 1 fi script: # If no from reference (commit hash or tag) is provided, grab the latest tag before the current commit - | - if [ -z "${FROM_REF}" ]; then + if [[ -z "${FROM_REF}" ]]; then FROM_REF=$(git describe --tags --abbrev=0 ${TO_COMMIT}) fi - python3 continuousintegration/release/preview_changelog.py --api-url ${CI_SERVER_URL} @@ -67,7 +67,7 @@ changelog-update: - FROM_REF=${ENV_FROM_REF} - RELEASE_VERSION=${ENV_RELEASE_VERSION} - | - if [ -z "${SOURCE_BRANCH}" ]; then + if [[ -z "${SOURCE_BRANCH}" ]]; then echo "No source branch provided. 
Using default branch: ${CI_DEFAULT_BRANCH}" SOURCE_BRANCH=${CI_DEFAULT_BRANCH} fi diff --git a/.gitlab/ci/review.gitlab-ci.yml b/.gitlab/ci/review.gitlab-ci.yml index 1a3edbfc7952281b5062b5465aa8c2817828de13..0c3ad6336d6f2cf99e85cbe4ddfac10f75febd40 100644 --- a/.gitlab/ci/review.gitlab-ci.yml +++ b/.gitlab/ci/review.gitlab-ci.yml @@ -46,7 +46,7 @@ danger-review-gate: - when: never script: - status=$(cat danger_status.txt) - - if [ "$status" -ne 0 ]; then + - if [[ "$status" -ne 0 ]]; then echo "Danger review failed. Please check the merge request comment."; exit 1; fi diff --git a/.gitlab/ci/tests-kubernetes.gitlab-ci.yml b/.gitlab/ci/tests-kubernetes.gitlab-ci.yml index 37680d4651f32af3a4e183ea473e6f671f6eb8a0..2b02e377f9383438c01c962afd1567479fbe297b 100644 --- a/.gitlab/ci/tests-kubernetes.gitlab-ci.yml +++ b/.gitlab/ci/tests-kubernetes.gitlab-ci.yml @@ -3,20 +3,20 @@ .setup-k8s-test-variables: &setup-k8s-test-variables - | # Which scheduler config to use - if [ $SCHEDULER_TYPE == "objectstore" ]; then + if [[ $SCHEDULER_TYPE == "objectstore" ]]; then SCHEDULER_CONFIG=/opt/kubernetes/CTA/scheduler/ci-ceph-values.yaml else SCHEDULER_CONFIG=presets/dev-scheduler-postgres-values.yaml fi - | # Which catalogue config to use - if [ $ORACLE_SUPPORT == "TRUE" ]; then + if [[ $ORACLE_SUPPORT == "TRUE" ]]; then CATALOGUE_CONFIG=/opt/kubernetes/CTA/catalogue/ci-oracle-values.yaml else CATALOGUE_CONFIG=presets/dev-catalogue-postgres-values.yaml fi - | # Which image tag to use - if [ $PIPELINE_TYPE == "SYSTEM_TEST_ONLY" ]; then - if [ -z "$CUSTOM_SYSTEM_TEST_IMAGE_TAG" ]; then + if [[ $PIPELINE_TYPE == "SYSTEM_TEST_ONLY" ]]; then + if [[ -z "$CUSTOM_SYSTEM_TEST_IMAGE_TAG" ]]; then IMAGE_TAG=$(./continuousintegration/utils/get_latest_image_tag_on_branch.sh --branch $CI_DEFAULT_BRANCH) else IMAGE_TAG=$CUSTOM_SYSTEM_TEST_IMAGE_TAG @@ -26,7 +26,7 @@ fi # If we are testing from a CTA tag, get the Catalogue version for that tag - | - if [ -n "${CUSTOM_CTA_VERSION}" ]; then + if 
[[ -n "${CUSTOM_CTA_VERSION}" ]]; then git fetch --tags --force if git checkout "tags/$CUSTOM_CTA_VERSION" -- project.json; then catalogue_version=$(jq -r '.catalogueVersion' project.json) @@ -175,7 +175,7 @@ test-liquibase-update: before_script: - !reference [.kubernetes-test, before_script] - | - if [ "$(jq '.supportedCatalogueVersions | length' project.json)" -eq 1 ]; then + if [[ "$(jq '.supportedCatalogueVersions | length' project.json)" -eq 1 ]]; then echo "We are currently in a non-catalogue release. No update to test." exit 0 fi @@ -240,13 +240,13 @@ stress-test: script: - | EXTRA_SPAWN_ARGS="--cta-config ${STRESS_CTA_VALUES_FILE} --eos-config ${STRESS_EOS_VALUES_FILE} ${EXTRA_SPAWN_ARGS}" - if [ $PUBLISH_TELEMETRY_METRICS == "TRUE" ]; then + if [[ $PUBLISH_TELEMETRY_METRICS == "TRUE" ]]; then EXTRA_SPAWN_ARGS+=" --publish-telemetry" fi extra_args=($EXTRA_ARGS) if [[ -n "${EXTRA_SPAWN_ARGS}" ]]; then extra_args+=(--spawn-options "${EXTRA_SPAWN_ARGS}"); fi EXTRA_STRESS_TEST_OPTIONS="" - if [ "${SCHEDULER_TYPE}" == "pgsched" ]; then + if [[ "${SCHEDULER_TYPE}" == "pgsched" ]]; then SCHEDULER_CONFIG=/opt/kubernetes/CTA/scheduler/stresstest-scheduler-postgres-values.yaml EXTRA_STRESS_TEST_OPTIONS+=" -Q" fi @@ -273,7 +273,7 @@ stress-test: - ./stress_test.sh -n ${NAMESPACE} ${EXTRA_STRESS_TEST_OPTIONS} after_script: - | - if [ "${KEEP_STRESS_TEST_NAMESPACE}" == "TRUE" ]; then + if [[ "${KEEP_STRESS_TEST_NAMESPACE}" == "TRUE" ]]; then echo "Skipping deletion of stress test namespace ${NAMESPACE}" exit 0 fi diff --git a/.gitlab/ci/tests.gitlab-ci.yml b/.gitlab/ci/tests.gitlab-ci.yml index 92e5aa2fb9e194c0595bdac947c7336863af6b68..37249c937a510fba7f2f87821cd8931bcf92397d 100644 --- a/.gitlab/ci/tests.gitlab-ci.yml +++ b/.gitlab/ci/tests.gitlab-ci.yml @@ -6,7 +6,7 @@ - dnf -y --nogpgcheck install cta-release - cta-versionlock apply - | - if [ "$USE_INTERNAL_REPOS" == "TRUE" ]; then + if [[ "$USE_INTERNAL_REPOS" == "TRUE" ]]; then cp -f 
continuousintegration/docker/${PLATFORM}/etc/yum.repos.d-internal/*.repo /etc/yum.repos.d/ fi - dnf -y --nogpgcheck install cta-integrationtests cta-debuginfo cta-debugsource @@ -88,7 +88,7 @@ unit-test-postgresql: - echo 'postgresql:postgresql://cta@localhost/cta' > ${CTA_CATALOGUE_CONF} - /usr/bin/cta-catalogue-schema-create ${CTA_CATALOGUE_CONF} - | - if [ "$VALGRIND_UNIT_TESTS" = "TRUE" ]; then + if [[ "$VALGRIND_UNIT_TESTS" = "TRUE" ]]; then valgrind /usr/bin/cta-rdbmsUnitTests ${CTA_CATALOGUE_CONF} else /usr/bin/cta-rdbmsUnitTests ${CTA_CATALOGUE_CONF} @@ -124,7 +124,7 @@ unit-test-oracle: - echo yes | cta-catalogue-schema-drop ${CTA_CATALOGUE_CONF} - /usr/bin/cta-catalogue-schema-create ${CTA_CATALOGUE_CONF} - | - if [ "$VALGRIND_UNIT_TESTS" = "TRUE" ]; then + if [[ "$VALGRIND_UNIT_TESTS" = "TRUE" ]]; then valgrind /usr/bin/cta-rdbmsUnitTests ${CTA_CATALOGUE_CONF} else /usr/bin/cta-rdbmsUnitTests ${CTA_CATALOGUE_CONF} diff --git a/.gitlab/ci/validate.gitlab-ci.yml b/.gitlab/ci/validate.gitlab-ci.yml index 23a67b9a631a2263fe3c4654561998f6db976f24..408aefba2af60568b91d6d29662d1973170b96ee 100644 --- a/.gitlab/ci/validate.gitlab-ci.yml +++ b/.gitlab/ci/validate.gitlab-ci.yml @@ -38,7 +38,7 @@ validate-pipeline-variables: script: - cp -f continuousintegration/docker/${PLATFORM}/etc/yum.repos.d-public/*.repo /etc/yum.repos.d/ - | - if [ "$USE_INTERNAL_REPOS" == "TRUE" ]; then + if [[ "$USE_INTERNAL_REPOS" == "TRUE" ]]; then cp -f continuousintegration/docker/${PLATFORM}/etc/yum.repos.d-internal/*.repo /etc/yum.repos.d/ fi # This repo is a special case as it provides CTA itself diff --git a/continuousintegration/build/build_image.sh b/continuousintegration/build/build_image.sh index f6f3373e8cbe2ad3df274a2d126125f4ad41702a..644437c9f6753f4492125a21ec69ddd067efe42f 100755 --- a/continuousintegration/build/build_image.sh +++ b/continuousintegration/build/build_image.sh @@ -59,7 +59,7 @@ buildImage() { -h | --help) usage ;; -c | --container-runtime) if [[ $# -gt 1 
]]; then - if [ "$2" != "docker" ] && [ "$2" != "podman" ]; then + if [[ "$2" != "docker" ]] && [[ "$2" != "podman" ]]; then echo "-c | --container-runtime is \"$2\" but must be one of [docker, podman]." exit 1 fi @@ -129,17 +129,17 @@ buildImage() { shift done - if [ -z "${image_tag}" ]; then + if [[ -z "${image_tag}" ]]; then echo "Failure: Missing mandatory argument -t | --tag" usage fi - if [ -z "${rpm_src}" ]; then + if [[ -z "${rpm_src}" ]]; then echo "Failure: Missing mandatory argument -s | --rpm-src" usage fi - if [ -z "${rpm_version}" ]; then + if [[ -z "${rpm_version}" ]]; then echo "Failure: Missing mandatory argument --rpm-version" usage fi @@ -167,7 +167,7 @@ buildImage() { # Clean up again rm -rf "${rpm_default_src}" - if [ "$load_into_minikube" == "true" ]; then + if [[ "$load_into_minikube" == "true" ]]; then # This step is necessary because atm the container runtime and minikube don't share the same docker runtime and local registry tmpfile=$(mktemp) && trap 'rm -f $tmpfile' EXIT ${container_runtime} save -o $tmpfile localhost/${image_name}:${image_tag} >/dev/null 2>&1 diff --git a/continuousintegration/build/build_local.sh b/continuousintegration/build/build_local.sh index a29322d0b02238f8acf0272de47b870a953031d2..34e33ca0bc45c5fb60a7cab1474ea89f431ba68c 100755 --- a/continuousintegration/build/build_local.sh +++ b/continuousintegration/build/build_local.sh @@ -99,7 +99,7 @@ build_local() { ;; --cmake-build-type) if [[ $# -gt 1 ]]; then - if [ "$2" != "Release" ] && [ "$2" != "Debug" ] && [ "$2" != "RelWithDebInfo" ] && [ "$2" != "MinSizeRel" ]; then + if [[ "$2" != "Release" ]] && [[ "$2" != "Debug" ]] && [[ "$2" != "RelWithDebInfo" ]] && [[ "$2" != "MinSizeRel" ]]; then echo "--cmake-build-type is \"$2\" but must be one of [Release, Debug, RelWithDebInfo, or MinSizeRel]." 
exit 1 fi @@ -130,14 +130,14 @@ build_local() { # Navigate to repo root cd "${project_root}" - if [ ${skip_srpms} = false ]; then + if [[ ${skip_srpms} = false ]]; then echo "Building SRPMs..." local build_srpm_flags="" if [[ ${clean_build_dirs} = true ]]; then build_srpm_flags+=" --clean-build-dir" fi if [[ ${install} = true ]]; then - if [ ${install_platform} == "el9" ]; then + if [[ ${install_platform} == "el9" ]]; then dnf install -y epel-release almalinux-release-devel git python3-dnf-plugin-versionlock dnf install -y gcc gcc-c++ cmake3 rpm-build dnf-utils pandoc which make ninja-build ccache systemd-devel else @@ -161,10 +161,10 @@ build_local() { local build_rpm_flags="--jobs ${num_jobs}" - if [ ${skip_cmake} = true ]; then + if [[ ${skip_cmake} = true ]]; then build_rpm_flags+=" --skip-cmake" fi - if [ ${skip_unit_tests} = true ]; then + if [[ ${skip_unit_tests} = true ]]; then build_rpm_flags+=" --skip-unit-tests" fi if [[ ! ${cmake_build_type} = "" ]]; then diff --git a/continuousintegration/build/build_rpm.sh b/continuousintegration/build/build_rpm.sh index e3eba51f6da6297671e8b3a9ccffee5e6fa5bb2c..f87351936ce127ccd2ebbd034ed776f479643f38 100755 --- a/continuousintegration/build/build_rpm.sh +++ b/continuousintegration/build/build_rpm.sh @@ -86,7 +86,7 @@ build_rpm() { ;; --platform) if [[ $# -gt 1 ]]; then - if [ "$(jq --arg platform "$2" '.platforms | has($platform)' "$project_root/project.json")" != "true" ]; then + if [[ "$(jq --arg platform "$2" '.platforms | has($platform)' "$project_root/project.json")" != "true" ]]; then echo "Error: platform $2 not supported. Please check the project.json for supported platforms." fi platform="$2" @@ -98,7 +98,7 @@ build_rpm() { ;; --build-generator) if [[ $# -gt 1 ]]; then - if [ "$2" != "Ninja" ] && [ "$2" != "Unix Makefiles" ]; then + if [[ "$2" != "Ninja" ]] && [[ "$2" != "Unix Makefiles" ]]; then echo "Warning: build generator $2 is not officially supported. Compilation might not be successful." 
fi build_generator="$2" @@ -122,7 +122,7 @@ build_rpm() { ;; --scheduler-type) if [[ $# -gt 1 ]]; then - if [ "$2" != "objectstore" ] && [ "$2" != "pgsched" ]; then + if [[ "$2" != "objectstore" ]] && [[ "$2" != "pgsched" ]]; then echo "Error: scheduler type $2 is not one of [objectstore, pgsched]." exit 1 fi @@ -177,7 +177,7 @@ build_rpm() { --skip-unit-tests) skip_unit_tests=true ;; --oracle-support) if [[ $# -gt 1 ]]; then - if [ "$2" = "FALSE" ]; then + if [[ "$2" = "FALSE" ]]; then oracle_support=false fi shift @@ -188,7 +188,7 @@ build_rpm() { ;; --cmake-build-type) if [[ $# -gt 1 ]]; then - if [ "$2" != "Release" ] && [ "$2" != "Debug" ] && [ "$2" != "RelWithDebInfo" ] && [ "$2" != "MinSizeRel" ]; then + if [[ "$2" != "Release" ]] && [[ "$2" != "Debug" ]] && [[ "$2" != "RelWithDebInfo" ]] && [[ "$2" != "MinSizeRel" ]]; then echo "--cmake-build-type is \"$2\" but must be one of [Release, Debug, RelWithDebInfo, or MinSizeRel]." exit 1 fi @@ -207,42 +207,42 @@ build_rpm() { shift done - if [ -z "${build_dir}" ]; then + if [[ -z "${build_dir}" ]]; then echo "Failure: Missing mandatory argument --build-dir" usage fi - if [ -z "${scheduler_type}" ]; then + if [[ -z "${scheduler_type}" ]]; then echo "Failure: Missing mandatory argument --scheduler-type" usage fi - if [ -z "${srpm_dir}" ]; then + if [[ -z "${srpm_dir}" ]]; then echo "Failure: Missing mandatory argument --srpm-dir" usage fi - if [ -z "${vcs_version}" ]; then + if [[ -z "${vcs_version}" ]]; then echo "Failure: Missing mandatory argument --vcs-version" usage fi - if [ -z "${build_generator}" ]; then + if [[ -z "${build_generator}" ]]; then echo "Failure: Missing mandatory argument --build-generator" usage fi - if [ -z "${xrootd_ssi_version}" ]; then + if [[ -z "${xrootd_ssi_version}" ]]; then echo "Failure: Missing mandatory argument --xrootd-ssi-version" usage fi - if [ -z "${cmake_build_type}" ]; then + if [[ -z "${cmake_build_type}" ]]; then echo "Failure: Missing mandatory argument 
--cmake-build-type" usage fi - if [ -z "${platform}" ]; then + if [[ -z "${platform}" ]]; then echo "Failure: Missing mandatory argument --platform" usage fi @@ -257,17 +257,17 @@ build_rpm() { if [[ ${create_build_dir} = true ]]; then mkdir -p "${build_dir}" - elif [ ! -d "${build_dir}" ]; then + elif [[ ! -d "${build_dir}" ]]; then echo "Build directory ${build_dir} does not exist. Please create it and execute the script again, or run the script with the --create-build-dir option.." exit 1 fi - if [ -d "${build_dir}" ] && [ "$(ls -A "${build_dir}")" ]; then + if [[ -d "${build_dir}" ]] && [[ "$(ls -A "${build_dir}")" ]]; then echo "WARNING: build directory ${build_dir} is not empty" fi # Setup - if [ "${install_srpms}" = true ]; then + if [[ "${install_srpms}" = true ]]; then ./continuousintegration/project-json/generate_versionlock.py --platform ${platform} >/etc/yum/pluginconf.d/versionlock.list cp -f continuousintegration/docker/${platform}/etc/yum.repos.d-public/*.repo /etc/yum.repos.d/ if [[ ${use_internal_repos} = true ]]; then @@ -280,7 +280,7 @@ build_rpm() { # Cmake export CTA_VERSION=${cta_version} - if [ "${skip_cmake}" = false ]; then + if [[ "${skip_cmake}" = false ]]; then # Needs to be exported as cmake gets it from the environment export XROOTD_SSI_PROTOBUF_INTERFACE_VERSION=${xrootd_ssi_version} @@ -345,7 +345,7 @@ build_rpm() { ) else echo "Skipping cmake..." - if [ ! -d "${build_dir}" ]; then + if [[ ! -d "${build_dir}" ]]; then echo "${build_dir}/ directory does not exist. Ensure to run this script without skipping cmake first." 
fi cd "${build_dir}" diff --git a/continuousintegration/build/build_srpm.sh b/continuousintegration/build/build_srpm.sh index 101481241b16a06391821e6022354cfc31f50fa2..50879a00cc9be1b34359117d8c36b35763d95bd3 100755 --- a/continuousintegration/build/build_srpm.sh +++ b/continuousintegration/build/build_srpm.sh @@ -68,7 +68,7 @@ build_srpm() { ;; --build-generator) if [[ $# -gt 1 ]]; then - if [ "$2" != "Ninja" ] && [ "$2" != "Unix Makefiles" ]; then + if [[ "$2" != "Ninja" ]] && [[ "$2" != "Unix Makefiles" ]]; then echo "Warning: build generator $2 is not officially supported. Compilation might not be successful." fi build_generator="$2" @@ -91,7 +91,7 @@ build_srpm() { ;; --scheduler-type) if [[ $# -gt 1 ]]; then - if [ "$2" != "objectstore" ] && [ "$2" != "pgsched" ]; then + if [[ "$2" != "objectstore" ]] && [[ "$2" != "pgsched" ]]; then echo "Error: scheduler type $2 is not one of [objectstore, pgsched]." exit 1 fi @@ -122,7 +122,7 @@ build_srpm() { ;; --oracle-support) if [[ $# -gt 1 ]]; then - if [ "$2" = "FALSE" ]; then + if [[ "$2" = "FALSE" ]]; then oracle_support=false fi shift @@ -130,7 +130,7 @@ build_srpm() { ;; --cmake-build-type) if [[ $# -gt 1 ]]; then - if [ "$2" != "Release" ] && [ "$2" != "Debug" ] && [ "$2" != "RelWithDebInfo" ] && [ "$2" != "MinSizeRel" ]; then + if [[ "$2" != "Release" ]] && [[ "$2" != "Debug" ]] && [[ "$2" != "RelWithDebInfo" ]] && [[ "$2" != "MinSizeRel" ]]; then echo "--cmake-build-type is \"$2\" but must be one of [Release, Debug, RelWithDebInfo, or MinSizeRel]." 
exit 1 fi @@ -149,27 +149,27 @@ build_srpm() { shift done - if [ -z "${build_dir}" ]; then + if [[ -z "${build_dir}" ]]; then echo "Failure: Missing mandatory argument --build-dir" usage fi - if [ -z "${scheduler_type}" ]; then + if [[ -z "${scheduler_type}" ]]; then echo "Failure: Missing mandatory argument --scheduler-type" usage fi - if [ -z "${vcs_version}" ]; then + if [[ -z "${vcs_version}" ]]; then echo "Failure: Missing mandatory argument --vcs-version" usage fi - if [ -z "${build_generator}" ]; then + if [[ -z "${build_generator}" ]]; then echo "Failure: Missing mandatory argument --build-generator" usage fi - if [ -z "${cmake_build_type}" ]; then + if [[ -z "${cmake_build_type}" ]]; then echo "Failure: Missing mandatory argument --cmake-build-type" usage fi @@ -184,12 +184,12 @@ build_srpm() { if [[ ${create_build_dir} = true ]]; then mkdir -p "${build_dir}" - elif [ ! -d "${build_dir}" ]; then + elif [[ ! -d "${build_dir}" ]]; then echo "Build directory ${build_dir} does not exist. Please create it and execute the script again, or run the script with the --create-build-dir option.." exit 1 fi - if [ -d "${build_dir}" ] && [ "$(ls -A "${build_dir}")" ]; then + if [[ -d "${build_dir}" ]] && [[ "$(ls -A "${build_dir}")" ]]; then echo "WARNING: build directory ${build_dir} is not empty" fi diff --git a/continuousintegration/build/development/cta/alma9/prepare_ctadev_image.sh b/continuousintegration/build/development/cta/alma9/prepare_ctadev_image.sh index 37a44d8b4d17ed48aecd692e4fdc650f6d5fcb82..de54ed04c08fd71a83b5c019b2f23e2dd387f45c 100755 --- a/continuousintegration/build/development/cta/alma9/prepare_ctadev_image.sh +++ b/continuousintegration/build/development/cta/alma9/prepare_ctadev_image.sh @@ -16,7 +16,7 @@ # submit itself to any jurisdiction. 
image_tag="dev" -if [ -n "$1" ]; then +if [[ -n "$1" ]]; then version_arg="--build-arg CTA_VERSION=$1" image_tag="${image_tag}-$1" fi diff --git a/continuousintegration/build/development/cta/alma9/start_ctadev.sh b/continuousintegration/build/development/cta/alma9/start_ctadev.sh index e74e077a57b0c8d904cfde11f1db475c177797f8..cb1deeab288bc0c895a160f3cab86ffabfec444f 100755 --- a/continuousintegration/build/development/cta/alma9/start_ctadev.sh +++ b/continuousintegration/build/development/cta/alma9/start_ctadev.sh @@ -28,7 +28,7 @@ while getopts "p:v:" o; do done shift $((OPTIND-1)) -if [ -z "$1" ]; then +if [[ -z "$1" ]]; then usage fi diff --git a/continuousintegration/build/development/eos/alma9/prepare_eosdev_image.sh b/continuousintegration/build/development/eos/alma9/prepare_eosdev_image.sh index 9ec969e4fe898b09fdd7403b844bf5f463903b1d..2b20988ae77dc920ef06a3fb759b7b69f2e52c7b 100755 --- a/continuousintegration/build/development/eos/alma9/prepare_eosdev_image.sh +++ b/continuousintegration/build/development/eos/alma9/prepare_eosdev_image.sh @@ -16,7 +16,7 @@ # submit itself to any jurisdiction. 
image_tag="dev" -if [ -n "$1" ]; then +if [[ -n "$1" ]]; then version_arg="--build-arg EOS_VERSION=$1" image_tag="${image_tag}-$1" fi diff --git a/continuousintegration/build/development/eos/alma9/start_eosdev.sh b/continuousintegration/build/development/eos/alma9/start_eosdev.sh index e74e077a57b0c8d904cfde11f1db475c177797f8..cb1deeab288bc0c895a160f3cab86ffabfec444f 100755 --- a/continuousintegration/build/development/eos/alma9/start_eosdev.sh +++ b/continuousintegration/build/development/eos/alma9/start_eosdev.sh @@ -28,7 +28,7 @@ while getopts "p:v:" o; do done shift $((OPTIND-1)) -if [ -z "$1" ]; then +if [[ -z "$1" ]]; then usage fi diff --git a/continuousintegration/build_deploy.sh b/continuousintegration/build_deploy.sh index 5e1f937a0ac3327007e51b75545365a68c05e08b..e183a894f3d99aadeaf879ea28158acb0a6edfd8 100755 --- a/continuousintegration/build_deploy.sh +++ b/continuousintegration/build_deploy.sh @@ -152,7 +152,7 @@ build_deploy() { ;; --platform) if [[ $# -gt 1 ]]; then - if [ "$(jq --arg platform "$2" '.platforms | has($platform)' "$project_root/project.json")" != "true" ]; then + if [[ "$(jq --arg platform "$2" '.platforms | has($platform)' "$project_root/project.json")" != "true" ]]; then echo "Error: platform $2 not supported. Please check the project.json for supported platforms." fi platform="$2" @@ -173,7 +173,7 @@ build_deploy() { ;; --cmake-build-type) if [[ $# -gt 1 ]]; then - if [ "$2" != "Release" ] && [ "$2" != "Debug" ] && [ "$2" != "RelWithDebInfo" ] && [ "$2" != "MinSizeRel" ]; then + if [[ "$2" != "Release" ]] && [[ "$2" != "Debug" ]] && [[ "$2" != "RelWithDebInfo" ]] && [[ "$2" != "MinSizeRel" ]]; then echo "--cmake-build-type is \"$2\" but must be one of [Release, Debug, RelWithDebInfo, or MinSizeRel]." 
exit 1 fi @@ -186,7 +186,7 @@ build_deploy() { ;; -c | --container-runtime) if [[ $# -gt 1 ]]; then - if [ "$2" != "docker" ] && [ "$2" != "podman" ]; then + if [[ "$2" != "docker" ]] && [[ "$2" != "podman" ]]; then echo "-c | --container-runtime is \"$2\" but must be one of [docker, podman]." exit 1 fi @@ -292,10 +292,10 @@ build_deploy() { ##################################################################################################################### # Build binaries/RPMs ##################################################################################################################### - if [ "${skip_build}" = false ]; then + if [[ "${skip_build}" = false ]]; then build_image_name="cta-build-image-${platform}" # Stop and remove existing container if reset is requested - if [ "${reset}" = true ]; then + if [[ "${reset}" = true ]]; then echo "Shutting down existing build container..." ${container_runtime} rm -f "${build_container_name}" >/dev/null 2>&1 || true podman rmi ${build_image_name} > /dev/null 2>&1 || true @@ -339,14 +339,14 @@ build_deploy() { local build_rpm_flags="" - if [ ${restarted} = true ] || [ ${force_install} = true ]; then + if [[ ${restarted} = true ]] || [[ ${force_install} = true ]]; then # Only install srpms if it is the first time running this or if the install is forced build_rpm_flags+=" --install-srpms" - elif [ ${skip_cmake} = true ]; then + elif [[ ${skip_cmake} = true ]]; then # It should only be possible to skip cmake if the pod was not restarted build_rpm_flags+=" --skip-cmake" fi - if [ ${skip_unit_tests} = true ]; then + if [[ ${skip_unit_tests} = true ]]; then build_rpm_flags+=" --skip-unit-tests" fi if [[ ${clean_build_dir} = true || ${clean_build_dirs} = true ]]; then @@ -390,23 +390,23 @@ build_deploy() { # Build image ##################################################################################################################### build_iteration_file=/tmp/.build_iteration - if [ "$skip_image_reload" == "false" ]; 
then + if [[ "$skip_image_reload" == "false" ]]; then print_header "BUILDING CONTAINER IMAGE" - if [ "$upgrade_cta" == "false" ]; then + if [[ "$upgrade_cta" == "false" ]]; then # Start with the tag dev-0 local current_build_id=0 image_tag="dev-$current_build_id" touch $build_iteration_file echo $current_build_id >$build_iteration_file - if [ ${image_cleanup} = true ]; then + if [[ ${image_cleanup} = true ]]; then # When deploying an entirely new instance, this is a nice time to clean up old images echo "Cleaning up unused ctageneric images..." minikube image ls | grep "localhost/ctageneric:dev" | xargs -r minikube image rm >/dev/null 2>&1 fi else # This continuoully increments the image tag from previous upgrades - if [ ! -f "$build_iteration_file" ]; then + if [[ ! -f "$build_iteration_file" ]]; then echo "Failed to find $build_iteration_file to retrieve build iteration." exit 1 fi @@ -427,13 +427,13 @@ build_deploy() { --container-runtime "${container_runtime}" \ --load-into-minikube \ ${extra_image_build_options} - if [ ${image_cleanup} = true ]; then + if [[ ${image_cleanup} = true ]]; then # Pruning of unused images is done after image building to ensure we maintain caching podman image ls | grep ctageneric | grep -v "${image_tag}" | awk '{ print "localhost/ctageneric:" $2 }' | xargs -r podman rmi || true podman image prune -f >/dev/null fi else - if [ ! -f "$build_iteration_file" ]; then + if [[ ! -f "$build_iteration_file" ]]; then echo "Failed to find $build_iteration_file to retrieve build iteration. Unable to identify which image to spawn/upgrade the instance with." 
exit 1 fi @@ -444,16 +444,16 @@ build_deploy() { ##################################################################################################################### # Deploy CTA instance ##################################################################################################################### - if [ ${skip_deploy} = false ]; then - if [ "$upgrade_cta" = true ]; then + if [[ ${skip_deploy} = false ]]; then + if [[ "$upgrade_cta" = true ]]; then print_header "UPGRADING CTA INSTANCE" cd continuousintegration/orchestration upgrade_options="" - if [ "$skip_image_reload" == "false" ]; then + if [[ "$skip_image_reload" == "false" ]]; then upgrade_options+=" --cta-image-repository localhost/ctageneric --cta-image-tag ${image_tag}" fi ./upgrade_cta_instance.sh --namespace ${deploy_namespace} ${upgrade_options} ${extra_spawn_options} - elif [ "$upgrade_eos" = true ]; then + elif [[ "$upgrade_eos" = true ]]; then print_header "UPGRADING EOS INSTANCE" cd continuousintegration/orchestration ./deploy_eos_instance.sh --namespace ${deploy_namespace} --eos-image-tag ${eos_image_tag} @@ -463,30 +463,30 @@ build_deploy() { # and polutes the dev machine ./continuousintegration/orchestration/delete_instance.sh -n ${deploy_namespace} --discard-logs print_header "DEPLOYING CTA INSTANCE" - if [ -n "${tapeservers_config}" ]; then + if [[ -n "${tapeservers_config}" ]]; then extra_spawn_options+=" --tapeservers-config ${tapeservers_config}" fi - if [ -n "${eos_image_tag}" ]; then + if [[ -n "${eos_image_tag}" ]]; then extra_spawn_options+=" --eos-image-tag ${eos_image_tag}" fi - if [ -n "${eos_config}" ]; then + if [[ -n "${eos_config}" ]]; then extra_spawn_options+=" --eos-config ${eos_config}" fi - if [ -n "${cta_config}" ]; then + if [[ -n "${cta_config}" ]]; then extra_spawn_options+=" --cta-config ${cta_config}" fi - if [ "$local_telemetry" = true ]; then + if [[ "$local_telemetry" = true ]]; then extra_spawn_options+=" --local-telemetry" fi - if [ "$publish_telemetry" = 
true ]; then + if [[ "$publish_telemetry" = true ]]; then extra_spawn_options+=" --publish-telemetry" fi - if [ -z "${scheduler_config}" ]; then - if [ "${scheduler_type}" == "pgsched" ]; then + if [[ -z "${scheduler_config}" ]]; then + if [[ "${scheduler_type}" == "pgsched" ]]; then scheduler_config="presets/dev-scheduler-postgres-values.yaml" else scheduler_config="presets/dev-scheduler-vfs-values.yaml" diff --git a/continuousintegration/checks/check_catalogue_schema_version.sh b/continuousintegration/checks/check_catalogue_schema_version.sh index 8f980b4e693e0ca3c4df901ff3e315a70fc16a6f..0beceea4f9d391c70df0284b013a51da8ee30706 100755 --- a/continuousintegration/checks/check_catalogue_schema_version.sh +++ b/continuousintegration/checks/check_catalogue_schema_version.sh @@ -56,7 +56,7 @@ while getopts "pnt" o; do done shift $((OPTIND - 1)) -if [ $# -ne 1 ]; then +if [[ $# -ne 1 ]]; then usage fi @@ -67,7 +67,7 @@ if ! test -d ${cta_repo_dir}; then fi cta_repo_dir=$(readlink -f $cta_repo_dir) -if [ "$check_pivot_release" -eq "1" ] && [ "$check_non_pivot_release" -eq "1" ]; then +if [[ "$check_pivot_release" -eq "1" ]] && [[ "$check_non_pivot_release" -eq "1" ]]; then echo "Error: Can't set both '-p' and '-c'" usage fi @@ -100,7 +100,7 @@ CTA_PROJECT_NUM_SUPPORTED_VERSIONS=$(jq '.supportedCatalogueVersions | length' $ echo "Checking..." # Always check that the CTA catalogue schema version is the same in both the main CTA project and the 'cta-catalogue-schema' submodule echo -n "- CTA catalogue schema version is the same in the project.json and 'cta-catalogue-schema' submodule: " -if [ "$CTA_PROJECT_CATALOGUE_VERSION" != "${CTA_SUB_REPO__CATALOGUE_MAJOR_VERSION}.${CTA_SUB_REPO__CATALOGUE_MINOR_VERSION}" ]; then +if [[ "$CTA_PROJECT_CATALOGUE_VERSION" != "${CTA_SUB_REPO__CATALOGUE_MAJOR_VERSION}.${CTA_SUB_REPO__CATALOGUE_MINOR_VERSION}" ]]; then error="${error}CTA catalogue schema version is not the same in the project.json and the 'cta-catalogue-schema' submodule. 
${CTA_PROJECT_CATALOGUE_VERSION} vs ${CTA_SUB_REPO__CATALOGUE_MAJOR_VERSION}.${CTA_SUB_REPO__CATALOGUE_MINOR_VERSION}\n" echo "FAIL" @@ -116,7 +116,7 @@ else fi # [Optional] Check that the 'cta-catalogue-schema' submodule version is tagged -if [ "$check_catalogue_submodule_tags" -eq "1" ]; then +if [[ "$check_catalogue_submodule_tags" -eq "1" ]]; then echo -n "- CTA catalogue schema version is tagged in the 'cta-catalogue-schema' submodule commit: " if test 0 == $(echo $CTA_SUB_REPO__TAGS | grep $CTA_PROJECT_CATALOGUE_VERSION | wc -l); then error="${error}The 'cta-catalogue-schema' submodule commit does not contain a tag for CTA catalogue schema version ${CTA_PROJECT_CATALOGUE_VERSION}.\n" @@ -128,9 +128,9 @@ fi # [Optional] Non-pivot release check # - Can only support 1 catalogue schema version -if [ "$check_non_pivot_release" -eq "1" ]; then +if [[ "$check_non_pivot_release" -eq "1" ]]; then echo -n "- CTA release is not pivot (single catalogue version supported): " - if [ "$CTA_PROJECT_NUM_SUPPORTED_VERSIONS" -ne "1" ]; then + if [[ "$CTA_PROJECT_NUM_SUPPORTED_VERSIONS" -ne "1" ]]; then error="${error}Non-pivot CTA release should only support 1 catalogue schema version. 
It currently supports multiple versions {${CTA_PROJECT_SUPPORTED_CATALOGUE_VERSIONS}}.\n" echo "FAIL" else @@ -142,9 +142,9 @@ fi # - Supports 2+ catalogue schema versions # - 'supportedCatalogueVersions' is a list of previous versions that can be migrated to 'CTA_CATALOGUE_REF_SCHEMA_VERSION_CURR' # - 'CTA_PROJECT_PREV_CATALOGUE_VERSIONS' values must be smaller than 'CTA_PROJECT_CATALOGUE_MAJOR_VERSION' -if [ "$check_pivot_release" -eq "1" ]; then +if [[ "$check_pivot_release" -eq "1" ]]; then echo -n "- CTA release is pivot (multiple catalogue versions supported): " - if [ "$CTA_PROJECT_NUM_SUPPORTED_VERSIONS" -le "1" ]; then + if [[ "$CTA_PROJECT_NUM_SUPPORTED_VERSIONS" -le "1" ]]; then error="${error}Pivot CTA release should support more than 1 catalogue schema versions in the project.json.\n" echo "FAIL" else @@ -152,7 +152,7 @@ if [ "$check_pivot_release" -eq "1" ]; then fi echo "- All supported releases are less than or equal to the current version: " for PREV_VERSION in $(echo $CTA_PROJECT_PREV_CATALOGUE_VERSIONS); do - if [ "$(printf '%s\n' "$PREV_VERSION" "$CTA_PROJECT_CATALOGUE_VERSION" | sort -V | head -n1)" != "$PREV_VERSION" ]; then + if [[ "$(printf '%s\n' "$PREV_VERSION" "$CTA_PROJECT_CATALOGUE_VERSION" | sort -V | head -n1)" != "$PREV_VERSION" ]]; then error="${error}Previous CTA catalogue schema version ${PREV_VERSION} is equal or greater than current version ${CTA_PROJECT_CATALOGUE_VERSION}.\n" echo " - FAIL(${PREV_VERSION}->${CTA_PROJECT_CATALOGUE_VERSION}) " else @@ -168,18 +168,18 @@ for PREV_VERSION in $(echo $CTA_PROJECT_PREV_CATALOGUE_VERSIONS); do migration_script_name_1="${PREV_VERSION}to${CURR_VERSION}.sql" migration_script_name_2="${PREV_VERSION}.0to${CURR_VERSION}.0.sql" migration_scripts_dir="${cta_repo_dir}/catalogue/cta-catalogue-schema/migrations/liquibase" - if [ "$PREV_VERSION" -eq "$CURR_VERSION" ]; then + if [[ "$PREV_VERSION" -eq "$CURR_VERSION" ]]; then # Skip if versions are the same echo " - No script needed for non-pivot 
release: OK" continue fi - if [ ! -f "${migration_scripts_dir}/oracle/${migration_script_name_1}" ] && [ ! -f "${migration_scripts_dir}/oracle/${migration_script_name_2}" ]; then + if [[ ! -f "${migration_scripts_dir}/oracle/${migration_script_name_1}" ]] && [[ ! -f "${migration_scripts_dir}/oracle/${migration_script_name_2}" ]]; then error="${error}Missing oracle migration script from CTA catalogue schema version ${PREV_VERSION} to ${CURR_VERSION}.\n" echo " - Oracle(${PREV_VERSION}->${CURR_VERSION}): FAIL" else echo " - Oracle(${PREV_VERSION}->${CURR_VERSION}): OK" fi - if [ ! -f "${migration_scripts_dir}/postgres/${migration_script_name_1}" ] && [ ! -f "${migration_scripts_dir}/postgres/${migration_script_name_2}" ]; then + if [[ ! -f "${migration_scripts_dir}/postgres/${migration_script_name_1}" ]] && [[ ! -f "${migration_scripts_dir}/postgres/${migration_script_name_2}" ]]; then error="${error}Missing postgres migration script from CTA catalogue schema version ${PREV_VERSION} to ${CURR_VERSION}.\n" echo " - Postgres(${PREV_VERSION}->${CURR_VERSION}): FAIL" else @@ -188,7 +188,7 @@ for PREV_VERSION in $(echo $CTA_PROJECT_PREV_CATALOGUE_VERSIONS); do done # Fail if there were error... -if [ -n "${error}" ]; then +if [[ -n "${error}" ]]; then echo -e "Errors:\n${error}" exit 1 fi diff --git a/continuousintegration/checks/check_runner_healthy.sh b/continuousintegration/checks/check_runner_healthy.sh index 356dce0082a415a8fd2bf5e0113006ebdcd9e179..3572d84ab80b01452c1b2efbc1241651f2a42625 100755 --- a/continuousintegration/checks/check_runner_healthy.sh +++ b/continuousintegration/checks/check_runner_healthy.sh @@ -84,7 +84,7 @@ else fi echo -if [ "${errors}" -gt 0 ]; then +if [[ "${errors}" -gt 0 ]]; then echo "FAILURE: not all conditions were satisfied. 
The runner is not configured correctly" exit 1 fi diff --git a/continuousintegration/deploy.sh b/continuousintegration/deploy.sh index c4a670f3b2038e3a49dbb201196dd11b686f9d1f..9cba6c1ca85972a53a5c2fb4c60e4a49edd2ebba 100755 --- a/continuousintegration/deploy.sh +++ b/continuousintegration/deploy.sh @@ -139,7 +139,7 @@ deploy() { shift done - if [ -z "${cta_image_tag}" ]; then + if [[ -z "${cta_image_tag}" ]]; then echo "Missing mandatory argument: --cta-image-tag" usage fi @@ -153,23 +153,23 @@ deploy() { # and polutes the dev machine ./continuousintegration/orchestration/delete_instance.sh -n ${deploy_namespace} --discard-logs print_header "DEPLOYING CTA INSTANCE" - if [ -n "${tapeservers_config}" ]; then + if [[ -n "${tapeservers_config}" ]]; then extra_spawn_options+=" --tapeservers-config ${tapeservers_config}" fi - if [ -n "${eos_image_tag}" ]; then + if [[ -n "${eos_image_tag}" ]]; then extra_spawn_options+=" --eos-image-tag ${eos_image_tag}" fi - if [ -n "${eos_config}" ]; then + if [[ -n "${eos_config}" ]]; then extra_spawn_options+=" --eos-config ${eos_config}" fi - if [ -n "${cta_config}" ]; then + if [[ -n "${cta_config}" ]]; then extra_spawn_options+=" --cta-config ${cta_config}" fi - if [ "$local_telemetry" = true ]; then + if [[ "$local_telemetry" = true ]]; then extra_spawn_options+=" --local-telemetry" fi diff --git a/continuousintegration/orchestration/create_instance.sh b/continuousintegration/orchestration/create_instance.sh index dc77461241cf0b25eb32663503fb9235ebe96be7..63e6cbf0b6d8210e84f82a324f7fbbe3bbfae4a8 100755 --- a/continuousintegration/orchestration/create_instance.sh +++ b/continuousintegration/orchestration/create_instance.sh @@ -194,24 +194,24 @@ create_instance() { done # Argument checks - if [ -z "${namespace}" ]; then + if [[ -z "${namespace}" ]]; then echo "Missing mandatory argument: -n | --namespace" usage fi - if [ -z "${cta_image_tag}" ]; then + if [[ -z "${cta_image_tag}" ]]; then echo "Missing mandatory argument: -i | 
--cta-image-tag" usage fi - if [ -z "${catalogue_schema_version}" ]; then + if [[ -z "${catalogue_schema_version}" ]]; then echo "No catalogue schema version provided: using project.json value" catalogue_schema_version=$(jq .catalogueVersion ${project_json_path}) fi - if [ "$local_telemetry" == "true" ] && [ "$publish_telemetry" == "true" ]; then + if [[ "$local_telemetry" == "true" ]] && [[ "$publish_telemetry" == "true" ]]; then die "--local-telemetry and --publish-telemetry cannot be active at the same time" fi - if [ $dry_run == 1 ]; then + if [[ $dry_run == 1 ]]; then helm_command="template --debug" else helm_command="install" @@ -223,7 +223,7 @@ create_instance() { echo "Catalogue content will be kept" fi - if [ "$reset_scheduler" == "true" ]; then + if [[ "$reset_scheduler" == "true" ]]; then echo "scheduler data store content will be reset" else echo "scheduler data store content will be kept" @@ -234,12 +234,12 @@ create_instance() { devices_all=$(./../utils/tape/list_all_libraries.sh) devices_in_use=$(kubectl get all --all-namespaces -l cta/library-device -o jsonpath='{.items[*].metadata.labels.cta/library-device}' | tr ' ' '\n' | sort | uniq) unused_devices=$(comm -23 <(echo "$devices_all") <(echo "$devices_in_use")) - if [ -z "$unused_devices" ]; then + if [[ -z "$unused_devices" ]]; then die "No unused library devices available. All the following libraries are in use: $devices_in_use" fi # Determine the library config to use - if [ -z "${tapeservers_config}" ]; then + if [[ -z "${tapeservers_config}" ]]; then echo "Library configuration not provided. Auto-generating..." 
# This file is cleaned up again by delete_instance.sh tapeservers_config=$(mktemp "/tmp/${namespace}-tapeservers-XXXXXX-values.yaml") @@ -262,7 +262,7 @@ create_instance() { cat "$tapeservers_config" echo "---" - if [ "$scheduler_config" == "presets/dev-scheduler-vfs-values.yaml" ]; then + if [[ "$scheduler_config" == "presets/dev-scheduler-vfs-values.yaml" ]]; then if kubectl get pods -n local-path-storage -l app=local-path-provisioner 2>/dev/null | grep -q Running; then echo "Local path provisioning is enabled. Using VFS scheduler is okay." else @@ -384,10 +384,10 @@ create_instance() { wait $scheduler_pid || exit 1 extra_cta_chart_flags="" - if [ "$local_telemetry" == "true" ]; then + if [[ "$local_telemetry" == "true" ]]; then extra_cta_chart_flags+="--values presets/dev-cta-telemetry-values.yaml" fi - if [ "$publish_telemetry" == "true" ]; then + if [[ "$publish_telemetry" == "true" ]]; then extra_cta_chart_flags+="--values presets/ci-cta-telemetry-http-values.yaml" fi @@ -408,7 +408,7 @@ create_instance() { if [ $dcache_enabled == "true" ] ; then wait $dcache_pid || exit 1 fi - if [ $dry_run == 1 ]; then + if [[ $dry_run == 1 ]]; then exit 0 fi } diff --git a/continuousintegration/orchestration/delete_instance.sh b/continuousintegration/orchestration/delete_instance.sh index 084162ccbb048ef0e314078bdba120d582a69cb0..caf35ec5c814cd071980863a56b242b2618ba9c4 100755 --- a/continuousintegration/orchestration/delete_instance.sh +++ b/continuousintegration/orchestration/delete_instance.sh @@ -103,7 +103,7 @@ save_logs() { rm -rf "${tmpdir}/varlogs" # Save artifacts if running in CI - if [ -n "${CI_PIPELINE_ID}" ]; then + if [[ -n "${CI_PIPELINE_ID}" ]]; then echo "Saving logs as artifacts" # Note that this directory must be in the repository so that they can be properly saved as artifacts mkdir -p "../../pod_logs/${namespace}" @@ -125,13 +125,13 @@ reclaim_released_pvs() { echo "Processing PV: $pv" path=$(kubectl get pv "$pv" -o jsonpath='{.spec.local.path}') - if 
[ -z "$path" ]; then + if [[ -z "$path" ]]; then echo " Skipping: no local path found (not a local volume?)" continue fi echo " Found path: $path" - if [ -d "$path" ]; then + if [[ -d "$path" ]]; then echo " Wiping contents of $path" # We need sudo here as files in the mount path can be owned by root # Note that this requires explicit permission in the sudoers file to ensure the user executing this @@ -181,7 +181,7 @@ delete_instance() { done # Argument checks - if [ -z "${namespace}" ]; then + if [[ -z "${namespace}" ]]; then echo "Missing mandatory argument: -n | --namespace" usage fi @@ -194,7 +194,7 @@ delete_instance() { kubectl get pods --namespace ${namespace} # Optional log collection - if [ "$collect_logs" = true ]; then + if [[ "$collect_logs" = true ]]; then save_logs $namespace $log_dir else echo "Discarding logs for the current run" @@ -210,7 +210,7 @@ delete_instance() { echo "Deletion finished" # Reclaim any PVs - if [ "$wipe_pvs" = true ]; then + if [[ "$wipe_pvs" = true ]]; then reclaim_released_pvs $namespace else echo "Skipping reclaiming of released Persistent Volumes" diff --git a/continuousintegration/orchestration/deploy_dcache.sh b/continuousintegration/orchestration/deploy_dcache.sh index f5555ea551549def3bad0bcbffc8e53ffcb099a8..8b3d24488e81eeb00bd46dd16a0022cfba4a5cd6 100755 --- a/continuousintegration/orchestration/deploy_dcache.sh +++ b/continuousintegration/orchestration/deploy_dcache.sh @@ -76,7 +76,7 @@ deploy() { done # Argument checks - if [ -z "${namespace}" ]; then + if [[ -z "${namespace}" ]]; then echo "Missing mandatory argument: -n | --namespace" usage fi @@ -86,10 +86,10 @@ deploy() { helm_flags="" - if [ -n "$dcache_image_tag" ]; then + if [[ -n "$dcache_image_tag" ]]; then helm_flags+=" --set image.tag=${dcache_image_tag}" fi - if [ -n "$dcache_config" ]; then + if [[ -n "$dcache_config" ]]; then helm_flags+=" --values ${dcache_config}" fi diff --git a/continuousintegration/orchestration/deploy_eos.sh 
b/continuousintegration/orchestration/deploy_eos.sh index 181e43a474e58327598cbd94e1e8671256d0b074..c012799ef49e548c13a45897a7c3927feecf00fa 100755 --- a/continuousintegration/orchestration/deploy_eos.sh +++ b/continuousintegration/orchestration/deploy_eos.sh @@ -80,7 +80,7 @@ deploy() { done # Argument checks - if [ -z "${namespace}" ]; then + if [[ -z "${namespace}" ]]; then echo "Missing mandatory argument: -n | --namespace" usage fi @@ -90,13 +90,13 @@ deploy() { helm_flags="" - if [ -n "$eos_image_repository" ]; then + if [[ -n "$eos_image_repository" ]]; then helm_flags+=" --set global.repository=${eos_image_repository}" fi - if [ -n "$eos_image_tag" ]; then + if [[ -n "$eos_image_tag" ]]; then helm_flags+=" --set global.tag=${eos_image_tag}" fi - if [ -n "$eos_image_tag" ]; then + if [[ -n "$eos_config" ]]; then helm_flags+=" --values ${eos_config}" fi diff --git a/continuousintegration/orchestration/helm/auth/templates/keycloak-configure-job.yaml b/continuousintegration/orchestration/helm/auth/templates/keycloak-configure-job.yaml index f9cbe0166d8f1328a6fd30afbf77ff627ce40c0a..99de3698f629d3e866b4bf49fba9b5192d9bb98f 100644 --- a/continuousintegration/orchestration/helm/auth/templates/keycloak-configure-job.yaml +++ b/continuousintegration/orchestration/helm/auth/templates/keycloak-configure-job.yaml @@ -51,7 +51,7 @@ spec: # Create or get grpc-client GRPC_CLIENT_ID=$(/opt/keycloak/bin/kcadm.sh get clients -r master -q clientId=grpc-client --fields id 2>/dev/null | grep '"id"' | head -1 | sed 's/.*"id" : "\([^"]*\)".*/\1/') - if [ -z "$GRPC_CLIENT_ID" ]; then + if [[ -z "$GRPC_CLIENT_ID" ]]; then echo "Creating grpc-client..." 
/opt/keycloak/bin/kcadm.sh create clients \ -r master \ @@ -73,7 +73,7 @@ spec: /opt/keycloak/bin/kcadm.sh update users/profile -r master -s unmanagedAttributePolicy=ENABLED # Set diskInstanceName attribute on admin user ADMIN_USER_ID=$(/opt/keycloak/bin/kcadm.sh get users -r master -q username="${KEYCLOAK_ADMIN}" --fields id | grep '"id"' | head -1 | sed 's/.*"id" : "\([^"]*\)".*/\1/') - if [ -n "$ADMIN_USER_ID" ]; then + if [[ -n "$ADMIN_USER_ID" ]]; then echo "Setting diskInstanceName attribute for admin user (ID: $ADMIN_USER_ID)..." /opt/keycloak/bin/kcadm.sh update users/$ADMIN_USER_ID \ -r master \ diff --git a/continuousintegration/orchestration/helm/catalogue/scripts/reset_catalogue.sh b/continuousintegration/orchestration/helm/catalogue/scripts/reset_catalogue.sh index 1396f27f6eddf8fa81a08a794e7f63f86ffe26ae..51c7915aeacf4e9fff4abe894470700aa0c9d5f5 100755 --- a/continuousintegration/orchestration/helm/catalogue/scripts/reset_catalogue.sh +++ b/continuousintegration/orchestration/helm/catalogue/scripts/reset_catalogue.sh @@ -33,7 +33,7 @@ if ! (echo yes | cta-catalogue-schema-drop /etc/cta/cta-catalogue.conf); then echo yes | cta-catalogue-schema-drop /etc/cta/cta-catalogue.conf || die "ERROR: Could not wipe database. 
cta-catalogue-schema-drop /etc/cta/cta-catalogue.conf FAILED" fi -if [ "$CATALOGUE_BACKEND" == "oracle" ]; then +if [[ "$CATALOGUE_BACKEND" == "oracle" ]]; then # dnf install -y oracle-instantclient-sqlplus # echo "Purging Oracle recycle bin" # ORACLE_SQLPLUS="/usr/bin/sqlplus64" diff --git a/continuousintegration/orchestration/helm/frontend/scripts/ctafrontend-grpc.sh b/continuousintegration/orchestration/helm/frontend/scripts/ctafrontend-grpc.sh index 4482f8a07065c055f856320f56e4e82c4a46ec69..42d2ff871a3d5a2a6fc1e16d1169c7068560370f 100644 --- a/continuousintegration/orchestration/helm/frontend/scripts/ctafrontend-grpc.sh +++ b/continuousintegration/orchestration/helm/frontend/scripts/ctafrontend-grpc.sh @@ -19,7 +19,7 @@ echo "$(date '+%Y-%m-%d %H:%M:%S') [$(basename "${BASH_SOURCE[0]}")] Started" # Install missing RPMs dnf install -y cta-frontend-grpc cta-catalogueutils cta-debuginfo cta-debugsource -if [ "$SCHEDULER_BACKEND" == "ceph" ]; then +if [[ "$SCHEDULER_BACKEND" == "ceph" ]]; then dnf config-manager --enable ceph dnf install -y ceph-common fi diff --git a/continuousintegration/orchestration/helm/frontend/scripts/ctafrontend-xrd.sh b/continuousintegration/orchestration/helm/frontend/scripts/ctafrontend-xrd.sh index 5aa0143417556de1a864d1939380dbe057e81070..4325b6412b09f2642f8eab0064bae8296f53854b 100644 --- a/continuousintegration/orchestration/helm/frontend/scripts/ctafrontend-xrd.sh +++ b/continuousintegration/orchestration/helm/frontend/scripts/ctafrontend-xrd.sh @@ -19,7 +19,7 @@ echo "$(date '+%Y-%m-%d %H:%M:%S') [$(basename "${BASH_SOURCE[0]}")] Started" # Install missing RPMs dnf install -y cta-frontend cta-catalogueutils cta-debuginfo cta-debugsource -if [ "$SCHEDULER_BACKEND" == "ceph" ]; then +if [[ "$SCHEDULER_BACKEND" == "ceph" ]]; then dnf config-manager --enable ceph dnf install -y ceph-common fi diff --git a/continuousintegration/orchestration/helm/scheduler/scripts/reset_scheduler.sh 
b/continuousintegration/orchestration/helm/scheduler/scripts/reset_scheduler.sh index e02f80a5ea95b715cbe878cff8c7340e3c8aca2a..04952bce7e3f3fb1d2ea618550b56c2fa6b47ce5 100755 --- a/continuousintegration/orchestration/helm/scheduler/scripts/reset_scheduler.sh +++ b/continuousintegration/orchestration/helm/scheduler/scripts/reset_scheduler.sh @@ -27,11 +27,11 @@ die() { echo "Using scheduler backend: $SCHEDULER_BACKEND" # Clean up scheduler -if [ "$SCHEDULER_BACKEND" == "vfs" ] || [ "$SCHEDULER_BACKEND" == "vfsDeprecated" ]; then +if [[ "$SCHEDULER_BACKEND" == "vfs" ]] || [[ "$SCHEDULER_BACKEND" == "vfsDeprecated" ]]; then echo "Installing the cta-objectstore-tools" dnf install -y cta-objectstore-tools echo "Wiping objectstore" - if [ "$SCHEDULER_BACKEND" == "vfsDeprecated" ]; then + if [[ "$SCHEDULER_BACKEND" == "vfsDeprecated" ]]; then rm -fr $SCHEDULER_URL mkdir -p $SCHEDULER_URL else @@ -39,7 +39,7 @@ if [ "$SCHEDULER_BACKEND" == "vfs" ] || [ "$SCHEDULER_BACKEND" == "vfsDeprecated fi cta-objectstore-initialize $SCHEDULER_URL || die "ERROR: Could not wipe the objectstore. cta-objectstore-initialize $SCHEDULER_URL FAILED" chmod -R 777 $SCHEDULER_URL -elif [ "$SCHEDULER_BACKEND" == "postgres" ]; then +elif [[ "$SCHEDULER_BACKEND" == "postgres" ]]; then echo "Installing the cta-scheduler-utils" dnf install -y cta-scheduler-utils echo "Postgres scheduler config file content: " @@ -48,7 +48,7 @@ elif [ "$SCHEDULER_BACKEND" == "postgres" ]; then echo "yes" | cta-scheduler-schema-drop /etc/cta/cta-scheduler.conf || die "ERROR: Could not drop scheduler schema. cta-scheduler-schema-drop /etc/cta/cta-scheduler.conf FAILED" echo "Creating the scheduler DB schema" cta-scheduler-schema-create /etc/cta/cta-scheduler.conf || die "ERROR: Could not create scheduler schema. 
cta-scheduler-schema-create /etc/cta/cta-scheduler.conf FAILED" -elif [ "$SCHEDULER_BACKEND" == "ceph" ]; then +elif [[ "$SCHEDULER_BACKEND" == "ceph" ]]; then echo "Installing the cta-objectstore-tools" dnf config-manager --enable ceph dnf install -y cta-objectstore-tools ceph-common diff --git a/continuousintegration/orchestration/helm/tpsrv/scripts/taped.sh b/continuousintegration/orchestration/helm/tpsrv/scripts/taped.sh index 4d2402df4a9999cb2e32585df5910914cda201f5..d529294ab5b249300abc8c9dd257474fdb9c6512 100755 --- a/continuousintegration/orchestration/helm/tpsrv/scripts/taped.sh +++ b/continuousintegration/orchestration/helm/tpsrv/scripts/taped.sh @@ -20,7 +20,7 @@ echo "$(date '+%Y-%m-%d %H:%M:%S') [$(basename "${BASH_SOURCE[0]}")] Started" # Install missing RPMs dnf install -y mt-st lsscsi sg3_utils cta-taped cta-tape-label cta-debuginfo cta-eosdf cta-debugsource -if [ "$SCHEDULER_BACKEND" == "ceph" ]; then +if [[ "$SCHEDULER_BACKEND" == "ceph" ]]; then dnf config-manager --enable ceph dnf install -y ceph-common fi diff --git a/continuousintegration/orchestration/presets/dev-eos-grpc-values.yaml b/continuousintegration/orchestration/presets/dev-eos-grpc-values.yaml index afdee179365c860cb53815bf1e79726dd29b1ea2..c2aba7182ae7d9f6d61ebcbd418bcd3d7ec23f67 100644 --- a/continuousintegration/orchestration/presets/dev-eos-grpc-values.yaml +++ b/continuousintegration/orchestration/presets/dev-eos-grpc-values.yaml @@ -114,7 +114,7 @@ mgm: -H "Authorization: Bearer $admin_access_token" \ http://auth-keycloak:8080/admin/realms/master/clients/${CLIENT_ID}/default-client-scopes | jq -r '.[] | select(.name == "grpc-custom-sub") | .id') - if [ -z "$EXISTING_SCOPE" ]; then + if [[ -z "$EXISTING_SCOPE" ]]; then echo "Scope not found on client, adding it..." 
CUSTOM_SCOPE_ID=$(curl -s \ @@ -238,7 +238,7 @@ fst: -H "Authorization: Bearer $admin_access_token" \ http://auth-keycloak:8080/admin/realms/master/clients/${CLIENT_ID}/default-client-scopes | jq -r '.[] | select(.name == "grpc-custom-sub") | .id') - if [ -z "$EXISTING_SCOPE" ]; then + if [[ -z "$EXISTING_SCOPE" ]]; then echo "Scope not found on client, adding it..." CUSTOM_SCOPE_ID=$(curl -s \ diff --git a/continuousintegration/orchestration/presets/stress-cta-grpc-values.yaml b/continuousintegration/orchestration/presets/stress-cta-grpc-values.yaml index 6c20c1304ec6f068a4e7a41eb20a2074ad38d45b..610938c8f2425212a9ec1c30e7c278e71b35b87e 100644 --- a/continuousintegration/orchestration/presets/stress-cta-grpc-values.yaml +++ b/continuousintegration/orchestration/presets/stress-cta-grpc-values.yaml @@ -43,7 +43,7 @@ frontend: #!/bin/bash LOGMOUNT=/mnt/logs PV_PATH="" - if [ "-${MY_CONTAINER}-" != "--" ]; then + if [[ "-${MY_CONTAINER}-" != "--" ]]; then PV_PATH="${LOGMOUNT}/${MY_NAME}/${MY_CONTAINER}" else PV_PATH="${LOGMOUNT}/${MY_NAME}" @@ -63,7 +63,7 @@ tpsrv: #!/bin/bash LOGMOUNT=/mnt/logs PV_PATH="" - if [ "-${MY_CONTAINER}-" != "--" ]; then + if [[ "-${MY_CONTAINER}-" != "--" ]]; then PV_PATH="${LOGMOUNT}/${MY_NAME}/${MY_CONTAINER}" else PV_PATH="${LOGMOUNT}/${MY_NAME}" diff --git a/continuousintegration/orchestration/presets/stress-cta-xrd-values.yaml b/continuousintegration/orchestration/presets/stress-cta-xrd-values.yaml index 8b578c8741aa7b399bae0e00b4700cad133a6a3f..069bd20e0b7c49ed9b5fbeb5cf6bd5a6ef14a711 100644 --- a/continuousintegration/orchestration/presets/stress-cta-xrd-values.yaml +++ b/continuousintegration/orchestration/presets/stress-cta-xrd-values.yaml @@ -41,7 +41,7 @@ frontend: #!/bin/bash LOGMOUNT=/mnt/logs PV_PATH="" - if [ "-${MY_CONTAINER}-" != "--" ]; then + if [[ "-${MY_CONTAINER}-" != "--" ]]; then PV_PATH="${LOGMOUNT}/${MY_NAME}/${MY_CONTAINER}" else PV_PATH="${LOGMOUNT}/${MY_NAME}" @@ -61,7 +61,7 @@ tpsrv: #!/bin/bash 
LOGMOUNT=/mnt/logs PV_PATH="" - if [ "-${MY_CONTAINER}-" != "--" ]; then + if [[ "-${MY_CONTAINER}-" != "--" ]]; then PV_PATH="${LOGMOUNT}/${MY_NAME}/${MY_CONTAINER}" else PV_PATH="${LOGMOUNT}/${MY_NAME}" diff --git a/continuousintegration/orchestration/presets/stress-eos-grpc-values.yaml b/continuousintegration/orchestration/presets/stress-eos-grpc-values.yaml index 2a5acb77f4090be261eb210c78085dde1c930a0a..87a53b7a367713c8fa06721d15088326b8dddcdd 100644 --- a/continuousintegration/orchestration/presets/stress-eos-grpc-values.yaml +++ b/continuousintegration/orchestration/presets/stress-eos-grpc-values.yaml @@ -114,7 +114,7 @@ mgm: -H "Authorization: Bearer $admin_access_token" \ http://auth-keycloak:8080/admin/realms/master/clients/${CLIENT_ID}/default-client-scopes | jq -r '.[] | select(.name == "grpc-custom-sub") | .id') - if [ -z "$EXISTING_SCOPE" ]; then + if [[ -z "$EXISTING_SCOPE" ]]; then echo "Scope not found on client, adding it..." CUSTOM_SCOPE_ID=$(curl -s \ @@ -250,7 +250,7 @@ fst: -H "Authorization: Bearer $admin_access_token" \ http://auth-keycloak:8080/admin/realms/master/clients/${CLIENT_ID}/default-client-scopes | jq -r '.[] | select(.name == "grpc-custom-sub") | .id') - if [ -z "$EXISTING_SCOPE" ]; then + if [[ -z "$EXISTING_SCOPE" ]]; then echo "Scope not found on client, adding it..." 
CUSTOM_SCOPE_ID=$(curl -s \ diff --git a/continuousintegration/orchestration/run_systemtest.sh b/continuousintegration/orchestration/run_systemtest.sh index 7d0dec8817b8a99a4be17770671910af5dc70174..0e80304a3959d832fe9da4290ae9f2df033b96ec 100755 --- a/continuousintegration/orchestration/run_systemtest.sh +++ b/continuousintegration/orchestration/run_systemtest.sh @@ -71,13 +71,13 @@ execute_cmd_with_log() { echo "================================================================================" echo "Waiting for process took ${elapsed_time} seconds" - if [ "${execute_log_rc}" == "" ]; then + if [[ "${execute_log_rc}" == "" ]]; then echo "TIMEOUTING COMMAND, setting exit status to 1" 1>&2 kill -9 -${execute_log_pid} execute_log_rc=1 fi - if [ "${execute_log_rc}" != "0" ]; then + if [[ "${execute_log_rc}" != "0" ]]; then echo "Process exited with exit code: ${execute_log_rc}." 1>&2 if [ $keepnamespace == 0 ] ; then echo "Cleaning up environment" @@ -163,43 +163,43 @@ run_systemtest() { done # Argument checks - if [ -z "${namespace}" ]; then + if [[ -z "${namespace}" ]]; then echo "Missing mandatory argument: -n | --namespace" usage fi - if [ -z "${systemtest_script}" ]; then + if [[ -z "${systemtest_script}" ]]; then echo "Missing mandatory argument: -s | --test-script" usage fi - if [ -z "${cta_image_tag}" ]; then + if [[ -z "${cta_image_tag}" ]]; then echo "Missing mandatory argument: -i | --cta-image-tag" usage fi - if [ -z "${scheduler_config}" ]; then + if [[ -z "${scheduler_config}" ]]; then echo "Missing mandatory argument: -o | --scheduler-config" usage fi - if [ -z "${catalogue_config}" ]; then + if [[ -z "${catalogue_config}" ]]; then echo "Missing mandatory argument: -d | --catalogue-config" usage fi log_dir="${orchestration_dir}/../../pod_logs/${namespace}" mkdir -p "${log_dir}" - if [ -d "${log_dir}" ]; then + if [[ -d "${log_dir}" ]]; then # Delete log contents of previous runs if they exist rm -rf "${log_dir:?}/"* fi old_namespaces=$(kubectl get 
namespace -o json | jq -r '.items[].metadata.name | select(. != "default" and (test("^kube-") | not))') - if [ $cleanup_namespaces == 1 ]; then + if [[ $cleanup_namespaces == 1 ]]; then echo "Cleaning up old namespaces" echo $old_namespaces echo $old_namespaces | xargs -itoto ./delete_instance.sh -n toto -D echo "Cleanup complete" elif kubectl get namespace ${namespace} > /dev/null 2>&1; then die "Namespace ${namespace} already exists" - elif [ -n "$old_namespaces" ]; then + elif [[ -n "$old_namespaces" ]]; then echo "Error: Other namespaces exist: $old_namespaces" exit 1 fi @@ -209,7 +209,7 @@ run_systemtest() { execute_cmd_with_log "./create_instance.sh -n ${namespace} ${spawn_options} ${extra_spawn_options}" "${log_dir}/create_instance.log" ${create_instance_timeout} # Launch preflight_checks and timeout after ${preflight_checks_timeout} seconds - if [ -n "$preflight_checks_script" ] && [ -x "$preflight_checks_script" ]; then + if [[ -n "$preflight_checks_script" ]] && [[ -x "$preflight_checks_script" ]]; then cd $(dirname "${preflight_checks_script}") echo "Launching preflight checks: ${preflight_checks_script}" execute_cmd_with_log "./$(basename ${preflight_checks_script}) -n ${namespace}" \ diff --git a/continuousintegration/orchestration/setup/kinit_clients.sh b/continuousintegration/orchestration/setup/kinit_clients.sh index 32ec74dac0a5acbee5df21204c3a8530332c5d76..732c00d6fd9a08646a8d0e4a76ca7ef9c9629f12 100755 --- a/continuousintegration/orchestration/setup/kinit_clients.sh +++ b/continuousintegration/orchestration/setup/kinit_clients.sh @@ -51,7 +51,7 @@ while [[ "$#" -gt 0 ]]; do shift done -if [ -z "$NAMESPACE" ]; then +if [[ -z "$NAMESPACE" ]]; then echo "Missing mandatory argument: -n | --namespace" 1>&2 exit 1 fi diff --git a/continuousintegration/orchestration/setup/reset_tapes.sh b/continuousintegration/orchestration/setup/reset_tapes.sh index d1bce86d784d18d8ccf38b03ca71b5440dbd0a42..26360cd0dc53475e594344a6973bd4937c301d01 100755 --- 
a/continuousintegration/orchestration/setup/reset_tapes.sh +++ b/continuousintegration/orchestration/setup/reset_tapes.sh @@ -45,7 +45,7 @@ while [[ "$#" -gt 0 ]]; do shift done -if [ -z "$namespace" ]; then +if [[ -z "$namespace" ]]; then echo "Missing mandatory argument: -n | --namespace" 1>&2 exit 1 fi diff --git a/continuousintegration/orchestration/tests/changeStorageClassInCatalogue.sh b/continuousintegration/orchestration/tests/changeStorageClassInCatalogue.sh index 420c23e26c6aa1bbf15db6ec99b0bff5ccaee50d..21eb9c3bceb54bddc0f374ffebc5c3c6bafb5bf6 100755 --- a/continuousintegration/orchestration/tests/changeStorageClassInCatalogue.sh +++ b/continuousintegration/orchestration/tests/changeStorageClassInCatalogue.sh @@ -18,11 +18,11 @@ while getopts "n:" o; do done shift $((OPTIND-1)) -if [ -z "${NAMESPACE}" ]; then +if [[ -z "${NAMESPACE}" ]]; then usage fi -if [ -n "${error}" ]; then +if [[ -n "${error}" ]]; then echo -e "ERROR:\n${error}" exit 1 fi diff --git a/continuousintegration/orchestration/tests/client_abortPrepare.sh b/continuousintegration/orchestration/tests/client_abortPrepare.sh index 67c304c58e155384f8c2cbf837fee5276416dee1..daf1574db61d17e4b2c8c75228cc3b94aae643b3 100755 --- a/continuousintegration/orchestration/tests/client_abortPrepare.sh +++ b/continuousintegration/orchestration/tests/client_abortPrepare.sh @@ -86,7 +86,7 @@ sleep 1 requestsTotal=$(admin_cta --json sq | jq 'map(select (.mountType == "RETRIEVE") | .queuedFiles | tonumber) | add') echo "Retrieve requests count: ${requestsTotal}" filesCount=${NB_FILES} -if [ ${requestsTotal} -ne ${filesCount} ]; then +if [[ ${requestsTotal} -ne ${filesCount} ]]; then echo "ERROR: Retrieve queue(s) size mismatch: ${requestsTotal} requests queued for ${filesCount} files." 
fi @@ -132,7 +132,7 @@ while [[ ${REMAINING_REQUESTS} -gt 0 ]]; do REMAINING_REQUESTS=$(admin_cta --json sq | jq -r 'map(select (.mountType == "RETRIEVE") | .queuedFiles | tonumber) | add // 0'); # Prevent the result from being empty - if [ -z "$REMAINING_REQUEST" ]; then REMAINING_REQUESTS='0'; fi + if [[ -z "$REMAINING_REQUESTS" ]]; then REMAINING_REQUESTS='0'; fi echo "${REMAINING_REQUESTS} requests remaining." done @@ -146,14 +146,14 @@ for ((subdir=0; subdir < ${NB_DIRS}; subdir++)); do done echo "Total restaged files found: ${RESTAGEDFILES}" -if [ "0" != "$(ls ${ERROR_DIR} 2> /dev/null | wc -l)" ]; then +if [[ "0" != "$(ls ${ERROR_DIR} 2> /dev/null | wc -l)" ]]; then # there were some prepare errors echo "Several errors occured during prepare cancel test!" echo "Please check client pod logs in artifacts" mv ${ERROR_DIR}/* ${LOGDIR}/xrd_errors/ fi -if [ ${RESTAGEDFILES} -ne "0" ]; then +if [[ ${RESTAGEDFILES} -ne "0" ]]; then echo "ERROR some files were retrieved in spite of retrieve cancellation." exit 1 fi diff --git a/continuousintegration/orchestration/tests/client_archive.sh b/continuousintegration/orchestration/tests/client_archive.sh index bfbd64d4eff98e865329013de7116169527552a5..dc091d23e0555f9a72cba03e38ef276962177f3e 100755 --- a/continuousintegration/orchestration/tests/client_archive.sh +++ b/continuousintegration/orchestration/tests/client_archive.sh @@ -55,7 +55,7 @@ for ((subdir=0; subdir < ${NB_DIRS}; subdir++)); do done start=$(date +%s) echo "Timestamp after all files copied ${start}" -if [ "0" != "$(ls ${ERROR_DIR} 2> /dev/null | wc -l)" ]; then +if [[ "0" != "$(ls ${ERROR_DIR} 2> /dev/null | wc -l)" ]]; then # there were some xrdcp errors echo "Several xrdcp errors occured during archival!" 
echo "Please check client pod logs in artifacts" diff --git a/continuousintegration/orchestration/tests/client_archive_metadata.sh b/continuousintegration/orchestration/tests/client_archive_metadata.sh index 59defb8bb135477a2ca7ae65a4fdfbed8fda2985..97219f339e34b49f2e04f7277616963011df00f6 100755 --- a/continuousintegration/orchestration/tests/client_archive_metadata.sh +++ b/continuousintegration/orchestration/tests/client_archive_metadata.sh @@ -23,7 +23,7 @@ usage() { exit 1 } -if [ -z "$1" ]; then +if [[ -z "$1" ]]; then usage fi diff --git a/continuousintegration/orchestration/tests/client_prepare_file.sh b/continuousintegration/orchestration/tests/client_prepare_file.sh index c83b1d4437810bb234d18677928207d1f935d3f5..e4d8b4ee79fbd27cc0ca059f252ee769e82ded29 100755 --- a/continuousintegration/orchestration/tests/client_prepare_file.sh +++ b/continuousintegration/orchestration/tests/client_prepare_file.sh @@ -44,7 +44,7 @@ while getopts "e:f:" o; do done shift $((OPTIND-1)) -if [ "x${FILES_LOCATION}" = "x" ]; then +if [[ "x${FILES_LOCATION}" = "x" ]]; then die "Files location in a list should be provided" fi diff --git a/continuousintegration/orchestration/tests/client_retrieve.sh b/continuousintegration/orchestration/tests/client_retrieve.sh index 43acde73fa6a90657a077c4b29e9fca6a269cd07..bc8e6c8f34e8affe9a281514fa091b476724cf77 100755 --- a/continuousintegration/orchestration/tests/client_retrieve.sh +++ b/continuousintegration/orchestration/tests/client_retrieve.sh @@ -56,7 +56,7 @@ for ((subdir=0; subdir < ${NB_DIRS}; subdir++)); do seq -w 0 $((${NB_FILES}-1)) | xargs --max-procs=${NB_PROCS} -iTEST_FILE_NAME bash -c "$command_str" | tee ${LOGDIR}/prepare_sys.retrieve.req_id_${subdir}.log | grep ^ERROR done -if [ "0" != "$(ls ${ERROR_DIR} 2> /dev/null | wc -l)" ]; then +if [[ "0" != "$(ls ${ERROR_DIR} 2> /dev/null | wc -l)" ]]; then # there were some prepare errors echo "Several prepare errors occured during retrieval!" 
echo "Please check client pod logs in artifacts" diff --git a/continuousintegration/orchestration/tests/client_retrieve_queue_cleanup.sh b/continuousintegration/orchestration/tests/client_retrieve_queue_cleanup.sh index 7b66e807c04430e0c8d6a95f8b81acbafce2b35b..b1aed0118b209c385a105bfcc53d3989d6f6673e 100755 --- a/continuousintegration/orchestration/tests/client_retrieve_queue_cleanup.sh +++ b/continuousintegration/orchestration/tests/client_retrieve_queue_cleanup.sh @@ -243,7 +243,7 @@ test_tape_state_queueing_priority() { for i in ${!TAPE_LIST_3[@]}; do echo "Checking tape ${TAPE_LIST_3[$i]}..." - if [ $i -eq $EXPECTED_SELECTED_QUEUE ]; then + if [[ $i -eq $EXPECTED_SELECTED_QUEUE ]]; then assert_number_of_files_in_queue "${TAPE_LIST_3[$i]}" 1 2 echo "Request found on ${TAPE_STATE_LIST[$i]} queue ${TAPE_LIST_3[$i]}, as expected." else diff --git a/continuousintegration/orchestration/tests/client_setup.sh b/continuousintegration/orchestration/tests/client_setup.sh index 607201d3550813359d46c21d828530004829162f..fade7883ac731b95b1d13a5f92232de931e5a6df 100755 --- a/continuousintegration/orchestration/tests/client_setup.sh +++ b/continuousintegration/orchestration/tests/client_setup.sh @@ -141,7 +141,7 @@ if [[ -n ${GFAL2_PROTOCOL} ]]; then fi fi -if [ ! -z "${error}" ]; then +if [[ ! 
-z "${error}" ]]; then echo -e "ERROR:\n${error}" exit 1 fi @@ -159,7 +159,7 @@ case "${CLI_TARGET}" in exit 1 esac -if [ "x${COMMENT}" = "x" ]; then +if [[ "x${COMMENT}" = "x" ]]; then echo "No annotation will be pushed to Influxdb" fi diff --git a/continuousintegration/orchestration/tests/client_stress_ar.sh b/continuousintegration/orchestration/tests/client_stress_ar.sh index 448075b30a0673a6381c65674245b61e6c859480..e38b3d1acb48982ede908f80882d32e1facb844e 100755 --- a/continuousintegration/orchestration/tests/client_stress_ar.sh +++ b/continuousintegration/orchestration/tests/client_stress_ar.sh @@ -230,7 +230,7 @@ while getopts "d:e:n:N:s:p:vS:rAPGt:m:Q" o; do done shift $((OPTIND-1)) -if [ "x${COMMENT}" = "x" ]; then +if [[ "x${COMMENT}" = "x" ]]; then echo "No annotation will be pushed to Influxdb" fi @@ -364,7 +364,7 @@ for ((subdir=0; subdir < ${NB_DIRS}; subdir++)); do eos root://${EOS_MGM_HOST} mkdir -p ${EOS_DIR}/${subdir} eos_exit_status=$? echo "eos mkdir exit status: ${eos_exit_status}" - if [ $eos_exit_status -ne 0 ]; then + if [[ $eos_exit_status -ne 0 ]]; then die "Cannot create directory ${EOS_DIR}/${subdir} in eos instance ${EOS_MGM_HOST}." fi TEST_FILE_NAME_WITH_SUBDIR="${TEST_FILE_NAME_BASE}${TEST_SUBDIRS[${subdir}]}" # this is the target filename of xrdcp processes just need to add the filenumber in each directory @@ -445,7 +445,7 @@ done #done -if [ "0" != "$(ls ${LOGDIR}/xrd_errors/ 2> /dev/null | wc -l)" ]; then +if [[ "0" != "$(ls ${LOGDIR}/xrd_errors/ 2> /dev/null | wc -l)" ]]; then # there were some xrdcp errors echo "Several xrdcp errors occured during archival!" echo "Please check client pod logs in artifacts" @@ -644,7 +644,7 @@ for ((subdir=0; subdir < ${NB_DIRS}; subdir++)); do mv -f "$TMP_FILE" "$ERROR_LOG" fi done -if [ "0" != "$(ls ${ERROR_DIR} 2> /dev/null | wc -l)" ]; then +if [[ "0" != "$(ls ${ERROR_DIR} 2> /dev/null | wc -l)" ]]; then # there were some prepare errors echo "Several prepare errors occured during retrieval!" 
echo "Please check client pod logs in artifacts" @@ -813,7 +813,7 @@ for ((subdir=0; subdir < ${NB_DIRS}; subdir++)); do cat ${STATUS_FILE} | grep ^${subdir}/ | cut -d/ -f2 | xargs --max-procs=${NB_PROCS} -iTEST_FILE_NAME bash -c "XRD_LOGLEVEL=Dump KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 xrdfs ${EOS_MGM_HOST} xattr ${EOS_DIR}/${subdir}/TEST_FILE_NAME get sys.retrieve.req_id 2>${ERROR_DIR}/XATTRGET_TEST_FILE_NAME && rm ${ERROR_DIR}/XATTRGET_TEST_FILE_NAME || echo ERROR with xrootd xattr get for file TEST_FILE_NAME, full logs in ${ERROR_DIR}/XATTRGET_TEST_FILE_NAME" | tee ${LOGDIR}/prepare2_sys.retrieve.req_id_${subdir}.log | grep ^ERROR echo Done. done -if [ "0" != "$(ls ${ERROR_DIR} 2> /dev/null | wc -l)" ]; then +if [[ "0" != "$(ls ${ERROR_DIR} 2> /dev/null | wc -l)" ]]; then # there were some prepare errors echo "Several prepare errors occured during retrieval!" echo "Please check client pod logs in artifacts" @@ -824,7 +824,7 @@ fi requestsTotal=$(admin_cta --json sq | jq 'map(select (.mountType == "RETRIEVE") | .queuedFiles | tonumber) | add') echo "Retrieve requests count: ${requestsTotal}" filesCount=$(cat ${STATUS_FILE} | wc -l) -if [ ${requestsTotal} -ne ${filesCount} ]; then +if [[ ${requestsTotal} -ne ${filesCount} ]]; then echo "ERROR: Retrieve queue(s) size mismatch: ${requestsTotal} requests queued for ${filesCount} files." fi @@ -853,7 +853,7 @@ WAIT_FOR_RETRIEVE_QUEUES_CLEAR_TIMEOUT=$((60)) REMAINING_REQUESTS=$(admin_cta --json sq | jq -r 'map(select (.mountType == "RETRIEVE") | .queuedFiles | tonumber) | add') echo "${REMAINING_REQUESTS} requests remaining." 
# Prevent the result from being empty -if [ -z "$REMAINING_REQUESTS" ]; then REMAINING_REQUESTS='0'; fi +if [[ -z "$REMAINING_REQUESTS" ]]; then REMAINING_REQUESTS='0'; fi while [[ ${REMAINING_REQUESTS} -gt 0 ]]; do echo "$(date +%s): Waiting for retrieve queues to be cleared: Seconds passed = ${SECONDS_PASSED}" sleep 1 @@ -866,7 +866,7 @@ while [[ ${REMAINING_REQUESTS} -gt 0 ]]; do REMAINING_REQUESTS=$(admin_cta --json sq | jq -r 'map(select (.mountType == "RETRIEVE") | .queuedFiles | tonumber) | add'); # Prevent the result from being empty - if [ -z "$REMAINING_REQUEST" ]; then REMAINING_REQUESTS='0'; fi + if [[ -z "$REMAINING_REQUEST" ]]; then REMAINING_REQUESTS='0'; fi echo "${REMAINING_REQUESTS} requests remaining." done @@ -880,7 +880,7 @@ for ((subdir=0; subdir < ${NB_DIRS}; subdir++)); do done echo "Total restaged files found: ${RESTAGEDFILES}" -if [ "0" != "$(ls ${ERROR_DIR} 2> /dev/null | wc -l)" ]; then +if [[ "0" != "$(ls ${ERROR_DIR} 2> /dev/null | wc -l)" ]]; then # there were some prepare errors echo "Several errors occured during prepare cancel test!" echo "Please check client pod logs in artifacts" @@ -906,20 +906,20 @@ test -z ${COMMENT} || annotate "test ${TESTID} FINISHED" "Summary:
NB_FILES: test -z $TAILPID || kill ${TAILPID} &> /dev/null RC=0 -if [ ${LASTCOUNT} -ne $((${NB_FILES} * ${NB_DIRS})) ]; then +if [[ ${LASTCOUNT} -ne $((${NB_FILES} * ${NB_DIRS})) ]]; then ((RC++)) echo "ERROR there were some lost files during the archive/retrieve test with ${NB_FILES} files (first 10):" grep -v retrieved ${STATUS_FILE} | sed -e "s;^;${EOS_DIR}/;" | head -10 fi -if [ $(cat ${LOGDIR}/prepare_sys.retrieve.req_id_*.log | grep -v value= | wc -l) -ne 0 ]; then +if [[ $(cat ${LOGDIR}/prepare_sys.retrieve.req_id_*.log | grep -v value= | wc -l) -ne 0 ]]; then # THIS IS NOT YET AN ERROR: UNCOMMENT THE FOLLOWING LINE WHEN https://gitlab.cern.ch/cta/CTA/issues/606 is fixed # ((RC++)) echo "ERROR $(cat ${LOGDIR}/prepare_sys.retrieve.req_id_*.log | grep -v value= | wc -l) files out of $(cat ${LOGDIR}/prepare_sys.retrieve.req_id_*.log | wc -l) prepared files have no sys.retrieve.req_id extended attribute set" fi -if [ ${RESTAGEDFILES} -ne "0" ]; then +if [[ ${RESTAGEDFILES} -ne "0" ]]; then ((RC++)) echo "ERROR some files were retrieved in spite of retrieve cancellation." fi @@ -927,9 +927,9 @@ fi # This one does not change the return code # WARNING if everything else was OK # ERROR otherwise as these xrootd failures could be the reason of the failure -if [ $(ls ${LOGDIR}/xrd_errors | wc -l) -ne 0 ]; then +if [[ $(ls ${LOGDIR}/xrd_errors | wc -l) -ne 0 ]]; then # ((RC++)) # do not change RC - if [ ${RC} -eq 0 ]; then + if [[ ${RC} -eq 0 ]]; then echo "WARNING several xrootd failures occured during this run, please check client dumps in ${LOGDIR}/xrd_errors." else echo "ERROR several xrootd failures occured during this run, please check client dumps in ${LOGDIR}/xrd_errors." 
diff --git a/continuousintegration/orchestration/tests/common/archive_file.sh b/continuousintegration/orchestration/tests/common/archive_file.sh index 84b6d050a42e9f4cce9d82877930350efa687996..4c5a24914150778e2724668e34b8fb428efb4bd2 100644 --- a/continuousintegration/orchestration/tests/common/archive_file.sh +++ b/continuousintegration/orchestration/tests/common/archive_file.sh @@ -35,11 +35,11 @@ while getopts "f:" o; do done shift $((OPTIND-1)) -if [ -z "${TEST_FILE_NAME}" ]; then +if [[ -z "${TEST_FILE_NAME}" ]]; then usage fi -if [ ! -z "${error}" ]; then +if [[ ! -z "${error}" ]]; then echo -e "ERROR:\n${error}" exit 1 fi diff --git a/continuousintegration/orchestration/tests/common/delete_file.sh b/continuousintegration/orchestration/tests/common/delete_file.sh index ebf8e5ea4f9df7e0d1d47131eb43674e8a80c3fd..dfb0c2bb6ce570afceb4f0e657e24bc00d0095bf 100644 --- a/continuousintegration/orchestration/tests/common/delete_file.sh +++ b/continuousintegration/orchestration/tests/common/delete_file.sh @@ -36,7 +36,7 @@ while getopts "i:f:" o; do done shift $((OPTIND-1)) -if [ ! -z "${error}" ]; then +if [[ ! -z "${error}" ]]; then echo -e "ERROR:\n${error}" exit 1 fi diff --git a/continuousintegration/orchestration/tests/cta_admin.sh b/continuousintegration/orchestration/tests/cta_admin.sh index c3de8e19cc971971275cb54af5ae122f32c5c6f0..17ee6c1763d4789953fd16297ce9474621248cad 100755 --- a/continuousintegration/orchestration/tests/cta_admin.sh +++ b/continuousintegration/orchestration/tests/cta_admin.sh @@ -554,7 +554,7 @@ echo # Wait for file corruption. #TIMEOUT=30 #SECONDS_PASSED=0 -#while [ -z "${RESPONSE}" ]; do +#while [[ -z "${RESPONSE}" ]]; do # log_message "Waiting for tape to be corrupted (${SECONDS_PASSED}s)..." # if test ${SECONDS_PASSED} -eq ${TIMEOUT}; then # log_message "Error. Timed out while waiting for tape to be corrupted." 
diff --git a/continuousintegration/orchestration/tests/eosNamespaceInject.sh b/continuousintegration/orchestration/tests/eosNamespaceInject.sh index c2d491317ee5cfd8e06ba1781b447043423ef0cf..773613f3c30dfbfb0f643694107490094ba450a4 100755 --- a/continuousintegration/orchestration/tests/eosNamespaceInject.sh +++ b/continuousintegration/orchestration/tests/eosNamespaceInject.sh @@ -44,11 +44,11 @@ while getopts "n:" o; do done shift $((OPTIND-1)) -if [ -z "${NAMESPACE}" ]; then +if [[ -z "${NAMESPACE}" ]]; then usage fi -if [ ! -z "${error}" ]; then +if [[ ! -z "${error}" ]]; then echo -e "ERROR:\n${error}" exit 1 fi diff --git a/continuousintegration/orchestration/tests/evict_tests.sh b/continuousintegration/orchestration/tests/evict_tests.sh index b80c25f9d7f9e58a718e1974a85edb2b900e385d..9a26f35d845cc43fcca0d63f0193038a11e4db42 100755 --- a/continuousintegration/orchestration/tests/evict_tests.sh +++ b/continuousintegration/orchestration/tests/evict_tests.sh @@ -72,7 +72,7 @@ DISK_FSID=$(KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 eos -- # 3.1. Check that evict fails when there is no tape replica echo "Testing 'eos root://${EOS_MGM_HOST} evict ${TEMP_FILE}'..." KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 eos root://${EOS_MGM_HOST} evict ${TEMP_FILE} -if [ $? -eq 0 ]; then +if [[ $? -eq 0 ]]; then error "'eos evict' command succeeded where it should have failed" else echo "'eos evict' command failed as expected" @@ -81,7 +81,7 @@ fi # 3.2. Check that evict --ignore-evict-counter --fsid fails when there is no tape replica echo "Testing 'eos root://${EOS_MGM_HOST} evict --ignore-evict-counter --fsid ${DISK_FSID} ${TEMP_FILE}'..." KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 eos root://${EOS_MGM_HOST} evict --ignore-evict-counter --fsid ${DISK_FSID} ${TEMP_FILE} -if [ $? -eq 0 ]; then +if [[ $? 
-eq 0 ]]; then error "'eos evict' command succeeded where it should have failed" else echo "'eos evict' command failed as expected" @@ -183,7 +183,7 @@ fi # 14. Test removing tape replica, should fail echo "Trying to remove tape replica with fsid ${FSID_TAPE}..." KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 eos root://${EOS_MGM_HOST} evict --ignore-evict-counter --fsid ${FSID_TAPE} ${TEMP_FILE} -if [ $? -eq 0 ]; then +if [[ $? -eq 0 ]]; then error "'eos evict --ignore-evict-counter --fsid ${FSID_TAPE}' command succeeded where it should have failed" else echo "'eos evict --ignore-evict-counter --fsid ${FSID_TAPE}' command failed as expected" @@ -197,7 +197,7 @@ fi # 15. Test removing non-existing replica, should fail echo "Trying to remove tape replica with non existing fsid ${FSID_NOT_SET_VALUE}..." KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 eos root://${EOS_MGM_HOST} evict --ignore-evict-counter --fsid ${FSID_NOT_SET_VALUE} ${TEMP_FILE} -if [ $? -eq 0 ]; then +if [[ $? -eq 0 ]]; then error "'eos evict --ignore-evict-counter --fsid ${FSID_NOT_SET_VALUE}' command succeeded where it should have failed" else echo "'eos evict --ignore-evict-counter --fsid ${FSID_NOT_SET_VALUE}' command failed as expected" @@ -211,7 +211,7 @@ fi # 16. Test removing existing replica, should fail when --ignore-evict-counter is not used echo "Testing 'eos root://${EOS_MGM_HOST} evict --fsid ${FSID_DUMMY_1_VALUE} ${TEMP_FILE}'... without '--ignore-evict-counter'" KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 eos root://${EOS_MGM_HOST} evict --fsid ${FSID_DUMMY_1_VALUE} ${TEMP_FILE} -if [ $? -eq 0 ]; then +if [[ $? -eq 0 ]]; then error "'eos evict' command succeeded where it should have failed" else echo "'eos evict' command failed as expected" @@ -220,7 +220,7 @@ fi # 17.1. 
Test removing existing replica with --ignore-removal-on-fst, should fail when --fsid is not used echo "Testing 'eos root://${EOS_MGM_HOST} evict --ignore-removal-on-fst ${TEMP_FILE}'... without '--fsid'" KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 eos root://${EOS_MGM_HOST} evict --ignore-removal-on-fst ${TEMP_FILE} -if [ $? -eq 0 ]; then +if [[ $? -eq 0 ]]; then error "'eos evict' command succeeded where it should have failed" else echo "'eos evict' command failed as expected" @@ -229,7 +229,7 @@ fi # 17.2. Test removing existing replica with --ignore-removal-on-fst, should fail when --ignore-evict-counter is not used echo "Testing 'eos root://${EOS_MGM_HOST} evict --ignore-removal-on-fst --fsid ${FSID_DUMMY_1_VALUE} ${TEMP_FILE}'... without '--ignore-evict-counter'" KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 eos root://${EOS_MGM_HOST} evict --ignore-removal-on-fst --fsid ${FSID_DUMMY_1_VALUE} ${TEMP_FILE} -if [ $? -eq 0 ]; then +if [[ $? -eq 0 ]]; then error "'eos evict' command succeeded where it should have failed" else echo "'eos evict' command failed as expected" @@ -238,7 +238,7 @@ fi # 18. Test removing one existing replica, should succeed and keep remaining replicas intact echo "Trying to remove a replica with existing fsid ${FSID_DUMMY_1_VALUE} ${TEMP_FILE} and --ignore-evict-counter..." KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 eos root://${EOS_MGM_HOST} evict --ignore-evict-counter --fsid ${FSID_DUMMY_1_VALUE} ${TEMP_FILE} -if [ $? -ne 0 ]; then +if [[ $? -ne 0 ]]; then error "'eos evict --ignore-evict-counter --fsid ${FSID_DUMMY_1_VALUE}' command failed where it should have succeeded" else echo "'eos evict --ignore-evict-counter --fsid ${FSID_DUMMY_1_VALUE}' command succeeded as expected" @@ -252,7 +252,7 @@ fi # 19. 
Test removing one existing replica, should succeed and keep remaining replicas intact echo "Trying to remove a replica with existing fsid ${FSID_DUMMY_2_VALUE} ${TEMP_FILE}, --ignore-removal-on-fst and --ignore-evict-counter..." KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 eos root://${EOS_MGM_HOST} evict --ignore-removal-on-fst --ignore-evict-counter --fsid ${FSID_DUMMY_2_VALUE} ${TEMP_FILE} -if [ $? -ne 0 ]; then +if [[ $? -ne 0 ]]; then error "'eos evict --ignore-removal-on-fst --ignore-evict-counter --fsid ${FSID_DUMMY_2_VALUE}' command failed where it should have succeeded" else echo "'eos evict --ignore-removal-on-fst --ignore-evict-counter --fsid ${FSID_DUMMY_2_VALUE}' command succeeded as expected" @@ -266,7 +266,7 @@ fi # 20. Test removing all remaining replicas echo "Trying to remove all disk replicas..." KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 eos root://${EOS_MGM_HOST} evict --ignore-evict-counter ${TEMP_FILE} -if [ $? -ne 0 ]; then +if [[ $? 
-ne 0 ]]; then error "'eos evict' command failed where it should have succeeded" else echo "'eos evict' command succeeded as expected" diff --git a/continuousintegration/orchestration/tests/grep_eosreport_for_archive_metadata.sh b/continuousintegration/orchestration/tests/grep_eosreport_for_archive_metadata.sh index 8b7105947ed45df09b704ae2ee07b95b26d299ca..807ea13c1d2287442c5f37f6d50a97786b500b13 100755 --- a/continuousintegration/orchestration/tests/grep_eosreport_for_archive_metadata.sh +++ b/continuousintegration/orchestration/tests/grep_eosreport_for_archive_metadata.sh @@ -22,7 +22,7 @@ usage() { exit 1 } -if [ -z "$1" ]; then +if [[ -z "$1" ]]; then usage fi diff --git a/continuousintegration/orchestration/tests/idempotent_prepare.sh b/continuousintegration/orchestration/tests/idempotent_prepare.sh index 5cc2e64ee8e3e6c402e6dffaab6d8cd764a3d54a..417e5e701da8c0633a23a7f3831893acee6a3e28 100755 --- a/continuousintegration/orchestration/tests/idempotent_prepare.sh +++ b/continuousintegration/orchestration/tests/idempotent_prepare.sh @@ -129,7 +129,7 @@ echo "Trigering EOS retrieve workflow as poweruser1:powerusers (expects error).. # We need the -s as we are staging the files from tape (see xrootd prepare definition) REQUEST_ID=$(KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 xrdfs ${EOS_MGM_HOST} prepare -s ${TEMP_FILE_FAIL}) -if [ $? -eq 0 ]; then +if [[ $? -eq 0 ]]; then echo "ERROR: Preparing a single file that does not exist (all files failec) should return an error." exit 1 fi @@ -157,7 +157,7 @@ echo "Trigering EOS retrieve workflow as poweruser1:powerusers..." # We need the -s as we are staging the files from tape (see xrootd prepare definition) REQUEST_ID=$(KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 xrdfs ${EOS_MGM_HOST} prepare -s ${TEMP_FILE_OK} ${TEMP_FILE}) -if [ $? -ne 0 ]; then +if [[ $? -ne 0 ]]; then echo "ERROR: Unexpected error returned by prepare command." 
exit 1 fi @@ -213,7 +213,7 @@ echo "Trigering EOS retrieve workflow as poweruser1:powerusers (expects error).. # We need the -s as we are staging the files from tape (see xrootd prepare definition) REQUEST_ID=$(KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 xrdfs ${EOS_MGM_HOST} prepare -s ${TEMP_FILE_1} ${TEMP_FILE_2}) -if [ $? -eq 0 ]; then +if [[ $? -eq 0 ]]; then echo "ERROR: Preparing command where no single file has prepare permissions (all files failed) should return an error." exit 1 fi @@ -244,7 +244,7 @@ echo "Trigering EOS retrieve workflow as poweruser1:powerusers..." # We need the -s as we are staging the files from tape (see xrootd prepare definition) REQUEST_ID=$(KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 xrdfs ${EOS_MGM_HOST} prepare -s ${TEMP_FILE_OK} ${TEMP_FILE}) -if [ $? -ne 0 ]; then +if [[ $? -ne 0 ]]; then echo "ERROR: Unexpected error returned by prepare command." exit 1 fi @@ -294,7 +294,7 @@ echo "Trigering EOS retrieve workflow as poweruser1:powerusers (expects error).. # We need the -s as we are staging the files from tape (see xrootd prepare definition) REQUEST_ID=$(KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 xrdfs ${EOS_MGM_HOST} prepare -s ${TEMP_FILE_1} ${TEMP_FILE_2}) -if [ $? -eq 0 ]; then +if [[ $? -eq 0 ]]; then echo "ERROR: Preparing command where no single file exists (all files failed) should return an error." exit 1 fi @@ -342,7 +342,7 @@ echo "Trigering EOS retrieve workflow as poweruser1:powerusers..." # We need the -s as we are staging the files from tape (see xrootd prepare definition) REQUEST_ID=$(cat ${TEST_FILES_TAPE_LIST} ${TEST_FILES_NO_P_LIST} ${TEST_FILES_NONE_LIST} | KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 xargs xrdfs ${EOS_MGM_HOST} prepare -s) -if [ $? -ne 0 ]; then +if [[ $? -ne 0 ]]; then echo "ERROR: Unexpected error returned by prepare command." 
exit 1 fi @@ -443,7 +443,7 @@ REQUEST_ID=$(KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 xrdfs echo "Trigering EOS abort workflow as poweruser1:powerusers..." KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 xrdfs ${EOS_MGM_HOST} prepare -a ${REQUEST_ID} ${TEMP_FILE} -if [ $? -ne 0 ]; then +if [[ $? -ne 0 ]]; then echo "ERROR: Prepare abort command should not have failed." exit 1 fi @@ -498,7 +498,7 @@ echo "Trigering EOS abort workflow as poweruser1:powerusers, for ${TEMP_FILE_TAP echo "Error expected" KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 xrdfs ${EOS_MGM_HOST} prepare -a ${REQUEST_ID} ${TEMP_FILE_TAPE} ${TEMP_FILE_NONE} -if [ $? -eq 0 ]; then +if [[ $? -eq 0 ]]; then echo "ERROR: Prepare abort command should have returned an error." exit 1 fi @@ -548,7 +548,7 @@ wait_for_retrieve ${EOS_MGM_HOST} ${TEMP_FILE} echo "Trigering EOS evict workflow as poweruser1:powerusers..." KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 xrdfs ${EOS_MGM_HOST} prepare -e ${TEMP_FILE} -if [ $? -ne 0 ]; then +if [[ $? -ne 0 ]]; then echo "ERROR: Prepare evict command should not have failed." exit 1 fi @@ -586,7 +586,7 @@ echo "Trigering EOS abort workflow as poweruser1:powerusers..." echo "Error expected" KRB5CCNAME=/tmp/${EOSPOWER_USER}/krb5cc_0 XrdSecPROTOCOL=krb5 xrdfs ${EOS_MGM_HOST} prepare -e ${TEMP_FILE_TAPE} ${TEMP_FILE_NONE} -if [ $? -eq 0 ]; then +if [[ $? -eq 0 ]]; then echo "ERROR: Prepare evict command should have returned an error." 
exit 1 fi diff --git a/continuousintegration/orchestration/tests/postrun_checks.sh b/continuousintegration/orchestration/tests/postrun_checks.sh index 5ffeb20128c70984352cecab4c5f9e6185f66dc0..4344459b8ef8b53624b71ef545cdc3178727d8ff 100755 --- a/continuousintegration/orchestration/tests/postrun_checks.sh +++ b/continuousintegration/orchestration/tests/postrun_checks.sh @@ -41,7 +41,7 @@ while getopts "n:w:q" o; do done shift $((OPTIND-1)) -if [ -z "${NAMESPACE}" ]; then +if [[ -z "${NAMESPACE}" ]]; then usage fi @@ -66,7 +66,7 @@ for pod in $(echo "${pods}" | jq -r '.metadata.name'); do fi pod_status=$(echo "${pods}" | jq -r "select(.metadata.name==\"${pod}\") | .status.phase") - if [ "${pod_status}" != "Succeeded" ] && [ "${pod_status}" != "Running" ]; then + if [[ "${pod_status}" != "Succeeded" ]] && [[ "${pod_status}" != "Running" ]]; then echo "Error: ${pod} is in state ${pod_status}" general_errors=$((general_errors + 1)) continue @@ -81,7 +81,7 @@ for pod in $(echo "${pods}" | jq -r '.metadata.name'); do bash -c "find /var/log/tmp/ -type f -name '*.core' 2>/dev/null \ | grep -v -E 'client-0-[0-9]+-xrdcp-.*\.core$' \ || true") - if [ -n "${coredumpfiles}" ]; then + if [[ -n "${coredumpfiles}" ]]; then num_files=$(wc -l <<< "${coredumpfiles}") echo "Found ${num_files} core dump files in pod ${pod} - container ${container}" core_dump_counter=$((core_dump_counter + num_files)) @@ -95,7 +95,7 @@ for pod in $(echo "${pods}" | jq -r '.metadata.name'); do | grep -a -E '\"log_level\":\"(ERROR|CRITICAL)\"' \ | grep -v -E 'Cannot update status for drive [A-Za-z0-9_-]+. 
Drive not found' \ || true") - if [ -n "${logged_error_messages}" ]; then + if [[ -n "${logged_error_messages}" ]]; then num_errors=$(wc -l <<< "${logged_error_messages}") all_logged_error_messages+="$logged_error_messages"$'\n' echo "Found ${num_errors} logged errors in pod ${pod} - container ${container}" @@ -106,7 +106,7 @@ done echo "" echo "Summary of logged error messages:" non_whitelisted_errors=0 -if [ -n "${all_logged_error_messages}" ]; then +if [[ -n "${all_logged_error_messages}" ]]; then while read -r count message; do if grep -Fxq "${message}" "${WHITELIST_FILE}"; then echo "(whitelisted) Count: ${count}, Message: \"${message}\"" @@ -129,7 +129,7 @@ echo "Found ${general_errors} general pod errors." echo "Found ${core_dump_counter} core dumps." echo "Found ${non_whitelisted_errors} logged errors not in the whitelist." -if [ "${core_dump_counter}" -gt 0 ] || [ "${non_whitelisted_errors}" -gt 0 ] || [ "${general_errors}" -gt 0 ]; then +if [[ "${core_dump_counter}" -gt 0 ]] || [[ "${non_whitelisted_errors}" -gt 0 ]] || [[ "${general_errors}" -gt 0 ]]; then echo "Failing due to non-whitelisted errors or core dumps or general errors." exit 1 fi diff --git a/continuousintegration/orchestration/tests/preflight_checks.sh b/continuousintegration/orchestration/tests/preflight_checks.sh index abc1e5b79b26e762e98a85b18b17a6f3717078aa..285815cd35aa90f32319d91f664a8df35f7cb81b 100755 --- a/continuousintegration/orchestration/tests/preflight_checks.sh +++ b/continuousintegration/orchestration/tests/preflight_checks.sh @@ -35,11 +35,11 @@ while getopts "n:" o; do done shift $((OPTIND-1)) -if [ -z "${NAMESPACE}" ]; then +if [[ -z "${NAMESPACE}" ]]; then usage fi -if [ ! -z "${error}" ]; then +if [[ ! -z "${error}" ]]; then echo -e "ERROR:\n${error}" exit 1 fi @@ -108,7 +108,7 @@ kubectl -n ${NAMESPACE} exec ${EOS_MGM_POD} -c eos-mgm -- xrdfs root://localhost INPUT_FILE_LIST="$(cat ${TMPDIR}/xrootd_API_src.json | jq -r '. 
| map(.path) | join(" ")')" OUTPUT_FILE_LIST="$(cat ${TMPDIR}/xrootd_API_query_prepare_result.json | jq -r '.responses| map(.path) | join(" ")')" -if [ "-${INPUT_FILE_LIST}-" == "-${OUTPUT_FILE_LIST}-" ]; then +if [[ "-${INPUT_FILE_LIST}-" == "-${OUTPUT_FILE_LIST}-" ]]; then echo "xrootd_API capabilities: SUCCESS" else echo "xrootd_API capabilities: FAILED" @@ -128,7 +128,7 @@ echo "-${OUTPUT_FILE_LIST}-" FLIGHTTEST_RC=$((${FLIGHTTEST_TPC_RC}+${FLIGHTTEST_XROOTD_API_RC})) echo -if [ ${FLIGHTTEST_RC} -eq 0 ]; then +if [[ ${FLIGHTTEST_RC} -eq 0 ]]; then echo "PREFLIGHT test: SUCCESS" else echo "PREFLIGHT test: FAILED" diff --git a/continuousintegration/orchestration/tests/prepare_tests.sh b/continuousintegration/orchestration/tests/prepare_tests.sh index 5c8d96196f6d30723819325e69d598069f6efdb9..d4a1a63fb0ed227ee3a4bb0510110c0162d396ca 100755 --- a/continuousintegration/orchestration/tests/prepare_tests.sh +++ b/continuousintegration/orchestration/tests/prepare_tests.sh @@ -33,7 +33,7 @@ while getopts "n:" o; do done shift $((OPTIND-1)) -if [ -z "${NAMESPACE}" ]; then +if [[ -z "${NAMESPACE}" ]]; then usage fi @@ -319,7 +319,7 @@ for VID in "${TAPES[@]}"; do # The external tape format test leaves data inside of the tape, then the tapes for labeling are not empty between # tests. That's why we need to force cta-tape-label, but only for CI testing. kubectl --namespace ${NAMESPACE} exec ${CTA_TPSRV_POD} -c cta-taped-0 -- cta-tape-label --vid ${VID} --force - if [ $? -ne 0 ]; then + if [[ $? 
-ne 0 ]]; then echo "ERROR: failed to label the tape ${VID}" exit 1 fi diff --git a/continuousintegration/orchestration/tests/prepare_tests_dcache.sh b/continuousintegration/orchestration/tests/prepare_tests_dcache.sh index 29d6c2a7d437814121a1e5f65a6e596c50c6fe52..7544643a3290f96da0d70dc836763b9854ac19a7 100755 --- a/continuousintegration/orchestration/tests/prepare_tests_dcache.sh +++ b/continuousintegration/orchestration/tests/prepare_tests_dcache.sh @@ -33,7 +33,7 @@ while getopts "n:" o; do done shift $((OPTIND-1)) -if [ -z "${NAMESPACE}" ]; then +if [[ -z "${NAMESPACE}" ]]; then usage fi @@ -221,7 +221,7 @@ for VID in "${TAPES[@]}"; do # The external tape format test leaves data inside of the tape, then the tapes for labeling are not empty between # tests. That's why we need to force cta-tape-label, but only for CI testing. kubectl --namespace ${NAMESPACE} exec ${CTA_TPSRV_POD} -c cta-taped-0 -- cta-tape-label --vid ${VID} --force - if [ $? -ne 0 ]; then + if [[ $? -ne 0 ]]; then echo "ERROR: failed to label the tape ${VID}" exit 1 fi diff --git a/continuousintegration/orchestration/tests/refresh_log_fd.sh b/continuousintegration/orchestration/tests/refresh_log_fd.sh index 5a9b03ad5f8917ac0d2bc0118fbf59e70f3d7578..09755e974fbd803206d65b98d45d045e33c496d6 100755 --- a/continuousintegration/orchestration/tests/refresh_log_fd.sh +++ b/continuousintegration/orchestration/tests/refresh_log_fd.sh @@ -18,7 +18,7 @@ log_dir="/var/log/cta/" STRACE_SLEEP_SECS=2 -if [ "$(find ${log_dir} -type f | wc -l)" -ne 1 ]; then +if [[ "$(find ${log_dir} -type f | wc -l)" -ne 1 ]]; then echo "ERROR: More than one log file found at ${log_dir}." exit 1 fi @@ -33,15 +33,15 @@ tpd_parent_pid=$(pgrep "parent" -u cta) tpd_maint_pid=$(pgrep "maint" -u cta) tpd_drv_pid=$(pgrep "drive" -u cta) -if [ -z "${tpd_parent_pid}" ]; then +if [[ -z "${tpd_parent_pid}" ]]; then echo "ERROR: No '$DRIVE_NAME-parent' process found." 
exit 1 fi -if [ -z "${tpd_maint_pid}" ]; then +if [[ -z "${tpd_maint_pid}" ]]; then echo "ERROR: No '$DRIVE_NAME-maint' process found." exit 1 fi -if [ -z "${tpd_drv_pid}" ]; then +if [[ -z "${tpd_drv_pid}" ]]; then echo "ERROR: No '$DRIVE_NAME-drive' process found." exit 1 fi @@ -52,7 +52,7 @@ echo "Found '$DRIVE_NAME-drive' process PID: ${tpd_drv_pid}" # Get number of file descriptor used to open the log file log_file_fd=$(lsof -p "${tpd_parent_pid}" | grep "${log_file}" | awk '{print $4}' | awk -F'[^0-9]' '{print $1}') -if [ -z "${log_file_fd}" ]; then +if [[ -z "${log_file_fd}" ]]; then echo "ERROR: No file descriptor found for log file ${log_file}." exit 1 else @@ -95,19 +95,19 @@ sleep 1 # Confirm that the file descriptor was reopened in all processes echo "Checking '$DRIVE_NAME-parent' file descriptor reopening..." -if [ "$(grep -c "close(${log_file_fd})" "${tpd_parent_tmp_file}")" -eq 0 ]; then +if [[ "$(grep -c "close(${log_file_fd})" "${tpd_parent_tmp_file}")" -eq 0 ]]; then echo "ERROR: File descriptor #${log_file_fd} not closed in '$DRIVE_NAME-parent'." exit 1 -elif [ "$(grep -c "close(${log_file_fd})" "${tpd_parent_tmp_file}")" -gt 1 ]; then +elif [[ "$(grep -c "close(${log_file_fd})" "${tpd_parent_tmp_file}")" -gt 1 ]]; then echo "ERROR: File descriptor #${log_file_fd} closed more than once in '$DRIVE_NAME-parent'." exit 1 else echo "OK: File descriptor #${log_file_fd} closed once in '$DRIVE_NAME-parent'." fi -if [ "$(grep "open" "${tpd_parent_tmp_file}" | grep -c "${log_file}")" -eq 0 ]; then +if [[ "$(grep "open" "${tpd_parent_tmp_file}" | grep -c "${log_file}")" -eq 0 ]]; then echo "ERROR: File descriptor for ${log_file} not reopened in '$DRIVE_NAME-parent'." exit 1 -elif [ "$(grep "open" "${tpd_parent_tmp_file}" | grep -c "${log_file}")" -gt 1 ]; then +elif [[ "$(grep "open" "${tpd_parent_tmp_file}" | grep -c "${log_file}")" -gt 1 ]]; then echo "ERROR: File descriptor for ${log_file} reopened more than once in '$DRIVE_NAME-parent'." 
exit 1 else @@ -115,19 +115,19 @@ else fi echo "Checking '$DRIVE_NAME-maint' file descriptor reopening..." -if [ "$(grep -c "close(${log_file_fd})" "${tpd_maint_tmp_file}")" -eq 0 ]; then +if [[ "$(grep -c "close(${log_file_fd})" "${tpd_maint_tmp_file}")" -eq 0 ]]; then echo "ERROR: File descriptor #${log_file_fd} not closed in '$DRIVE_NAME-maint'." exit 1 -elif [ "$(grep -c "close(${log_file_fd})" "${tpd_maint_tmp_file}")" -gt 1 ]; then +elif [[ "$(grep -c "close(${log_file_fd})" "${tpd_maint_tmp_file}")" -gt 1 ]]; then echo "ERROR: File descriptor #${log_file_fd} closed more than once in '$DRIVE_NAME-maint'." exit 1 else echo "OK: File descriptor #${log_file_fd} closed once in '$DRIVE_NAME-maint'." fi -if [ "$(grep "open" "${tpd_maint_tmp_file}" | grep -c "${log_file}")" -eq 0 ]; then +if [[ "$(grep "open" "${tpd_maint_tmp_file}" | grep -c "${log_file}")" -eq 0 ]]; then echo "ERROR: File descriptor for ${log_file} not reopened in '$DRIVE_NAME-maint'." exit 1 -elif [ "$(grep "open" "${tpd_maint_tmp_file}" | grep -c "${log_file}")" -gt 1 ]; then +elif [[ "$(grep "open" "${tpd_maint_tmp_file}" | grep -c "${log_file}")" -gt 1 ]]; then echo "ERROR: File descriptor for ${log_file} reopened more than once in '$DRIVE_NAME-maint'." exit 1 else @@ -135,19 +135,19 @@ else fi echo "Checking '$DRIVE_NAME-drive' drive handler file descriptor reopening..." -if [ "$(grep -c "close(${log_file_fd})" "${tpm_srv_tmp_file}")" -eq 0 ]; then +if [[ "$(grep -c "close(${log_file_fd})" "${tpm_srv_tmp_file}")" -eq 0 ]]; then echo "ERROR: File descriptor #${log_file_fd} not closed in '$DRIVE_NAME-drive' drive handler ." exit 1 -elif [ "$(grep -c "close(${log_file_fd})" "${tpm_srv_tmp_file}")" -gt 1 ]; then +elif [[ "$(grep -c "close(${log_file_fd})" "${tpm_srv_tmp_file}")" -gt 1 ]]; then echo "ERROR: File descriptor #${log_file_fd} closed more than once in '$DRIVE_NAME-drive' drive handler." 
exit 1 else echo "OK: File descriptor #${log_file_fd} closed once in '$DRIVE_NAME-drive' drive handler ." fi -if [ "$(grep "open" "${tpm_srv_tmp_file}" | grep -c "${log_file}")" -eq 0 ]; then +if [[ "$(grep "open" "${tpm_srv_tmp_file}" | grep -c "${log_file}")" -eq 0 ]]; then echo "ERROR: File descriptor for ${log_file} not reopened in '$DRIVE_NAME-drive' drive handler ." exit 1 -elif [ "$(grep "open" "${tpm_srv_tmp_file}" | grep -c "${log_file}")" -gt 1 ]; then +elif [[ "$(grep "open" "${tpm_srv_tmp_file}" | grep -c "${log_file}")" -gt 1 ]]; then echo "ERROR: File descriptor for ${log_file} reopened more than once in '$DRIVE_NAME-drive' drive handler ." exit 1 else diff --git a/continuousintegration/orchestration/tests/repack_helper.sh b/continuousintegration/orchestration/tests/repack_helper.sh index 0b748d86cf874706e6e8734d6ef68412a6fd49f0..c27d3ec7f3c8e2e29c1e0a3429652e105dbe25cb 100755 --- a/continuousintegration/orchestration/tests/repack_helper.sh +++ b/continuousintegration/orchestration/tests/repack_helper.sh @@ -33,11 +33,11 @@ while getopts "n:" o; do done shift $((OPTIND-1)) -if [ -z "${NAMESPACE}" ]; then +if [[ -z "${NAMESPACE}" ]]; then usage fi -if [ ! -z "${error}" ]; then +if [[ ! -z "${error}" ]]; then echo -e "ERROR:\n${error}" exit 1 fi diff --git a/continuousintegration/orchestration/tests/repack_systemtest.sh b/continuousintegration/orchestration/tests/repack_systemtest.sh index b24bcbc715c4d6bb134bfd4df2e03a57420457ac..8fa890dd5bf539a1100fe14ce12acfcb117a3f20 100755 --- a/continuousintegration/orchestration/tests/repack_systemtest.sh +++ b/continuousintegration/orchestration/tests/repack_systemtest.sh @@ -109,28 +109,28 @@ while getopts "v:f:e:b:t:r:n:ampud" o; do done shift $((OPTIND -1)) -if [ "x${REPACK_BUFFER_BASEDIR}" = "x" ]; then +if [[ "x${REPACK_BUFFER_BASEDIR}" = "x" ]]; then usage die "No repack buffer URL provided." 
fi -if [ "x${VID_TO_REPACK}" = "x" ]; then +if [[ "x${VID_TO_REPACK}" = "x" ]]; then usage die "No vid to repack provided." fi -if [ "x${MOUNT_POLICY_NAME}" = "x" ]; then +if [[ "x${MOUNT_POLICY_NAME}" = "x" ]]; then usage die "No mount policy name provided." fi -if [ ! -z $MAX_FILES_TO_SELECT ]; then +if [[ ! -z $MAX_FILES_TO_SELECT ]]; then MAX_FILES_TO_SELECT_ARG="--maxfilestoselect ${MAX_FILES_TO_SELECT}" fi REPACK_OPTION="" -if [ "x${ADD_COPIES_ONLY}" != "x" ] && [ "x${MOVE_ONLY}" != "x" ]; then +if [[ "x${ADD_COPIES_ONLY}" != "x" ]] && [[ "x${MOVE_ONLY}" != "x" ]]; then die "-a and -m options are mutually exclusive" fi @@ -166,7 +166,7 @@ admin_cta repack rm --vid ${VID_TO_REPACK} echo "Marking the tape ${VID_TO_REPACK} as full before Repacking it" admin_cta tape ch --vid ${VID_TO_REPACK} --full true -if [ ! -z $BACKPRESSURE_TEST ]; then +if [[ ! -z $BACKPRESSURE_TEST ]]; then echo "Backpressure test: setting too high free space requirements" # This should be idempotent as we will be called several times if [[ $( admin_cta --json ds ls | jq '.[] | select(.name=="repackBuffer") | .name') != '"repackBuffer"' ]]; then @@ -183,14 +183,14 @@ fi echo "Launching repack request for VID ${VID_TO_REPACK}, bufferURL = ${FULL_REPACK_BUFFER_URL}" NO_RECALL_FLAG="" -if [ ! -z $NO_RECALL ]; then +if [[ ! -z $NO_RECALL ]]; then NO_RECALL_FLAG="--nr" fi # Check if not all files will be repacked -if [ ! -z $MAX_FILES_TO_SELECT ]; then +if [[ ! -z $MAX_FILES_TO_SELECT ]]; then TOTAL_FILES_IN_TAPE=$(admin_cta --json tf ls --vid ${VID_TO_REPACK} | jq -r '. 
| length') - if [ "$TOTAL_FILES_IN_TAPE" -gt "$MAX_FILES_TO_SELECT" ]; then + if [[ "$TOTAL_FILES_IN_TAPE" -gt "$MAX_FILES_TO_SELECT" ]]; then echo "Partial repack covering only ${MAX_FILES_TO_SELECT}/${TOTAL_FILES_IN_TAPE} files from tape ${VID_TO_REPACK}" else echo "Partial repack covering all ${TOTAL_FILES_IN_TAPE} files from tape ${VID_TO_REPACK}" @@ -203,7 +203,7 @@ amountRecyleTapeFilesPrev=$(admin_cta --json recycletf ls --vid ${VID_TO_REPACK} echo "admin_cta repack add --mountpolicy ${MOUNT_POLICY_NAME} --vid ${VID_TO_REPACK} ${REPACK_OPTION} --bufferurl ${FULL_REPACK_BUFFER_URL} ${NO_RECALL_FLAG} ${MAX_FILES_TO_SELECT_ARG}" admin_cta repack add --mountpolicy ${MOUNT_POLICY_NAME} --vid ${VID_TO_REPACK} ${REPACK_OPTION} --bufferurl ${FULL_REPACK_BUFFER_URL} ${NO_RECALL_FLAG} ${MAX_FILES_TO_SELECT_ARG} || exit 1 -if [ ! -z $BACKPRESSURE_TEST ]; then +if [[ ! -z $BACKPRESSURE_TEST ]]; then echo "Backpressure test: waiting to see a report of sleeping retrieve queue." SECONDS_PASSED=0 while test 0 = $(admin_cta --json sq | jq -r ".[] | select(.vid == \"${VID_TO_REPACK}\" and .sleepingForSpace == true) | .vid" | wc -l); do @@ -259,9 +259,9 @@ if test 1 = $(admin_cta --json repack ls --vid ${VID_TO_REPACK} | jq -r '[.[0] | fi # If not all files were repacked, confirm this as true -if [ ! -z $MAX_FILES_TO_SELECT ]; then +if [[ ! -z $MAX_FILES_TO_SELECT ]]; then TOTAL_FILES_IN_TAPE=$(admin_cta --json tf ls --vid ${VID_TO_REPACK} | jq -r '. | length') - if [ "$TOTAL_FILES_IN_TAPE" -gt "$MAX_FILES_TO_SELECT" ]; then + if [[ "$TOTAL_FILES_IN_TAPE" -gt "$MAX_FILES_TO_SELECT" ]]; then if test 1 = $(admin_cta --json repack ls --vid ${VID_TO_REPACK} | jq -r '[.[0] | select (.allFilesSelectedAtStart == false)] | length') && test 0 != $(admin_cta --json tf ls --vid ${VID_TO_REPACK} | jq -r '. 
| length'); then echo "Partial repack selected a subset of files, as expected" else diff --git a/continuousintegration/orchestration/tests/restore_files.sh b/continuousintegration/orchestration/tests/restore_files.sh index 40a8bf8043038de12d4499e2b5fe1040ff250f7b..d873903468e449b3740ffc5e49aff124ed3af7f9 100755 --- a/continuousintegration/orchestration/tests/restore_files.sh +++ b/continuousintegration/orchestration/tests/restore_files.sh @@ -39,11 +39,11 @@ while getopts "n:" o; do done shift $((OPTIND-1)) -if [ -z "${NAMESPACE}" ]; then +if [[ -z "${NAMESPACE}" ]]; then usage fi -if [ ! -z "${error}" ]; then +if [[ ! -z "${error}" ]]; then echo -e "ERROR:\n${error}" exit 1 fi diff --git a/continuousintegration/orchestration/tests/stress_test.sh b/continuousintegration/orchestration/tests/stress_test.sh index db63b4acff715098b8e73b8e52dd0e3b1e0b1737..ae4a0427676af92b9c080553c4092eed832f3e3d 100755 --- a/continuousintegration/orchestration/tests/stress_test.sh +++ b/continuousintegration/orchestration/tests/stress_test.sh @@ -43,7 +43,7 @@ while getopts "n:Q" o; do done shift $((OPTIND-1)) -if [ -z "${NAMESPACE}" ]; then +if [[ -z "${NAMESPACE}" ]]; then usage fi diff --git a/continuousintegration/orchestration/tests/test_client.sh b/continuousintegration/orchestration/tests/test_client.sh index b358e5001e997543635870e6c3b4f59bf1fedbf7..b1d1a0f9dbbbbcc9e1f176378199161730431321 100755 --- a/continuousintegration/orchestration/tests/test_client.sh +++ b/continuousintegration/orchestration/tests/test_client.sh @@ -38,11 +38,11 @@ while getopts "n:q" o; do done shift $((OPTIND-1)) -if [ -z "${NAMESPACE}" ]; then +if [[ -z "${NAMESPACE}" ]]; then usage fi -if [ ! -z "${error}" ]; then +if [[ ! -z "${error}" ]]; then echo -e "ERROR:\n${error}" exit 1 fi @@ -56,7 +56,7 @@ EOS_MGM_HOST="ctaeos" if [[ ${PREPARE} -eq 1 ]]; then echo "Preparing namespace for the tests" . prepare_tests.sh -n ${NAMESPACE} - if [ $? -ne 0 ]; then + if [[ $? 
-ne 0 ]]; then echo "ERROR: failed to prepare namespace for the tests" exit 1 fi diff --git a/continuousintegration/orchestration/tests/test_client_gfal2.sh b/continuousintegration/orchestration/tests/test_client_gfal2.sh index 081ab080629b5126346d3f6436890466451af0b6..8b37e30d43778a80d8448beb0d45a59f8b06881b 100755 --- a/continuousintegration/orchestration/tests/test_client_gfal2.sh +++ b/continuousintegration/orchestration/tests/test_client_gfal2.sh @@ -34,18 +34,18 @@ while getopts "n:" o; do done shift $((OPTIND-1)) -if [ -z "${NAMESPACE}" ]; then +if [[ -z "${NAMESPACE}" ]]; then usage fi -if [ ! -z "${error}" ]; then +if [[ ! -z "${error}" ]]; then echo -e "ERROR:\n${error}" exit 1 fi echo "Preparing namespace for the tests" . prepare_tests.sh -n ${NAMESPACE} -if [ $? -ne 0 ]; then +if [[ $? -ne 0 ]]; then echo "ERROR: failed to prepare namespace for the tests" exit 1 fi diff --git a/continuousintegration/orchestration/tests/test_cta_admin.sh b/continuousintegration/orchestration/tests/test_cta_admin.sh index c79b898e2e85bb6f9ca2d1535ca760b849227277..1b340837ae6e63350282c5f320875563668d7b07 100755 --- a/continuousintegration/orchestration/tests/test_cta_admin.sh +++ b/continuousintegration/orchestration/tests/test_cta_admin.sh @@ -34,18 +34,18 @@ while getopts "n:" o; do done shift $((OPTIND-1)) -if [ -z "${NAMESPACE}" ]; then +if [[ -z "${NAMESPACE}" ]]; then usage fi -if [ -n "${error}" ]; then +if [[ -n "${error}" ]]; then echo -e "ERROR:\n${error}" exit 1 fi echo "Preparing namespace for the tests" . prepare_tests.sh -n "${NAMESPACE}" -if [ $? -ne 0 ]; then +if [[ $? -ne 0 ]]; then echo "ERROR: failed to prepare namespace for the tests" exit 1 fi @@ -60,7 +60,7 @@ kubectl -n "${NAMESPACE}" cp . ${CLIENT_POD}:/root/ -c client || exit 1 # ( # TIMEOUT=360 # SECS=0 -# while [ -z "${MESSAGE}" ]; do +# while [[ -z "${MESSAGE}" ]]; do # if test ${SECS} -eq ${TIMEOUT}; then # echo "ERROR. Timedout waiting for client to remove drive." 
# exit 1 @@ -94,7 +94,7 @@ kubectl -n "${NAMESPACE}" cp . ${CLIENT_POD}:/root/ -c client || exit 1 #( #TIMEOUT=360 #SECONDS_PASSED=0 -#while [ -z "${MESSAGE}" ]; do +#while [[ -z "${MESSAGE}" ]]; do # if test ${SECONDS_PASSED} -eq ${TIMEOUT}; then # echo "ERROR. Timedout waiting for ctafrontendpod to generate file." # exit 1 @@ -120,7 +120,7 @@ kubectl -n "${NAMESPACE}" cp . ${CLIENT_POD}:/root/ -c client || exit 1 # Restore tape #SECONDS_PASSED=0 -#while [ -z "${TEST_DONE}" ]; do +#while [[ -z "${TEST_DONE}" ]]; do # if test ${SECONDS_PASSED} -eq ${TIMEOUT}; then # echo "ERROR. Timed out while waiting for client pod to get fail request." # exit 1 diff --git a/continuousintegration/orchestration/tests/test_external_tape_formats.sh b/continuousintegration/orchestration/tests/test_external_tape_formats.sh index 1bc41809a5cfd52fd72c7f59edb2aabadbfa3712..756b7ea0009a880e3f3ae463bf7df6fb1458756e 100755 --- a/continuousintegration/orchestration/tests/test_external_tape_formats.sh +++ b/continuousintegration/orchestration/tests/test_external_tape_formats.sh @@ -36,7 +36,7 @@ while getopts "n:" o; do done shift $((OPTIND-1)) -if [ -z "${NAMESPACE}" ]; then +if [[ -z "${NAMESPACE}" ]]; then usage fi diff --git a/continuousintegration/orchestration/tests/test_liquibase_update.sh b/continuousintegration/orchestration/tests/test_liquibase_update.sh index bda930dff36a95603d536443cc5679862a84cf3a..db0054e6424d6ab42ad86302e433d585a8b7ea7a 100755 --- a/continuousintegration/orchestration/tests/test_liquibase_update.sh +++ b/continuousintegration/orchestration/tests/test_liquibase_update.sh @@ -36,7 +36,7 @@ check_schema_version() { | grep -o -E '[0-9]+\.[0-9]') # Check if the current schema version is the same as the previous one - if [ ${CURRENT_SCHEMA_VERSION} == ${DESIRED_SCHEMA_VERSION} ]; then + if [[ ${CURRENT_SCHEMA_VERSION} == ${DESIRED_SCHEMA_VERSION} ]]; then echo "The current Catalogue Schema Version is: ${CURRENT_SCHEMA_VERSION}" else echo "Error. 
Unexpected Catalogue Schema Version: ${CURRENT_SCHEMA_VERSION}, it should be: ${DESIRED_SCHEMA_VERSION}" @@ -56,7 +56,7 @@ while getopts "n:" o; do done shift $((OPTIND-1)) -if [ -z "${NAMESPACE}" ]; then +if [[ -z "${NAMESPACE}" ]]; then usage fi diff --git a/continuousintegration/orchestration/tests/test_regression_dCache.sh b/continuousintegration/orchestration/tests/test_regression_dCache.sh index 32a1128c515a29af266f5c144826f1f6d26b8806..23e4e24514f12d3fc7bbfb86e5da547b28f243ea 100755 --- a/continuousintegration/orchestration/tests/test_regression_dCache.sh +++ b/continuousintegration/orchestration/tests/test_regression_dCache.sh @@ -37,7 +37,7 @@ while getopts "n:" o; do done shift $((OPTIND - 1)) -if [ -z "${NAMESPACE}" ]; then +if [[ -z "${NAMESPACE}" ]]; then usage fi diff --git a/continuousintegration/orchestration/tests/test_repack.sh b/continuousintegration/orchestration/tests/test_repack.sh index 43601329e9f47901146bdb93065a4a91f1dab6c0..df6346e5a4092361294a9c1951d0327ea6376d6c 100755 --- a/continuousintegration/orchestration/tests/test_repack.sh +++ b/continuousintegration/orchestration/tests/test_repack.sh @@ -35,11 +35,11 @@ while getopts "n:" o; do done shift $((OPTIND-1)) -if [ -z "${NAMESPACE}" ]; then +if [[ -z "${NAMESPACE}" ]]; then usage fi -if [ ! -z "${error}" ]; then +if [[ ! -z "${error}" ]]; then echo -e "ERROR:\n${error}" exit 1 fi @@ -261,7 +261,7 @@ repackJustMoveWithMaxFiles() { modifyTapeState ${VID_TO_REPACK} ACTIVE echo "Reclaiming tape ${VID_TO_REPACK}, should fail because tape should still have files" kubectl -n ${NAMESPACE} exec ${CTA_CLI_POD} -c cta-cli -- cta-admin tape reclaim --vid ${VID_TO_REPACK} - if [ $? -eq 0 ]; then + if [[ $? 
-eq 0 ]]; then echo "Reclaim should have failed" exit 1 else @@ -280,7 +280,7 @@ repackJustMoveWithMaxFiles() { modifyTapeState ${VID_TO_REPACK} ACTIVE echo "Reclaiming tape ${VID_TO_REPACK}, should succeed because tape no longer has files" kubectl -n ${NAMESPACE} exec ${CTA_CLI_POD} -c cta-cli -- cta-admin tape reclaim --vid ${VID_TO_REPACK} - if [ $? -eq 0 ]; then + if [[ $? -eq 0 ]]; then echo "Reclaim succeeded as expected" else echo "Reclaim should have succeeded" diff --git a/continuousintegration/orchestration/upgrade_cta_instance.sh b/continuousintegration/orchestration/upgrade_cta_instance.sh index ca1b4fec180068d60c57beef6895590acaa24535..4c51a3be273082eb2554b72d3fc740601ea58ae9 100755 --- a/continuousintegration/orchestration/upgrade_cta_instance.sh +++ b/continuousintegration/orchestration/upgrade_cta_instance.sh @@ -117,7 +117,7 @@ upgrade_instance() { done # Argument checks - if [ -z "${namespace}" ]; then + if [[ -z "${namespace}" ]]; then echo "Missing mandatory argument: -n | --namespace" usage fi @@ -127,22 +127,22 @@ upgrade_instance() { helm_flags="" - if [ $force == 1 ]; then + if [[ $force == 1 ]]; then helm_flags+=" --force" fi - if [ -n "$cta_image_repository" ]; then + if [[ -n "$cta_image_repository" ]]; then helm_flags+=" --set global.image.repository=${cta_image_repository}" fi - if [ -n "$cta_image_tag" ]; then + if [[ -n "$cta_image_tag" ]]; then helm_flags+=" --set global.image.tag=${cta_image_tag}" fi - if [ -n "$catalogue_schema_version" ]; then + if [[ -n "$catalogue_schema_version" ]]; then helm_flags+=" --set global.catalogueSchemaVersion=${catalogue_schema_version}" fi - if [ -n "$scheduler_config" ]; then + if [[ -n "$scheduler_config" ]]; then helm_flags+=" --set-file global.configuration.scheduler=${scheduler_config}" fi - if [ -n "$tapeservers_config" ]; then + if [[ -n "$tapeservers_config" ]]; then devices_in_use=$(kubectl get all --all-namespaces -l cta/library-device -o 
jsonpath='{.items[*].metadata.labels.cta/library-device}' | tr ' ' '\n' | sort | uniq) # Check that all devices in the provided config are already in use for library_device in $(awk '/libraryDevice:/ {gsub("\"","",$2); print $2}' "$tapeservers_config"); do diff --git a/continuousintegration/release/check_rpm_available.sh b/continuousintegration/release/check_rpm_available.sh index 71f661712ef5f4ef21a999cc650ddc636f8d3531..c022ce0037ae8986690770f888f2019d3936f344 100755 --- a/continuousintegration/release/check_rpm_available.sh +++ b/continuousintegration/release/check_rpm_available.sh @@ -70,15 +70,15 @@ check_package_available() { shift done - if [ -z "${repository}" ]; then + if [[ -z "${repository}" ]]; then echo "Failure: Missing mandatory argument --repository-url" usage fi - if [ -z "${package}" ]; then + if [[ -z "${package}" ]]; then echo "Failure: Missing mandatory argument --package" usage fi - if [ -z "${version}" ]; then + if [[ -z "${version}" ]]; then echo "Failure: Missing mandatory argument --version" usage fi diff --git a/continuousintegration/release/upload_to_eos.sh b/continuousintegration/release/upload_to_eos.sh index e8070d260838108d3ca54cb7e5deaf8333ed0d18..5348572d9972ee2a81ac76b37b56d5072eb2c928 100755 --- a/continuousintegration/release/upload_to_eos.sh +++ b/continuousintegration/release/upload_to_eos.sh @@ -128,79 +128,79 @@ upload_to_eos() { shift done - if [ -z "${eos_account_username}" ]; then + if [[ -z "${eos_account_username}" ]]; then echo "Failure: Missing mandatory argument --eos-username" usage fi - if [ -z "${eos_account_password}" ]; then + if [[ -z "${eos_account_password}" ]]; then echo "Failure: Missing mandatory argument --eos-password" usage fi - if [ -z "${eos_target_dir}" ]; then + if [[ -z "${eos_target_dir}" ]]; then echo "Failure: Missing mandatory argument --eos-target-dir" usage fi - if [ -z "${local_source_dir}" ] && [ -z "${eos_source_dir}" ]; then + if [[ -z "${local_source_dir}" ]] && [[ -z "${eos_source_dir}" 
]]; then echo "Failure: Missing mandatory argument --local-source-dir or --eos-source-dir" usage fi - if [ -n "${local_source_dir}" ] && [ -n "${eos_source_dir}" ]; then + if [[ -n "${local_source_dir}" ]] && [[ -n "${eos_source_dir}" ]]; then echo "Failure: Do not use both arguments --local-source-dir and --eos-source-dir" usage fi # Check the source directory exists - if [ -n "${local_source_dir}" ] && [ ! -d "${local_source_dir}" ]; then + if [[ -n "${local_source_dir}" ]] && [[ ! -d "${local_source_dir}" ]]; then echo "ERROR: Source directory ${local_source_dir} doesn't exist" exit 1 fi # Check the cta_version argument was received - if [ -n "${eos_source_dir}" ] && [ -z "${cta_version}" ]; then + if [[ -n "${eos_source_dir}" ]] && [[ -z "${cta_version}" ]]; then echo "ERROR: Argument --eos-source-dir should be used with --cta-version" exit 1 fi # Get credentials echo "$eos_account_password" | kinit $eos_account_username@CERN.CH 2>&1 >/dev/null - if [ $? -ne 0 ]; then + if [[ $? -ne 0 ]]; then echo "ERROR: Failed to get Krb5 credentials for $eos_account_username" exit 1 fi - if [ -n "${local_source_dir}" ]; then + if [[ -n "${local_source_dir}" ]]; then # Rely on xrootd to do the copy of files to EOS xrdcp --force --recursive "${local_source_dir}"/ root://eoshome.cern.ch/"${eos_target_dir}"/ 2>&1 >/dev/null - if [ $? -ne 0 ]; then + if [[ $? -ne 0 ]]; then echo "ERROR: Failed to copy files to ${eos_target_dir} via xrdcp" exit 1 fi fi - if [ -n "${eos_source_dir}" ]; then + if [[ -n "${eos_source_dir}" ]]; then # Rely on xrootd to copy the files, inside EOS, with the provided cta-version xrdfs root://eoshome.cern.ch/ ls -R "${eos_source_dir}" \ | grep "${cta_version}" \ | sed "s|^${eos_source_dir}||" \ | xargs -I {} xrdcp --force --recursive root://eoshome.cern.ch/"${eos_source_dir}"/{} root://eoshome.cern.ch/"${eos_target_dir}"/{} 2>&1 >/dev/null - if [ $? -ne 0 ]; then + if [[ $? 
-ne 0 ]]; then echo "ERROR: Failed to copy release ${cta_version} files from ${eos_source_dir} to ${eos_target_dir} via xrdcp" exit 1 fi fi # Run the provided hook on lxplus - if [ -n "${hook}" ]; then + if [[ -n "${hook}" ]]; then ssh -o StrictHostKeyChecking=no \ -o GSSAPIAuthentication=yes \ -o GSSAPITrustDNS=yes \ -o GSSAPIDelegateCredentials=yes \ $eos_account_username@lxplus.cern.ch $hook 2>&1 - if [ $? -ne 0 ]; then + if [[ $? -ne 0 ]]; then echo "ERROR: Something went wrong while running hook $hook on lxplus" exit 1 fi @@ -209,7 +209,7 @@ upload_to_eos() { # Destroy credentials kdestroy - if [ $? -ne 0 ]; then + if [[ $? -ne 0 ]]; then echo "WARNING: Krb5 credentials for $eos_account_username have not been cleared up" fi } diff --git a/continuousintegration/utils/get_latest_image_tag_on_branch.sh b/continuousintegration/utils/get_latest_image_tag_on_branch.sh index fa68f11e4ddb5a09cb78945422b6fdeed27c014d..1730be76e38f7d7d116ba1813514b11427a3160c 100755 --- a/continuousintegration/utils/get_latest_image_tag_on_branch.sh +++ b/continuousintegration/utils/get_latest_image_tag_on_branch.sh @@ -48,7 +48,7 @@ while [[ "$#" -gt 0 ]]; do shift done -if [ -z "$branch" ]; then +if [[ -z "$branch" ]]; then die "Please provide a branch to find the latest image tag for" fi @@ -67,7 +67,7 @@ commit_id=$(curl --silent --url "https://gitlab.cern.ch/api/v4/projects/139306/r imagetag=$(./list_images.sh 2>/dev/null | grep ${commit_id} | tail -n1) # Validate if image tag was found -if [ -z "${imagetag}" ]; then +if [[ -z "${imagetag}" ]]; then die "ERROR: Commit ${commit_id} has no Docker image available in the GitLab registry. Please check the pipeline status and available images." 
fi diff --git a/continuousintegration/utils/tape/generate_tapeservers_config.sh b/continuousintegration/utils/tape/generate_tapeservers_config.sh index 7cfe031548e4f2056ea2018180f77356e77d897a..eda0961ad56ea14b5075cbb5a2a9b5c0b6b51065 100755 --- a/continuousintegration/utils/tape/generate_tapeservers_config.sh +++ b/continuousintegration/utils/tape/generate_tapeservers_config.sh @@ -56,11 +56,11 @@ generate_tpsrvs_config_for_library() { # Find the line in lsscsi output corresponding to the current library device local line=$(echo "$lsscsi_g" | grep "mediumx" | grep "${library_device}") - if [ -z "$line" ]; then + if [[ -z "$line" ]]; then echo "Library device $library_device does not exist. Skipping..." 1>&2 return fi - if [ $(echo "$line" | wc -l) -gt 1 ]; then + if [[ $(echo "$line" | wc -l) -gt 1 ]]; then echo "Too many lines matching library device \"$library_device\" found. Ensure you passed the correct library device. Skipping..." 1>&2 return fi @@ -101,7 +101,7 @@ done) EOF ((tpsrv_counter++)) - if [ "$tpsrv_counter" -gt "$max_tape_servers" ]; then + if [[ "$tpsrv_counter" -gt "$max_tape_servers" ]]; then return fi done