From 0cf89ba317033392e5791ddd9feeb9021bb746d2 Mon Sep 17 00:00:00 2001 From: guenther Date: Thu, 27 Nov 2025 15:59:56 +0100 Subject: [PATCH 01/12] fixing rule cpp:S6004 --- catalogue/DatabaseMetadataGetter.cpp | 6 +- catalogue/rdbms/RdbmsArchiveFileCatalogue.cpp | 18 ++--- .../RdbmsCatalogueGetArchiveFilesItor.cpp | 13 ++- .../RdbmsCatalogueGetFileRecycleLogItor.cpp | 19 ++--- catalogue/rdbms/RdbmsCatalogueUtils.cpp | 3 +- catalogue/rdbms/RdbmsDriveStateCatalogue.cpp | 3 +- catalogue/rdbms/RdbmsSchemaCatalogue.cpp | 6 +- catalogue/rdbms/RdbmsTapeFileCatalogue.cpp | 18 ++--- cmdline/CtaAdminParsedCmd.cpp | 16 ++-- cmdline/EosCtaStub.cpp | 4 +- .../common/ConnectionConfiguration.cpp | 3 +- .../restore_files/RestoreFilesCmd.cpp | 9 +-- common/CRC.cpp | 12 ++- common/config/Config.cpp | 3 +- common/exception/Backtrace.cpp | 3 +- common/json/object/JSONCObject.cpp | 6 +- common/threading/System.cpp | 9 +-- common/utils/Regex.cpp | 6 +- common/utils/utils.cpp | 6 +- disk/DiskSystem.cpp | 24 +++--- eos_grpc_client/GrpcClient.cpp | 3 +- frontend/common/AdminCmd.cpp | 10 ++- frontend/common/AdminCmdOptions.hpp | 3 +- frontend/common/DriveLsResponseStream.cpp | 43 +++++----- frontend/common/FrontendService.cpp | 28 +++---- frontend/common/TapeLsResponseStream.cpp | 3 +- frontend/common/WorkflowEvent.cpp | 13 ++- frontend/grpc/FrontendGrpcService.cpp | 3 +- frontend/grpc/Main.cpp | 3 +- .../grpc/ServiceKerberosAuthProcessor.cpp | 12 +-- frontend/grpc/TokenStorage.cpp | 3 +- mediachanger/CommonMarshal.cpp | 4 +- mediachanger/io.cpp | 22 +++-- mediachanger/librmc/Cdomainname.cpp | 3 +- mediachanger/rmcd/rmc_serv.cpp | 19 +++-- objectstore/Algorithms.hpp | 17 ++-- objectstore/ArchiveQueue.cpp | 20 ++--- objectstore/RetrieveQueue.cpp | 14 ++-- objectstore/RetrieveQueueAlgorithms.hpp | 3 +- objectstore/Sorter.cpp | 4 +- plugin-manager/PluginManager.hpp | 3 +- rdbms/Login.cpp | 4 +- rdbms/wrapper/OcciColumn.cpp | 13 +-- rdbms/wrapper/ParamNameToIdx.cpp | 4 +- 
rdbms/wrapper/PostgresConn.cpp | 9 +-- rdbms/wrapper/PostgresRset.cpp | 6 +- rdbms/wrapper/PostgresStmt.cpp | 4 +- rdbms/wrapper/SqliteConn.cpp | 10 +-- rdbms/wrapper/SqliteStmt.cpp | 8 +- scheduler/OStoreDB/OStoreDB.cpp | 76 +++++++++--------- scheduler/Scheduler.cpp | 11 +-- scheduler/rdbms/RelationalDB.cpp | 6 +- scheduler/rdbms/RepackRequest.cpp | 20 ++--- scheduler/rdbms/postgres/ArchiveJobQueue.cpp | 4 +- scheduler/rdbms/postgres/RetrieveJobQueue.cpp | 4 +- .../rdbms/schema/CreateSchemaCmdLineArgs.cpp | 4 +- scheduler/rdbms/schema/DropSchemaCmd.cpp | 3 +- scheduler/rdbms/schema/SchedulerSchema.cpp | 3 +- statistics/StatisticsUpdateCmdLineArgs.cpp | 4 +- .../castor/tape/tapeserver/SCSI/Device.cpp | 8 +- .../tape/tapeserver/daemon/CleanerSession.cpp | 4 +- .../tapeserver/daemon/DataTransferSession.cpp | 4 +- .../daemon/MigrationReportPacker.cpp | 4 +- .../tapeserver/daemon/RecallTaskInjector.cpp | 3 +- .../daemon/TapeWriteSingleThread.cpp | 4 +- .../tape/tapeserver/daemon/TapeWriteTask.cpp | 4 +- .../tape/tapeserver/drive/DriveGeneric.cpp | 12 +-- .../file/EnstoreLargeReadSession.cpp | 4 +- .../tapeserver/file/EnstoreReadSession.cpp | 4 +- .../tape/tapeserver/file/HeaderChecker.cpp | 2 +- .../tape/tapeserver/system/FileWrappers.cpp | 61 +++++++------- tapeserver/daemon/DriveHandler.cpp | 18 ++--- .../daemon/DriveHandlerStateReporter.cpp | 43 +++++----- tapeserver/daemon/ProcessManager.cpp | 80 +++++++++---------- tapeserver/readtp/ReadtpCmd.cpp | 3 +- tests/ImmutableFileTestCmdLineArgs.cpp | 4 +- xroot_plugins/XrdSsiCtaServiceProvider.cpp | 4 +- 77 files changed, 402 insertions(+), 477 deletions(-) diff --git a/catalogue/DatabaseMetadataGetter.cpp b/catalogue/DatabaseMetadataGetter.cpp index 960e171af8..ed4230a534 100644 --- a/catalogue/DatabaseMetadataGetter.cpp +++ b/catalogue/DatabaseMetadataGetter.cpp @@ -93,10 +93,10 @@ SchemaVersion DatabaseMetadataGetter::getCatalogueVersion(){ try{ auto rset2 = stmt2.executeQuery(); if(rset2.next()){ - auto 
schemaVersionMajorNext = rset2.columnOptionalUint64("NEXT_SCHEMA_VERSION_MAJOR"); - auto schemaVersionMinorNext = rset2.columnOptionalUint64("NEXT_SCHEMA_VERSION_MINOR"); auto schemaStatus = rset2.columnString("STATUS"); - if(schemaVersionMajorNext.has_value() && schemaVersionMinorNext.has_value()){ + if (auto schemaVersionMajorNext = rset2.columnOptionalUint64("NEXT_SCHEMA_VERSION_MAJOR"), + schemaVersionMinorNext = rset2.columnOptionalUint64("NEXT_SCHEMA_VERSION_MINOR"); + schemaVersionMajorNext.has_value() && schemaVersionMinorNext.has_value()) { schemaVersionBuilder.nextSchemaVersionMajor(schemaVersionMajorNext.value()) .nextSchemaVersionMinor(schemaVersionMinorNext.value()) .status(schemaStatus); diff --git a/catalogue/rdbms/RdbmsArchiveFileCatalogue.cpp b/catalogue/rdbms/RdbmsArchiveFileCatalogue.cpp index 5985ad193b..c8a28c62e6 100644 --- a/catalogue/rdbms/RdbmsArchiveFileCatalogue.cpp +++ b/catalogue/rdbms/RdbmsArchiveFileCatalogue.cpp @@ -360,13 +360,10 @@ common::dataStructures::ArchiveFileSummary RdbmsArchiveFileCatalogue::getTapeFil TAPE.TAPE_POOL_ID = TAPE_POOL.TAPE_POOL_ID )SQL"; - const bool thereIsAtLeastOneSearchCriteria = - searchCriteria.archiveFileId || - searchCriteria.diskInstance || - searchCriteria.vid || - searchCriteria.diskFileIds; - - if(thereIsAtLeastOneSearchCriteria) { + if(searchCriteria.archiveFileId || + searchCriteria.diskInstance || + searchCriteria.vid || + searchCriteria.diskFileIds) { sql += R"SQL( WHERE )SQL"; } @@ -636,8 +633,8 @@ common::dataStructures::TapeCopyToPoolMap RdbmsArchiveFileCatalogue::getTapeCopy const auto copyNb = static_cast(rset.columnUint64("COPY_NB")); const std::string tapePoolName = rset.columnString("TAPE_POOL_NAME"); auto archiveRouteTypeStr = rset.columnString("ARCHIVE_ROUTE_TYPE"); - auto archiveRouteType = common::dataStructures::strToArchiveRouteType(archiveRouteTypeStr); - if (archiveRouteType == common::dataStructures::ArchiveRouteType::DEFAULT && copyToPoolMap.contains(copyNb)) { + if (auto 
archiveRouteType = common::dataStructures::strToArchiveRouteType(archiveRouteTypeStr); + archiveRouteType == common::dataStructures::ArchiveRouteType::DEFAULT && copyToPoolMap.contains(copyNb)) { // A DEFAULT archive route type should not override a previously found value continue; } @@ -950,10 +947,9 @@ std::unique_ptr RdbmsArchiveFileCatalogue::getArchiveFileRowById )SQL"; auto stmt = conn.createStmt(sql); stmt.bindUint64(":ARCHIVE_FILE_ID", id); - auto rset = stmt.executeQuery(); std::unique_ptr row; - if (rset.next()) { + if (auto rset = stmt.executeQuery(); rset.next()) { row = std::make_unique(); row->archiveFileId = rset.columnUint64("ARCHIVE_FILE_ID"); diff --git a/catalogue/rdbms/RdbmsCatalogueGetArchiveFilesItor.cpp b/catalogue/rdbms/RdbmsCatalogueGetArchiveFilesItor.cpp index b559816c5a..cafad4b83a 100644 --- a/catalogue/rdbms/RdbmsCatalogueGetArchiveFilesItor.cpp +++ b/catalogue/rdbms/RdbmsCatalogueGetArchiveFilesItor.cpp @@ -109,14 +109,11 @@ RdbmsCatalogueGetArchiveFilesItor::RdbmsCatalogueGetArchiveFilesItor( TAPE.TAPE_POOL_ID = TAPE_POOL.TAPE_POOL_ID )SQL"; - const bool thereIsAtLeastOneSearchCriteria = - searchCriteria.archiveFileId.has_value() || - searchCriteria.diskInstance.has_value() || - searchCriteria.vid.has_value() || - searchCriteria.diskFileIds.has_value() || - searchCriteria.fSeq.has_value(); - - if(thereIsAtLeastOneSearchCriteria) { + if(searchCriteria.archiveFileId.has_value() || + searchCriteria.diskInstance.has_value() || + searchCriteria.vid.has_value() || + searchCriteria.diskFileIds.has_value() || + searchCriteria.fSeq.has_value()) { sql += R"SQL( WHERE )SQL"; } diff --git a/catalogue/rdbms/RdbmsCatalogueGetFileRecycleLogItor.cpp b/catalogue/rdbms/RdbmsCatalogueGetFileRecycleLogItor.cpp index 77c75be698..b24014959d 100644 --- a/catalogue/rdbms/RdbmsCatalogueGetFileRecycleLogItor.cpp +++ b/catalogue/rdbms/RdbmsCatalogueGetFileRecycleLogItor.cpp @@ -66,17 +66,14 @@ 
RdbmsCatalogueGetFileRecycleLogItor::RdbmsCatalogueGetFileRecycleLogItor( VIRTUAL_ORGANIZATION ON VIRTUAL_ORGANIZATION.VIRTUAL_ORGANIZATION_ID = STORAGE_CLASS.VIRTUAL_ORGANIZATION_ID )SQL"; - const bool thereIsAtLeastOneSearchCriteria = - searchCriteria.vid.has_value() || - searchCriteria.diskFileIds.has_value() || - searchCriteria.archiveFileId.has_value() || - searchCriteria.copynb.has_value() || - searchCriteria.diskInstance.has_value() || - searchCriteria.recycleLogTimeMin.has_value() || - searchCriteria.recycleLogTimeMax.has_value() || - searchCriteria.vo.has_value(); - - if(thereIsAtLeastOneSearchCriteria) { + if (searchCriteria.vid.has_value() || + searchCriteria.diskFileIds.has_value() || + searchCriteria.archiveFileId.has_value() || + searchCriteria.copynb.has_value() || + searchCriteria.diskInstance.has_value() || + searchCriteria.recycleLogTimeMin.has_value() || + searchCriteria.recycleLogTimeMax.has_value() || + searchCriteria.vo.has_value()) { sql += R"SQL( WHERE )SQL"; } diff --git a/catalogue/rdbms/RdbmsCatalogueUtils.cpp b/catalogue/rdbms/RdbmsCatalogueUtils.cpp index 34f16b28d9..e859898800 100644 --- a/catalogue/rdbms/RdbmsCatalogueUtils.cpp +++ b/catalogue/rdbms/RdbmsCatalogueUtils.cpp @@ -101,8 +101,7 @@ std::optional RdbmsCatalogueUtils::defaultVirtualOrganizationForRep IS_REPACK_VO = '1' )SQL"; auto stmt = conn.createStmt(sql); - auto rset = stmt.executeQuery(); - if (rset.next()) { + if (auto rset = stmt.executeQuery(); rset.next()) { return rset.columnString("VIRTUAL_ORGANIZATION_NAME"); } else { return std::nullopt; } diff --git a/catalogue/rdbms/RdbmsDriveStateCatalogue.cpp b/catalogue/rdbms/RdbmsDriveStateCatalogue.cpp index 540b9bb022..88d401d51d 100644 --- a/catalogue/rdbms/RdbmsDriveStateCatalogue.cpp +++ b/catalogue/rdbms/RdbmsDriveStateCatalogue.cpp @@ -585,9 +585,8 @@ std::optional RdbmsDriveStateCatalogue::getTa auto conn = m_connPool->getConn(); auto stmt = conn.createStmt(sql); stmt.bindString(":DRIVE_NAME", tapeDriveName); - auto 
rset = stmt.executeQuery(); - if (rset.next()) { + if (auto rset = stmt.executeQuery(); rset.next()) { return gettingSqlTapeDriveValues(&rset); } return std::nullopt; diff --git a/catalogue/rdbms/RdbmsSchemaCatalogue.cpp b/catalogue/rdbms/RdbmsSchemaCatalogue.cpp index 7d7bc4fbc4..de719abf9a 100644 --- a/catalogue/rdbms/RdbmsSchemaCatalogue.cpp +++ b/catalogue/rdbms/RdbmsSchemaCatalogue.cpp @@ -54,9 +54,9 @@ SchemaVersion RdbmsSchemaCatalogue::getSchemaVersion() const { schemaVersionBuilder.schemaVersionMajor(rset.columnUint64("SCHEMA_VERSION_MAJOR")) .schemaVersionMinor(rset.columnUint64("SCHEMA_VERSION_MINOR")) .status(rset.columnString("STATUS")); - auto schemaVersionMajorNext = rset.columnOptionalUint64("NEXT_SCHEMA_VERSION_MAJOR"); - auto schemaVersionMinorNext = rset.columnOptionalUint64("NEXT_SCHEMA_VERSION_MINOR"); - if(schemaVersionMajorNext.has_value() && schemaVersionMinorNext.has_value()){ + if (auto schemaVersionMajorNext = rset.columnOptionalUint64("NEXT_SCHEMA_VERSION_MAJOR"), + schemaVersionMinorNext = rset.columnOptionalUint64("NEXT_SCHEMA_VERSION_MINOR"); + schemaVersionMajorNext.has_value() && schemaVersionMinorNext.has_value()) { schemaVersionBuilder.nextSchemaVersionMajor(schemaVersionMajorNext.value()) .nextSchemaVersionMinor(schemaVersionMinorNext.value()); } diff --git a/catalogue/rdbms/RdbmsTapeFileCatalogue.cpp b/catalogue/rdbms/RdbmsTapeFileCatalogue.cpp index 20e6b42217..6bea3438b7 100644 --- a/catalogue/rdbms/RdbmsTapeFileCatalogue.cpp +++ b/catalogue/rdbms/RdbmsTapeFileCatalogue.cpp @@ -212,15 +212,15 @@ common::dataStructures::RetrieveFileQueueCriteria RdbmsTapeFileCatalogue::prepar ex.getMessage() << "File with archive file ID " << archiveFileId << " does not exist in CTA namespace"; throw ex; } - const auto nonBrokenState = std::find_if(std::begin(tapeFileStateList), std::end(tapeFileStateList), - [](const std::pair& state) { - return (state.second != "BROKEN") - && (state.second != "BROKEN_PENDING") - && (state.second != 
"EXPORTED") - && (state.second != "EXPORTED_PENDING"); - }); - - if (nonBrokenState != std::end(tapeFileStateList)) { + if (const auto nonBrokenState = std::find_if( + std::begin(tapeFileStateList), std::end(tapeFileStateList), + [](const std::pair& state) { + return (state.second != "BROKEN") + && (state.second != "BROKEN_PENDING") + && (state.second != "EXPORTED") + && (state.second != "EXPORTED_PENDING"); + }); + nonBrokenState != std::end(tapeFileStateList)) { ex.getMessage() << "WARNING: The requested file is on tape " << nonBrokenState->first << ", which is temporarily unavailable (" << nonBrokenState->second << "). Please retry later."; throw ex; diff --git a/cmdline/CtaAdminParsedCmd.cpp b/cmdline/CtaAdminParsedCmd.cpp index b9a0d953e4..6b7793ef57 100644 --- a/cmdline/CtaAdminParsedCmd.cpp +++ b/cmdline/CtaAdminParsedCmd.cpp @@ -42,8 +42,7 @@ CtaAdminParsedCmd::CtaAdminParsedCmd(int argc, const char* const* const argv) : // Strip path from execname - size_t p = m_execname.find_last_of('/'); - if (p != std::string::npos) { + if (size_t p = m_execname.find_last_of('/'); p != std::string::npos) { m_execname.erase(0, p + 1); } @@ -152,10 +151,12 @@ void CtaAdminParsedCmd::parseOptions(int start, int argc, const char* const* con // Check if the value is '--all' if (std::string(argv[i]) == "--all" || std::string(argv[i]) == "-a") { // Find the OPT_FLAG type --all option explicitly - auto flag_it = std::find_if(options.begin(), options.end(), [](const Option& opt) { - return opt.get_type() == Option::OPT_FLAG && (opt == opt_all); - }); - if (flag_it != options.end()) { + if (auto flag_it = std::find_if( + options.begin(), options.end(), + [](const Option& opt) { + return opt.get_type() == Option::OPT_FLAG && (opt == opt_all); + }); + flag_it != options.end()) { addOption(*flag_it, ""); // Add --all as a flag option continue; // Move to the next argument } @@ -251,8 +252,7 @@ void CtaAdminParsedCmd::readListFromFile(cta::admin::OptionStrList& str_list, co while 
(std::getline(file, line)) { // Strip out comments - auto pos = line.find('#'); - if (pos != std::string::npos) { + if (auto pos = line.find('#'); pos != std::string::npos) { line.resize(pos); } diff --git a/cmdline/EosCtaStub.cpp b/cmdline/EosCtaStub.cpp index f86c574c3d..3e6d2317b5 100644 --- a/cmdline/EosCtaStub.cpp +++ b/cmdline/EosCtaStub.cpp @@ -66,9 +66,7 @@ void fillNotification(cta::eos::Notification ¬ification, int argc, const char if(argc < 2) throw Usage; - const std::string wf_command(argv[1]); - - if(wf_command == "archive") + if(const std::string wf_command = argv[1]; wf_command == "archive") { notification.mutable_wf()->set_event(cta::eos::Workflow::CLOSEW); } diff --git a/cmdline/standalone_cli_tools/common/ConnectionConfiguration.cpp b/cmdline/standalone_cli_tools/common/ConnectionConfiguration.cpp index a098356e3e..d173babcfe 100644 --- a/cmdline/standalone_cli_tools/common/ConnectionConfiguration.cpp +++ b/cmdline/standalone_cli_tools/common/ConnectionConfiguration.cpp @@ -35,8 +35,7 @@ std::unique_ptr<::eos::client::EndpointMap> ConnConfiguration::setNamespaceMap(c std::string line; for(int lineno = 1; std::getline(file, line); ++lineno) { // Strip out comments - auto pos = line.find('#'); - if(pos != std::string::npos) { + if(auto pos = line.find('#'); pos != std::string::npos) { line.resize(pos); } diff --git a/cmdline/standalone_cli_tools/restore_files/RestoreFilesCmd.cpp b/cmdline/standalone_cli_tools/restore_files/RestoreFilesCmd.cpp index ba47ee8fdb..9387f772ec 100644 --- a/cmdline/standalone_cli_tools/restore_files/RestoreFilesCmd.cpp +++ b/cmdline/standalone_cli_tools/restore_files/RestoreFilesCmd.cpp @@ -377,13 +377,11 @@ void RestoreFilesCmd::restoreDeletedFileCopyCta(const cta::admin::RecycleTapeFil // addContainerEos //------------------------------------------------------------------------------ uint64_t RestoreFilesCmd::addContainerEos(const std::string &diskInstance, const std::string &path, const std::string &sc) const { - auto 
c_id = containerExistsEos(diskInstance, path); - if (c_id) { + if (auto c_id = containerExistsEos(diskInstance, path); c_id) { return c_id; } auto enclosingPath = cta::utils::getEnclosingPath(path); - auto parent_id = containerExistsEos(diskInstance, enclosingPath); - if (!parent_id) { + if (auto parent_id = containerExistsEos(diskInstance, enclosingPath); !parent_id) { //parent does not exist, need to add it as well parent_id = addContainerEos(diskInstance, enclosingPath, sc); } @@ -576,8 +574,7 @@ uint64_t RestoreFilesCmd::restoreDeletedFileEos(const cta::admin::RecycleTapeFil m_log(cta::log::INFO, "Restoring file in the EOS namespace", params); getCurrentEosIds(rtfls_item.disk_instance()); - uint64_t file_id = getFileIdEos(rtfls_item.disk_instance(), rtfls_item.disk_file_path()); - if (file_id) { + if (uint64_t file_id = getFileIdEos(rtfls_item.disk_instance(), rtfls_item.disk_file_path()); file_id) { return file_id; // EOS disk file id was changed since the file was deleted, just return the new file id } diff --git a/common/CRC.cpp b/common/CRC.cpp index bae5739703..aba6d1646b 100644 --- a/common/CRC.cpp +++ b/common/CRC.cpp @@ -248,13 +248,11 @@ bool verifyCrc32cForMemoryBlockWithCrc32c( if (cnt <= 4) return false; //block is too small to be valid, cannot check CRC - const uint32_t crccmp = crc32c(crcInit, cnt-4, start); - const uint32_t crcblk= (start[cnt-4] << 0) | - (start[cnt-3] << 8) | - (start[cnt-2] << 16) | - (start[cnt-1] << 24); - - if (crccmp != crcblk) { + if (const uint32_t crccmp = crc32c(crcInit, cnt-4, start), + crcblk = (start[cnt-4] << 0) | + (start[cnt-3] << 8) | + (start[cnt-2] << 16) | + (start[cnt-1] << 24); crccmp != crcblk) { return false; //block CRC is incorrect } return true; diff --git a/common/config/Config.cpp b/common/config/Config.cpp index b6bef90673..d472937b37 100644 --- a/common/config/Config.cpp +++ b/common/config/Config.cpp @@ -95,8 +95,7 @@ void Config::parse(std::ifstream& file) { while (std::getline(file, 
line)) { // Strip out comments - auto pos = line.find('#'); - if (pos != std::string::npos) { + if (auto pos = line.find('#'); pos != std::string::npos) { line.resize(pos); } diff --git a/common/exception/Backtrace.cpp b/common/exception/Backtrace.cpp index 47fd819679..1bceceede4 100644 --- a/common/exception/Backtrace.cpp +++ b/common/exception/Backtrace.cpp @@ -118,9 +118,8 @@ cta::exception::Backtrace::Backtrace(bool fake) { void * array[200]; g_lock.lock(); size_t depth = ::backtrace(array, sizeof(array)/sizeof(void*)); - char ** strings = ::backtrace_symbols(array, depth); - if (!strings) + if (char ** strings = ::backtrace_symbols(array, depth); !strings) m_trace = ""; else { for (size_t i=0; ipw_gid, &grp_buf, gr_buffer, sizeof(gr_buffer), &grp); - if (getgrgidRet != 0 || grp == nullptr) { + if (int getgrgidRet = getgrgid_r(pwd->pw_gid, &grp_buf, gr_buffer, sizeof(gr_buffer), &grp); getgrgidRet != 0 || grp == nullptr) { cta::exception::Exception e; e.getMessage() << "Failed to " << task << ": User does not have a primary group"; throw e; } // Get information about group name from group file - int getgrnam = getgrnam_r(groupName.c_str(), &grp_buf, gr_buffer, sizeof(gr_buffer), &grp); - if (getgrnam != 0 || grp == nullptr) { + if (int getgrnam = getgrnam_r(groupName.c_str(), &grp_buf, gr_buffer, sizeof(gr_buffer), &grp); getgrnam != 0 || grp == nullptr) { cta::exception::Exception e; e.getMessage() << "Failed to " << task << ": Group name not found in group file"; throw e; diff --git a/common/utils/Regex.cpp b/common/utils/Regex.cpp index 06867eac53..87f5f88005 100644 --- a/common/utils/Regex.cpp +++ b/common/utils/Regex.cpp @@ -26,8 +26,7 @@ Regex::Regex(const std::string & re_str) : m_reStr(re_str), m_set(false) { std::string error("Could not compile regular expression: \""); error += m_reStr; error += "\""; - char re_err[1024]; - if (::regerror(rc, &m_re, re_err, sizeof (re_err))) { + if (char re_err[1024]; ::regerror(rc, &m_re, re_err, sizeof (re_err))) { 
error += ": "; error += re_err; } @@ -44,8 +43,7 @@ Regex::~Regex() { } std::vector Regex::exec(const std::string &s) const { - regmatch_t matches[100]; - if (REG_NOMATCH != ::regexec(&m_re, s.c_str(), 100, matches, 0)) { + if (regmatch_t matches[100]; REG_NOMATCH != ::regexec(&m_re, s.c_str(), 100, matches, 0)) { std::vector ret; for (int i = 0; i < 100; i++) { if (matches[i].rm_so != -1) { diff --git a/common/utils/utils.cpp b/common/utils/utils.cpp index e66a4be1b5..9833086576 100644 --- a/common/utils/utils.cpp +++ b/common/utils/utils.cpp @@ -211,8 +211,7 @@ std::string trimSlashes(const std::string& s) { // Find last non slash chararacter std::string::const_iterator it2; - size_t endpos = s.find_last_not_of("/"); - if (std::string::npos != endpos) { + if (size_t endpos = s.find_last_not_of("/"); std::string::npos != endpos) { it2 = endpos + 1 + s.begin(); } else { it2 = s.end(); @@ -227,8 +226,7 @@ std::string trimSlashes(const std::string& s) { std::string trimFinalSlashes(const std::string& s) { // Find last non slash chararacter std::string::const_iterator it2; - size_t endpos = s.find_last_not_of("/"); - if (std::string::npos != endpos) { + if (size_t endpos = s.find_last_not_of("/"); std::string::npos != endpos) { it2 = endpos + 1 + s.begin(); } else { it2 = s.end(); diff --git a/disk/DiskSystem.cpp b/disk/DiskSystem.cpp index 55cf6f1589..9824f97c4e 100644 --- a/disk/DiskSystem.cpp +++ b/disk/DiskSystem.cpp @@ -35,8 +35,7 @@ namespace cta::disk { // DiskSystemList::at() //------------------------------------------------------------------------------ const DiskSystem& DiskSystemList::at(const std::string& name) const { - auto dsi = std::find_if(begin(), end(), [&name](const DiskSystem& ds){ return ds.name == name; }); - if (dsi != end()) return *dsi; + if (auto dsi = std::find_if(begin(), end(), [&name](const DiskSystem& ds){ return ds.name == name; }); dsi != end()) return *dsi; throw std::out_of_range("In DiskSystemList::at(): name " + name + " not 
found."); } @@ -51,9 +50,8 @@ std::string DiskSystemList::getDSName(const std::string& fileURL) const { } } // Try and find the fileURL - auto pri = std::find_if(m_pointersAndRegexes.begin(), m_pointersAndRegexes.end(), - [&fileURL](const PointerAndRegex& pr){ return !pr.regex.exec(fileURL).empty(); }); - if (pri != m_pointersAndRegexes.end()) { + if (auto pri = std::find_if(m_pointersAndRegexes.begin(), m_pointersAndRegexes.end(), + [&fileURL](const PointerAndRegex& pr){ return !pr.regex.exec(fileURL).empty(); }); pri != m_pointersAndRegexes.end()) { // We found a match. Let's move the pointer and regex to the front so next file will be faster (most likely). if (pri != m_pointersAndRegexes.begin()) m_pointersAndRegexes.splice(m_pointersAndRegexes.begin(), m_pointersAndRegexes, pri); @@ -119,12 +117,16 @@ void DiskSystemFreeSpaceList::fetchDiskSystemFreeSpace(const std::set regexResult; - const auto currentTime = static_cast(std::chrono::system_clock::to_time_t(std::chrono::system_clock::now())); - if (diskInstanceSpace.lastRefreshTime + diskInstanceSpace.refreshInterval >= currentTime) { - // use the value in the catalogue, it is still fresh - freeSpace = diskSystem.diskInstanceSpace.freeSpace; - updateFreeSpaceEntry(ds, freeSpace, catalogue, updateCatalogue); - continue; + { + const auto currentTime = static_cast(std::chrono::system_clock::to_time_t( + std::chrono::system_clock::now())); + + if (diskInstanceSpace.lastRefreshTime + diskInstanceSpace.refreshInterval >= currentTime) { + // use the value in the catalogue, it is still fresh + freeSpace = diskSystem.diskInstanceSpace.freeSpace; + updateFreeSpaceEntry(ds, freeSpace, catalogue, updateCatalogue); + continue; + } } updateCatalogue = true; const auto &freeSpaceQueryUrl = getDiskSystemFreeSpaceQueryURL(diskSystem); diff --git a/eos_grpc_client/GrpcClient.cpp b/eos_grpc_client/GrpcClient.cpp index a84c14b9e1..ecb7b16751 100644 --- a/eos_grpc_client/GrpcClient.cpp +++ b/eos_grpc_client/GrpcClient.cpp @@ -240,8 
+240,7 @@ eos::rpc::MDResponse GrpcClient::GetMD(eos::rpc::TYPE type, uint64_t id, const s while(true) { void *got_tag; bool ok = false; - bool ret = cq.Next(&got_tag, &ok); - if(!ret || !ok || got_tag != tag) break; + if(bool ret = cq.Next(&got_tag, &ok); !ret || !ok || got_tag != tag) break; rpc->Read(&response, tag); } if(showJson) { diff --git a/frontend/common/AdminCmd.cpp b/frontend/common/AdminCmd.cpp index d81051dfd2..d4bdd59790 100644 --- a/frontend/common/AdminCmd.cpp +++ b/frontend/common/AdminCmd.cpp @@ -857,10 +857,12 @@ void AdminCmd::processRepack_Add(xrd::Response& response) { common::dataStructures::MountPolicy mountPolicy; using MountPolicyList = std::list; MountPolicyList mountPolicies = m_catalogue.MountPolicy()->getMountPolicies(); - MountPolicyList::const_iterator repackMountPolicyItor = std::find_if(mountPolicies.begin(),mountPolicies.end(),[&mountPolicyProvidedByUser](const common::dataStructures::MountPolicy& mp) { - return mp.name == mountPolicyProvidedByUser; - }); - if(repackMountPolicyItor != mountPolicies.end()) { + if(MountPolicyList::const_iterator repackMountPolicyItor = std::find_if( + mountPolicies.begin(), + mountPolicies.end(), + [&mountPolicyProvidedByUser](const common::dataStructures::MountPolicy& mp) { + return mp.name == mountPolicyProvidedByUser; + }); repackMountPolicyItor != mountPolicies.end()) { //The mount policy exists mountPolicy = *repackMountPolicyItor; } else { diff --git a/frontend/common/AdminCmdOptions.hpp b/frontend/common/AdminCmdOptions.hpp index b53dfbe028..5fc87e101d 100644 --- a/frontend/common/AdminCmdOptions.hpp +++ b/frontend/common/AdminCmdOptions.hpp @@ -103,8 +103,7 @@ public: * @retval false The flag is either not present or is present and set to false */ bool has_flag(admin::OptionBoolean::Key option, bool* has_option = nullptr) const { - auto opt_it = m_option_bool.find(option); - if (opt_it != m_option_bool.end()) { + if (auto opt_it = m_option_bool.find(option); opt_it != m_option_bool.end()) 
{ if (has_option != nullptr) { *has_option = true; } diff --git a/frontend/common/DriveLsResponseStream.cpp b/frontend/common/DriveLsResponseStream.cpp index 15c7f3ed6e..c310d2c15d 100644 --- a/frontend/common/DriveLsResponseStream.cpp +++ b/frontend/common/DriveLsResponseStream.cpp @@ -74,17 +74,16 @@ DriveLsResponseStream::DriveLsResponseStream(cta::catalogue::Catalogue& catalogu // Extract the SchedulerBackendName configuration if it exists std::string driveSchedulerBackendName = "unknown"; - auto config_it = - std::find_if(driveConfigs.begin(), - driveConfigs.end(), - [&driveSchedulerBackendName](const cta::catalogue::DriveConfigCatalogue::DriveConfig& config) { - if (config.keyName == "SchedulerBackendName") { - driveSchedulerBackendName = config.value; - return true; - } - return false; - }); - if (config_it == driveConfigs.end()) { + if (auto config_it = + std::find_if(driveConfigs.begin(), + driveConfigs.end(), + [&driveSchedulerBackendName](const cta::catalogue::DriveConfigCatalogue::DriveConfig& config) { + if (config.keyName == "SchedulerBackendName") { + driveSchedulerBackendName = config.value; + return true; + } + return false; + }); config_it == driveConfigs.end()) { m_lc.log(cta::log::ERR, "DriveLsStream::fillBuffer could not find SchedulerBackendName configuration for drive " + dr_it->driveName); @@ -126,17 +125,17 @@ cta::xrd::Data DriveLsResponseStream::next() { // Extract the SchedulerBackendName configuration if it exists std::string driveSchedulerBackendName = "unknown"; - auto it = std::find_if(driveConfigs.begin(), - driveConfigs.end(), - [&driveSchedulerBackendName](const cta::catalogue::DriveConfigCatalogue::DriveConfig& config) { - if (config.keyName == "SchedulerBackendName") { - driveSchedulerBackendName = config.value; - return true; - } - return false; - }); - - if (it == driveConfigs.end()) { + + if (auto it = std::find_if(driveConfigs.begin(), + driveConfigs.end(), + [&driveSchedulerBackendName]( + const 
cta::catalogue::DriveConfigCatalogue::DriveConfig &config) { + if (config.keyName == "SchedulerBackendName") { + driveSchedulerBackendName = config.value; + return true; + } + return false; + }); it == driveConfigs.end()) { m_lc.log(cta::log::ERR, "DriveLsResponseStream::next could not find SchedulerBackendName configuration for drive " + dr.driveName); } diff --git a/frontend/common/FrontendService.cpp b/frontend/common/FrontendService.cpp index 08e43933a5..b089e2716a 100644 --- a/frontend/common/FrontendService.cpp +++ b/frontend/common/FrontendService.cpp @@ -339,14 +339,13 @@ FrontendService::FrontendService(const std::string& configFilename) { m_repackBufferURL = repackBufferURLConf.value(); } - auto repackMaxFilesToSelectConf = config.getOptionValueUInt("cta.repack.repack_max_files_to_select"); - if (repackMaxFilesToSelectConf.has_value()) { + + if (auto repackMaxFilesToSelectConf = config.getOptionValueUInt("cta.repack.repack_max_files_to_select"); repackMaxFilesToSelectConf.has_value()) { m_repackMaxFilesToSelect = repackMaxFilesToSelectConf.value(); } // Get the verification mount policy - const auto verificationMountPolicy = config.getOptionValueStr("cta.verification.mount_policy"); - if (verificationMountPolicy.has_value()) { + if (const auto verificationMountPolicy = config.getOptionValueStr("cta.verification.mount_policy"); verificationMountPolicy.has_value()) { m_verificationMountPolicy = verificationMountPolicy.value(); } @@ -426,32 +425,25 @@ FrontendService::FrontendService(const std::string& configFilename) { // Get the gRPC-specific values, if they are set (getOptionValue returns an std::optional) std::optional tls = config.getOptionValueBool("grpc.tls.enabled"); m_tls = tls.value_or(false); // default value is false - auto TlsKey = config.getOptionValueStr("grpc.tls.server_key_path"); - if (TlsKey.has_value()) { + if (auto TlsKey = config.getOptionValueStr("grpc.tls.server_key_path"); TlsKey.has_value()) { m_tlsKey = TlsKey.value(); } - auto 
TlsCert = config.getOptionValueStr("grpc.tls.server_cert_path"); - if (TlsCert.has_value()) { + if (auto TlsCert = config.getOptionValueStr("grpc.tls.server_cert_path"); TlsCert.has_value()) { m_tlsCert = TlsCert.value(); } - auto TlsChain = config.getOptionValueStr("grpc.tls.chain_cert_path"); - if (TlsChain.has_value()) { + if (auto TlsChain = config.getOptionValueStr("grpc.tls.chain_cert_path"); TlsChain.has_value()) { m_tlsChain = TlsChain.value(); } - auto keytab = config.getOptionValueStr("grpc.keytab"); - if (keytab.has_value()) { + if (auto keytab = config.getOptionValueStr("grpc.keytab"); keytab.has_value()) { m_keytab = keytab.value(); } - auto servicePrincipal = config.getOptionValueStr("grpc.service_principal"); - if (servicePrincipal.has_value()) { + if (auto servicePrincipal = config.getOptionValueStr("grpc.service_principal"); servicePrincipal.has_value()) { m_servicePrincipal = servicePrincipal.value(); } - auto port = config.getOptionValueStr("grpc.port"); - if (port.has_value()) { + if (auto port = config.getOptionValueStr("grpc.port"); port.has_value()) { m_port = port.value(); } - auto threads = config.getOptionValueInt("grpc.numberofthreads"); - if (threads.has_value()) { + if (auto threads = config.getOptionValueInt("grpc.numberofthreads"); threads.has_value()) { if (threads.value() < 1) { throw exception::UserError("value of grpc.numberofthreads must be at least 1"); } diff --git a/frontend/common/TapeLsResponseStream.cpp b/frontend/common/TapeLsResponseStream.cpp index 564517847d..3b5105fc77 100644 --- a/frontend/common/TapeLsResponseStream.cpp +++ b/frontend/common/TapeLsResponseStream.cpp @@ -55,8 +55,7 @@ TapeLsResponseStream::TapeLsResponseStream(cta::catalogue::Catalogue& catalogue, } // Handle state option - auto stateOpt = request.getOptional(OptionString::STATE, &has_any); - if (stateOpt) { + if (auto stateOpt = request.getOptional(OptionString::STATE, &has_any); stateOpt) { m_searchCriteria.state = 
common::dataStructures::Tape::stringToState(stateOpt.value(), true); } diff --git a/frontend/common/WorkflowEvent.cpp b/frontend/common/WorkflowEvent.cpp index 71f144bf93..3fe7a427c1 100644 --- a/frontend/common/WorkflowEvent.cpp +++ b/frontend/common/WorkflowEvent.cpp @@ -288,8 +288,8 @@ void WorkflowEvent::processCLOSEW(xrd::Response& response) { m_lc.log(log::INFO, logMessage); throw exception::PbException("CLOSEW: Failed to find the extended attribute named sys.archive.file_id"); } - const std::string archiveFileIdStr = archiveFileIdItor->second; - if ((archiveFileId = strtoul(archiveFileIdStr.c_str(), nullptr, 10)) == 0) { + if (const std::string archiveFileIdStr = archiveFileIdItor->second; + (archiveFileId = strtoul(archiveFileIdStr.c_str(), nullptr, 10)) == 0) { params.add("sys.archive.file_id", archiveFileIdStr); logMessage += "sys.archive.file_id is not a positive integer"; m_lc.log(log::INFO, logMessage); @@ -352,8 +352,7 @@ void WorkflowEvent::processPREPARE(xrd::Response& response) { request.vid = m_event.wf().vid(); } - auto archiveFileId = m_event.file().archive_file_id(); - if (archiveFileId) { + if (auto archiveFileId = m_event.file().archive_file_id(); archiveFileId) { request.archiveFileID = archiveFileId; } else { @@ -412,8 +411,7 @@ void WorkflowEvent::processABORT_PREPARE(xrd::Response& response) { request.requester.name = m_event.cli().user().username(); request.requester.group = m_event.cli().user().groupname(); - auto archiveFileId = m_event.file().archive_file_id(); - if (archiveFileId) { + if (auto archiveFileId = m_event.file().archive_file_id(); archiveFileId) { request.archiveFileID = archiveFileId; } else { @@ -434,8 +432,7 @@ void WorkflowEvent::processABORT_PREPARE(xrd::Response& response) { } // first check if there is a first-class request Id set, if not, fallback to checking xattrs - std::string retrieveRequestId = m_event.file().request_objectstore_id(); - if (!retrieveRequestId.empty()) { + if (std::string retrieveRequestId = 
m_event.file().request_objectstore_id(); !retrieveRequestId.empty()) { request.retrieveRequestId = retrieveRequestId; } else { diff --git a/frontend/grpc/FrontendGrpcService.cpp b/frontend/grpc/FrontendGrpcService.cpp index 35d6f08074..6eb94b72ac 100644 --- a/frontend/grpc/FrontendGrpcService.cpp +++ b/frontend/grpc/FrontendGrpcService.cpp @@ -176,8 +176,7 @@ CtaRpcImpl::Archive(::grpc::ServerContext* context, const cta::xrd::Request* req sp.add("remoteHost", context->peer()); sp.add("request", "archive"); // check validate request args - const std::string storageClass = request->notification().file().storage_class(); - if (storageClass.empty()) { + if (const std::string storageClass = request->notification().file().storage_class(); storageClass.empty()) { response->set_type(cta::xrd::Response::RSP_ERR_USER); response->set_message_txt("Storage class is not set."); return ::grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, "Storage class is not set."); diff --git a/frontend/grpc/Main.cpp b/frontend/grpc/Main.cpp index 7a3fcb7f79..13c8531cbd 100644 --- a/frontend/grpc/Main.cpp +++ b/frontend/grpc/Main.cpp @@ -193,8 +193,7 @@ int main(const int argc, char *const *const argv) { lc.log(log::INFO, "TLS service certificate file: " + cert_file); cert.cert_chain = cta::utils::file2string(cert_file); - auto ca_chain = svc.getFrontendService().getTlsChain(); - if (ca_chain.has_value()) { + if (auto ca_chain = svc.getFrontendService().getTlsChain(); ca_chain.has_value()) { lc.log(log::INFO, "TLS CA chain file: " + ca_chain.value()); tls_options.pem_root_certs = cta::utils::file2string(ca_chain.value()); } else { diff --git a/frontend/grpc/ServiceKerberosAuthProcessor.cpp b/frontend/grpc/ServiceKerberosAuthProcessor.cpp index 8900508786..5c582651c2 100644 --- a/frontend/grpc/ServiceKerberosAuthProcessor.cpp +++ b/frontend/grpc/ServiceKerberosAuthProcessor.cpp @@ -61,12 +61,12 @@ return ::grpc::Status::OK; } // Skip Kerberos auth for the physics workflow events, because these 
will be checked inside the rpc implementation for credentials - std::unordered_set allowed {"/cta.xrd.CtaRpc/Create", - "/cta.xrd.CtaRpc/Archive", - "/cta.xrd.CtaRpc/Retrieve", - "/cta.xrd.CtaRpc/Delete", - "/cta.xrd.CtaRpc/CancelRetrieve"}; - if (allowed.contains(strAuthMetadataValue)) { + if (std::unordered_set allowed {"/cta.xrd.CtaRpc/Create", + "/cta.xrd.CtaRpc/Archive", + "/cta.xrd.CtaRpc/Retrieve", + "/cta.xrd.CtaRpc/Delete", + "/cta.xrd.CtaRpc/CancelRetrieve"}; + allowed.contains(strAuthMetadataValue)) { return ::grpc::Status::OK; } /* diff --git a/frontend/grpc/TokenStorage.cpp b/frontend/grpc/TokenStorage.cpp index 7123f4f286..90d8d33904 100644 --- a/frontend/grpc/TokenStorage.cpp +++ b/frontend/grpc/TokenStorage.cpp @@ -27,9 +27,8 @@ void cta::frontend::grpc::server::TokenStorage::store(const std::string& strToke bool cta::frontend::grpc::server::TokenStorage::validate(const std::string& strToken) const { std::lock_guard lck(m_mtxLockStorage); - std::string strDecodedToken = cta::utils::base64decode(strToken); - if (m_umapTokens.contains(strDecodedToken)) { + if (std::string strDecodedToken = cta::utils::base64decode(strToken); m_umapTokens.contains(strDecodedToken)) { return true; } return false; diff --git a/mediachanger/CommonMarshal.cpp b/mediachanger/CommonMarshal.cpp index eaf19bb655..be85d42ae7 100644 --- a/mediachanger/CommonMarshal.cpp +++ b/mediachanger/CommonMarshal.cpp @@ -54,10 +54,8 @@ size_t marshal(char *const dst, const size_t dstLen, const MessageHeader &src) { marshalUint32(src.lenOrStatus, p); // Calculate the number of bytes actually marshalled - const size_t nbBytesMarshalled = p - dst; - // Check that the number of bytes marshalled was what was expected - if(totalLen != nbBytesMarshalled) { + if(const size_t nbBytesMarshalled = p - dst; totalLen != nbBytesMarshalled) { cta::exception::Exception ex; ex.getMessage() << "Failed to marshal MessageHeader" ": Mismatch between expected total length and actual" diff --git 
a/mediachanger/io.cpp b/mediachanger/io.cpp index f86f56d39b..96efe009a3 100644 --- a/mediachanger/io.cpp +++ b/mediachanger/io.cpp @@ -79,8 +79,7 @@ int createListenerSock( struct in_addr networkAddress; - const int rc = inet_pton(AF_INET, addr.c_str(), &networkAddress); - if(0 >= rc) { + if(const int rc = inet_pton(AF_INET, addr.c_str(), &networkAddress); 0 >= rc) { cta::exception::Exception ex; ex.getMessage() << "Failed to create listener socket:" " Failed to convert string to network address: value=" << addr; @@ -222,8 +221,7 @@ int createLocalhostListenerSock(const unsigned short port) { const char *addr = "127.0.0.1"; struct in_addr networkAddress; - const int rc = inet_pton(AF_INET, addr, &networkAddress); - if(0 >= rc) { + if(const int rc = inet_pton(AF_INET, addr, &networkAddress); 0 >= rc) { cta::exception::Exception ex; ex.getMessage() << "Failed to create listener socket:" " Failed to convert string to network address: value=" << addr; @@ -394,9 +392,8 @@ mediachanger::IpAndPort getSockIpPort(const int socketFd) { struct sockaddr_in address; memset(&address, '\0', sizeof(address)); - socklen_t addressLen = sizeof(address); - if(getsockname(socketFd, (struct sockaddr*)&address, &addressLen) < 0) { + if(socklen_t addressLen = sizeof(address); getsockname(socketFd, (struct sockaddr*)&address, &addressLen) < 0) { cta::exception::Exception ex; ex.getMessage() << "Failed to get socket name: socketFd=" << socketFd << ": " << cta::utils::errnoToString(errno); @@ -422,9 +419,8 @@ mediachanger::IpAndPort getPeerIpPort(const int socketFd) { struct sockaddr_in address; memset(&address, '\0', sizeof(address)); - socklen_t addressLen = sizeof(address); - if(getpeername(socketFd, (struct sockaddr*)&address, &addressLen) < 0) { + if(socklen_t addressLen = sizeof(address); getpeername(socketFd, (struct sockaddr*)&address, &addressLen) < 0) { cta::exception::Exception ex; ex.getMessage() << ": Failed to get peer name: socketFd=" << socketFd << ": " << 
cta::utils::errnoToString(errno); @@ -460,10 +456,10 @@ std::string getSockHostName(const int socketFd) { char hostName[HOSTNAMEBUFLEN]; char serviceName[SERVICENAMEBUFLEN]; - const int error = getnameinfo((const struct sockaddr*)&address, addressLen, - hostName, sizeof(hostName), serviceName, sizeof(serviceName), 0); - if(error != 0) { + if (const int error = getnameinfo((const struct sockaddr *) &address, addressLen, + hostName, sizeof(hostName), serviceName, sizeof(serviceName), 0); + error != 0) { cta::exception::Exception ex; ex.getMessage() << ": Failed to get host information by address" @@ -514,7 +510,9 @@ void getSockIpHostnamePort( const int rc = getnameinfo((const struct sockaddr*)&address, addressLen, hostName, hostNameLen, serviceName, sizeof(serviceName), 0); - if(rc != 0) { + if (const int rc = getnameinfo((const struct sockaddr *) &address, addressLen, + hostName, hostNameLen, serviceName, sizeof(serviceName), 0); + rc != 0) { cta::exception::Exception ex; ex.getMessage() << ": Failed to get host information by address" diff --git a/mediachanger/librmc/Cdomainname.cpp b/mediachanger/librmc/Cdomainname.cpp index 2fbcbfeb90..d69d8f2c3c 100644 --- a/mediachanger/librmc/Cdomainname.cpp +++ b/mediachanger/librmc/Cdomainname.cpp @@ -33,14 +33,13 @@ int Cdomainname(char* name, int namelen) { struct hostent* hp; char* p; - FILE* fd; /* * try looking in /etc/resolv.conf * putting this here and assuming that it is correct, eliminates * calls to gethostbyname, and therefore DNS lookups. This helps * those on dialup systems. 
*/ - if ((fd = fopen("/etc/resolv.conf", "r")) != nullptr) { + if (FILE* fd; (fd = fopen("/etc/resolv.conf", "r")) != nullptr) { char line[300]; while (fgets(line, sizeof(line), fd) != nullptr) { if ((strncmp(line, "domain", 6) == 0 || strncmp(line, "search", 6) == 0) && line[6] == ' ') { diff --git a/mediachanger/rmcd/rmc_serv.cpp b/mediachanger/rmcd/rmc_serv.cpp index 1a2017f3c9..9af7f8499a 100644 --- a/mediachanger/rmcd/rmc_serv.cpp +++ b/mediachanger/rmcd/rmc_serv.cpp @@ -108,8 +108,8 @@ int rmc_main(const char* const robot) { *first_space = '\0'; } } - int ret = snprintf(g_localhost, CA_MAXHOSTNAMELEN, "%s.%s", localhost, domainname); - if (ret < 0 || ret >= CA_MAXHOSTNAMELEN) { + if (int ret = snprintf(g_localhost, CA_MAXHOSTNAMELEN, "%s.%s", localhost, domainname); + ret < 0 || ret >= CA_MAXHOSTNAMELEN) { rmc_logit(func, "localhost.domainname exceeds maximum length\n"); } rmc_logit(func, "found the following localhost.domainname: %s", g_localhost); @@ -200,8 +200,7 @@ int rmc_main(const char* const robot) { /* main loop */ while (1) { // Check for connections - int ret = poll(&pfd, 1, RMC_CHECKI * 1000); - if (ret < 0) { + if (int ret = poll(&pfd, 1, RMC_CHECKI * 1000); ret < 0) { perror("poll() error"); continue; } else if (ret == 0) { @@ -329,12 +328,12 @@ static int rmc_getreq(const int s, int* const req_type, char* const req_data, ch rmc_logit(func, RMC02, "getpeername", neterror()); return ERMCUNREC; } - struct hostent hbuf; - struct hostent* hp = nullptr; - char buffer[1024]; - char client_ip[INET6_ADDRSTRLEN]; - int h_err; - if (gethostbyaddr_r((void*) (&from.sin_addr), + struct hostent hbuf; + struct hostent* hp = nullptr; + char buffer[1024]; + char client_ip[INET6_ADDRSTRLEN]; + int h_err; + if (gethostbyaddr_r((void*) (&from.sin_addr), sizeof(struct in_addr), from.sin_family, &hbuf, diff --git a/objectstore/Algorithms.hpp b/objectstore/Algorithms.hpp index e5948246cd..151c805063 100644 --- a/objectstore/Algorithms.hpp +++
b/objectstore/Algorithms.hpp @@ -222,10 +222,11 @@ public: timingList.insertAndReset("ownershipAdditionTime", t); m_agentReference.addBatchToOwnership(candidateElementsAddresses, m_backend); // We can now attempt to switch ownership of elements - auto failedOwnershipSwitchElements = ContainerTraits::switchElementsOwnershipAndStatus(candidateElements, - m_agentReference.getAgentAddress(), - cont.getAddressIfSet(), timingList, t, lc, newStatus); - if (failedOwnershipSwitchElements.empty()) { + if (auto failedOwnershipSwitchElements = ContainerTraits::switchElementsOwnershipAndStatus( + candidateElements, + m_agentReference.getAgentAddress(), + cont.getAddressIfSet(), timingList, t, lc, newStatus); + failedOwnershipSwitchElements.empty()) { timingList.insertAndReset("updateResultProcessingTime", t); // This is the easy case (and most common case). Everything went through fine. ContainerTraits::removeReferencesAndCommit(cont, candidateElementsAddresses, lc); @@ -362,9 +363,11 @@ public: localTimingList.insertAndReset("ownershipAdditionTime", t); m_agentReference.addBatchToOwnership(candidateElementsAddresses, m_backend); // We can now attempt to switch ownership of elements - auto failedOwnershipSwitchElements = ContainerTraits::switchElementsOwnership(candidateElements, m_agentReference.getAgentAddress(), - cont.getAddressIfSet(), localTimingList, t, lc); - if (failedOwnershipSwitchElements.empty()) { + if (auto failedOwnershipSwitchElements = ContainerTraits::switchElementsOwnership(candidateElements, + m_agentReference.getAgentAddress(), + cont.getAddressIfSet(), + localTimingList, t, lc); + failedOwnershipSwitchElements.empty()) { localTimingList.insertAndReset("updateResultProcessingTime", t); // This is the easy case (and most common case). Everything went through fine. 
ContainerTraits::removeReferencesAndCommit(cont, candidateElementsAddresses, lc); diff --git a/objectstore/ArchiveQueue.cpp b/objectstore/ArchiveQueue.cpp index edd28b3457..1e1ef05c0c 100644 --- a/objectstore/ArchiveQueue.cpp +++ b/objectstore/ArchiveQueue.cpp @@ -87,16 +87,16 @@ bool ArchiveQueue::checkMapsAndShardsCoherency() { jobsExpectedFromShardsPointers += aqs.shardjobscount(); } uint64_t totalBytes = m_payload.archivejobstotalsize(); - uint64_t totalJobs = m_payload.archivejobscount(); // The sum of shards should be equal to the summary - if (totalBytes != bytesFromShardPointers || + if (uint64_t totalJobs = m_payload.archivejobscount(); + totalBytes != bytesFromShardPointers || totalJobs != jobsExpectedFromShardsPointers) return false; // Check that we have coherent queue summaries - ValueCountMapUint64 priorityMap(m_payload.mutable_prioritymap()); - ValueCountMapUint64 minArchiveRequestAgeMap(m_payload.mutable_minarchiverequestagemap()); - ValueCountMapString mountPolicyNameMap(m_payload.mutable_mountpolicynamemap()); - if (priorityMap.total() != m_payload.archivejobscount() || + ValueCountMapUint64 priorityMap(m_payload.mutable_prioritymap()); + ValueCountMapUint64 minArchiveRequestAgeMap(m_payload.mutable_minarchiverequestagemap()); + ValueCountMapString mountPolicyNameMap(m_payload.mutable_mountpolicynamemap()); + if (priorityMap.total() != m_payload.archivejobscount() || minArchiveRequestAgeMap.total() != m_payload.archivejobscount() || mountPolicyNameMap.total() != m_payload.archivejobscount() ) @@ -339,8 +339,8 @@ void ArchiveQueue::addJobsAndCommit(std::list & jobsToAdd, AgentRefere ArchiveQueueShard aqs(m_objectStore); serializers::ArchiveQueueShardPointer * aqsp = nullptr; bool newShard=false; - uint64_t shardCount = m_payload.archivequeueshards_size(); - if (shardCount && m_payload.archivequeueshards(shardCount - 1).shardjobscount() < c_maxShardSize) { + if (uint64_t shardCount = m_payload.archivequeueshards_size(); + shardCount && 
m_payload.archivequeueshards(shardCount - 1).shardjobscount() < c_maxShardSize) { auto & shardPointer=m_payload.archivequeueshards(shardCount - 1); aqs.setAddress(shardPointer.address()); // include-locking does not check existence of the object in the object store. @@ -360,8 +360,8 @@ void ArchiveQueue::addJobsAndCommit(std::list & jobsToAdd, AgentRefere } // Validate that the shard is as expected from the pointer. If not we need to // rebuild the queue and restart the shard selection. - auto shardSummary = aqs.getJobsSummary(); - if (shardPointer.shardbytescount() != shardSummary.bytes || + if (auto shardSummary = aqs.getJobsSummary(); + shardPointer.shardbytescount() != shardSummary.bytes || shardPointer.shardjobscount() != shardSummary.jobs) { log::ScopedParamContainer params(lc); params.add("archiveQueueObject", getAddressIfSet()) diff --git a/objectstore/RetrieveQueue.cpp b/objectstore/RetrieveQueue.cpp index 91ccd24ddc..37498f3ce2 100644 --- a/objectstore/RetrieveQueue.cpp +++ b/objectstore/RetrieveQueue.cpp @@ -67,17 +67,17 @@ bool RetrieveQueue::checkMapsAndShardsCoherency() { bytesFromShardPointers += aqs.shardbytescount(); jobsExpectedFromShardsPointers += aqs.shardjobscount(); } - uint64_t totalBytes = m_payload.retrievejobstotalsize(); - uint64_t totalJobs = m_payload.retrievejobscount(); // The sum of shards should be equal to the summary - if (totalBytes != bytesFromShardPointers || + if (uint64_t totalBytes = m_payload.retrievejobstotalsize(), + totalJobs = m_payload.retrievejobscount(); + totalBytes != bytesFromShardPointers || totalJobs != jobsExpectedFromShardsPointers) return false; // Check that we have coherent queue summaries - ValueCountMapUint64 priorityMap(m_payload.mutable_prioritymap()); - ValueCountMapUint64 minRetrieveRequestAgeMap(m_payload.mutable_minretrieverequestagemap()); - ValueCountMapString mountPolicyNameMap(m_payload.mutable_mountpolicynamemap()); - if (priorityMap.total() != m_payload.retrievejobscount() || + ValueCountMapUint64 priorityMap(m_payload.mutable_prioritymap());
+ ValueCountMapUint64 minRetrieveRequestAgeMap(m_payload.mutable_minretrieverequestagemap()); + ValueCountMapString mountPolicyNameMap(m_payload.mutable_mountpolicynamemap()); + if (priorityMap.total() != m_payload.retrievejobscount() || minRetrieveRequestAgeMap.total() != m_payload.retrievejobscount() || mountPolicyNameMap.total() != m_payload.retrievejobscount() ) diff --git a/objectstore/RetrieveQueueAlgorithms.hpp b/objectstore/RetrieveQueueAlgorithms.hpp index dc9bfc75b0..ec1cfbec57 100644 --- a/objectstore/RetrieveQueueAlgorithms.hpp +++ b/objectstore/RetrieveQueueAlgorithms.hpp @@ -420,8 +420,7 @@ trimContainerIfNeeded(Container &cont, ScopedExclusiveLock &contLock, const ContainerIdentifier &cId, log::LogContext &lc) { if(!cont.isEmpty()) { - auto si = cont.getJobsSummary().sleepInfo; - if (si) { + if (auto si = cont.getJobsSummary().sleepInfo; si) { log::ScopedParamContainer params(lc); params.add("tapeVid", cId) .add("queueObject", cont.getAddressIfSet()) diff --git a/objectstore/Sorter.cpp b/objectstore/Sorter.cpp index 13567afa86..e6f7866768 100644 --- a/objectstore/Sorter.cpp +++ b/objectstore/Sorter.cpp @@ -343,8 +343,8 @@ std::set> Sorter::getCandidateVidsToTransfer(RetrieveRe } std::string Sorter::getContainerID(RetrieveRequestInfosAccessorInterface& requestAccessor, const std::string& vid, const uint32_t copyNb){ - serializers::RetrieveJobStatus rjs = requestAccessor.getJobStatus(copyNb); - if(rjs == serializers::RetrieveJobStatus::RJS_ToReportToRepackForSuccess || rjs == serializers::RetrieveJobStatus::RJS_ToReportToRepackForFailure) + if(serializers::RetrieveJobStatus rjs = requestAccessor.getJobStatus(copyNb); + rjs == serializers::RetrieveJobStatus::RJS_ToReportToRepackForSuccess || rjs == serializers::RetrieveJobStatus::RJS_ToReportToRepackForFailure) return requestAccessor.getRepackAddress(); return vid; } diff --git a/plugin-manager/PluginManager.hpp 
b/plugin-manager/PluginManager.hpp index b46f5d71af..d2a0b6f66a 100644 --- a/plugin-manager/PluginManager.hpp +++ b/plugin-manager/PluginManager.hpp @@ -70,8 +70,7 @@ public: dlerror(); // to clear any old error conditions m_pFun = dlsym(m_pHandler, strEntryPointName.c_str()); - char* pcError = dlerror(); - if (pcError) { + if (char* pcError = dlerror(); pcError) { throw std::runtime_error(pcError); } return *this; diff --git a/rdbms/Login.cpp b/rdbms/Login.cpp index dff42b69b8..b3b06e899f 100644 --- a/rdbms/Login.cpp +++ b/rdbms/Login.cpp @@ -286,8 +286,8 @@ bool Login::postgresqlHasPassword(const std::string& connectionDetails) { if (result.size() < 2) { throw exception::Exception(std::string("Invalid connection string: Correct format is ") + s_fileFormat); } - std::string usernamePassword = result[1]; - if (usernamePassword.find(":") == std::string::npos) { + if (std::string usernamePassword = result[1]; + usernamePassword.find(":") == std::string::npos) { // No password provided, no need to hide it return false; } diff --git a/rdbms/wrapper/OcciColumn.cpp b/rdbms/wrapper/OcciColumn.cpp index 2f40bba544..2735e5e55c 100644 --- a/rdbms/wrapper/OcciColumn.cpp +++ b/rdbms/wrapper/OcciColumn.cpp @@ -134,16 +134,19 @@ void OcciColumn::copyStrIntoField(const size_t index, const std::string& str) { // setFieldValueToRaw //------------------------------------------------------------------------------ void OcciColumn::setFieldValueToRaw(size_t index, const std::string &blob) { - size_t maxlen = m_maxFieldLength < 2000 ? m_maxFieldLength : 2000; - if(blob.length() + 2 > maxlen) { - throw exception::Exception("Blob length=" + std::to_string(blob.length()) + - " exceeds maximum field length (" + std::to_string(maxlen-2) + ") bytes" + " colName=" + m_colName); + { + size_t maxlen = m_maxFieldLength < 2000 ? 
m_maxFieldLength : 2000; + if (blob.length() + 2 > maxlen) { + throw exception::Exception("Blob length=" + std::to_string(blob.length()) + + " exceeds maximum field length (" + std::to_string(maxlen - 2) + ") bytes" + + " colName=" + m_colName); + } } uint16_t len = blob.length(); char *const buf = getBuffer(); char *const element = buf + index * m_maxFieldLength; memcpy(element, &len, 2); memcpy(element + 2, blob.c_str(), len); -} + } } // namespace cta::rdbms::wrapper diff --git a/rdbms/wrapper/ParamNameToIdx.cpp b/rdbms/wrapper/ParamNameToIdx.cpp index e39a0fbad8..ca380397a5 100644 --- a/rdbms/wrapper/ParamNameToIdx.cpp +++ b/rdbms/wrapper/ParamNameToIdx.cpp @@ -34,8 +34,8 @@ ParamNameToIdx::ParamNameToIdx(const std::string &sql) { std::smatch match; std::string::const_iterator searchStart(sql.cbegin()); while (std::regex_search(searchStart, sql.cend(), match, pattern)) { - auto matchPos = std::distance(sql.cbegin(), searchStart) + match.position(); - if (matchPos > 0 && ':' == *(sql.cbegin() + matchPos -1)){ + if (auto matchPos = std::distance(sql.cbegin(), searchStart) + match.position(); + matchPos > 0 && ':' == *(sql.cbegin() + matchPos -1)){ searchStart = match.suffix().first; continue; } diff --git a/rdbms/wrapper/PostgresConn.cpp b/rdbms/wrapper/PostgresConn.cpp index bf60217fca..1d8696d76b 100644 --- a/rdbms/wrapper/PostgresConn.cpp +++ b/rdbms/wrapper/PostgresConn.cpp @@ -45,8 +45,7 @@ PostgresConn::PostgresConn(const rdbms::Login& login) : m_dbNamespace(login.dbNa m_pgsqlConn = nullptr; throw exception::Exception("Connection failed: " + pqmsgstr); } - const int sVer = PQserverVersion(m_pgsqlConn); - if (sVer < 90500) { + if (const int sVer = PQserverVersion(m_pgsqlConn); sVer < 90500) { PQfinish(m_pgsqlConn); m_pgsqlConn = nullptr; const int maj = (sVer / 10000) % 100; @@ -168,8 +167,7 @@ std::list PostgresConn::getSequenceNames() { throwDBIfNotStatus(res.get(), PGRES_TUPLES_OK, "Listing Sequences in the DB"); - const int num_fields = 
PQnfields(res.get()); - if (1 != num_fields) { + if (const int num_fields = PQnfields(res.get()); 1 != num_fields) { throw exception::Exception("number fields wrong during list sequences: Got " + std::to_string(num_fields)); } @@ -248,8 +246,7 @@ std::list PostgresConn::getTableNames() { throwDBIfNotStatus(res.get(), PGRES_TUPLES_OK, "Listing table names in the DB"); - const int num_fields = PQnfields(res.get()); - if (1 != num_fields) { + if (const int num_fields = PQnfields(res.get()); 1 != num_fields) { throw exception::Exception("number fields wrong during list tables: Got " + std::to_string(num_fields)); } diff --git a/rdbms/wrapper/PostgresRset.cpp b/rdbms/wrapper/PostgresRset.cpp index f3dde39da8..c14fdfb43d 100644 --- a/rdbms/wrapper/PostgresRset.cpp +++ b/rdbms/wrapper/PostgresRset.cpp @@ -190,8 +190,7 @@ bool PostgresRset::columnExists(const std::string& colName) const { if (auto it = m_columnPQindexCache.find(colName); it != m_columnPQindexCache.end()) { return true; } - int idx = PQfnumber(m_resItr->get(), colName.c_str()); - if (idx < 0) { + if (int idx = PQfnumber(m_resItr->get(), colName.c_str()); idx < 0) { return false; } return true; @@ -364,8 +363,7 @@ bool PostgresRset::next() { // as a Rset is intended for an executeQuery only. 
if (PGRES_TUPLES_OK == m_resItr->rcode() && 0 == PQntuples(m_resItr->get())) { - const std::string stringValue = PQcmdTuples(m_resItr->get()); - if (!stringValue.empty()) { + if (const std::string stringValue = PQcmdTuples(m_resItr->get()); !stringValue.empty()) { m_stmt.setAffectedRows(utils::toUint64(stringValue)); } m_resItr->clear(); diff --git a/rdbms/wrapper/PostgresStmt.cpp b/rdbms/wrapper/PostgresStmt.cpp index 3b3aa0dc1a..602366111c 100644 --- a/rdbms/wrapper/PostgresStmt.cpp +++ b/rdbms/wrapper/PostgresStmt.cpp @@ -499,8 +499,8 @@ void PostgresStmt::CountAndReformatSqlBinds(const std::string& common_sql, std:: std::ostringstream oss; while (std::regex_search(searchStart, common_sql.cend(), match, pattern)) { // skip all matches which have a second colon in front e.g. ::name (reserved for type casting in postgres) - auto matchPos = std::distance(common_sql.cbegin(), searchStart) + match.position(); - if (matchPos > 0 && ':' == *(common_sql.cbegin() + matchPos - 1)) { + if (auto matchPos = std::distance(common_sql.cbegin(), searchStart) + match.position(); + matchPos > 0 && ':' == *(common_sql.cbegin() + matchPos - 1)) { oss << match.prefix(); oss << match.str(); searchStart = match.suffix().first; diff --git a/rdbms/wrapper/SqliteConn.cpp b/rdbms/wrapper/SqliteConn.cpp index ec50484879..5ea9666f58 100644 --- a/rdbms/wrapper/SqliteConn.cpp +++ b/rdbms/wrapper/SqliteConn.cpp @@ -84,8 +84,8 @@ void SqliteConn::close() { threading::MutexLocker locker(m_mutex); if(nullptr != m_sqliteConn) { - const int closeRc = sqlite3_close(m_sqliteConn); - if(SQLITE_OK != closeRc) { + if(const int closeRc = sqlite3_close(m_sqliteConn); + SQLITE_OK != closeRc) { exception::Exception ex; ex.getMessage() << "Failed to close SQLite connection: " << Sqlite::rcToStr(closeRc); throw ex; @@ -239,8 +239,7 @@ std::map> SqliteConn::getColumns(const std auto stmt = createStmt(sql); stmt->bindString(":TABLE_NAME", tableName); - auto rset = stmt->executeQuery(); - if (rset->next()) { + if 
(auto rset = stmt->executeQuery(); rset->next()) { auto tableSql = rset->columnOptionalString("SQL").value(); tableSql += std::string(","); // hack for parsing std::string::size_type searchPosComma = 0; @@ -369,8 +368,7 @@ std::list SqliteConn::getConstraintNames(const std::string &tableNa )SQL"; auto stmt = createStmt(sql); stmt->bindString(":TABLE_NAME", tableName); - auto rset = stmt->executeQuery(); - if (rset->next()) { + if (auto rset = stmt->executeQuery(); rset->next()) { auto tableSql = rset->columnOptionalString("SQL").value(); tableSql += std::string(","); // hack for parsing std::string::size_type searchPosComma = 0; diff --git a/rdbms/wrapper/SqliteStmt.cpp b/rdbms/wrapper/SqliteStmt.cpp index bc51c32c25..e77ddddab4 100644 --- a/rdbms/wrapper/SqliteStmt.cpp +++ b/rdbms/wrapper/SqliteStmt.cpp @@ -98,8 +98,8 @@ void SqliteStmt::clear() { threading::MutexLocker locker(m_mutex); if(nullptr != m_stmt) { - const int resetRc = sqlite3_reset(m_stmt); - if(SQLITE_OK != resetRc) { + if(const int resetRc = sqlite3_reset(m_stmt); + SQLITE_OK != resetRc) { exception::Exception ex; ex.getMessage() <<"sqlite3_reset failed: " << Sqlite::rcToStr(resetRc); } @@ -122,8 +122,8 @@ void SqliteStmt::close() { threading::MutexLocker locker(m_mutex); if (nullptr != m_stmt) { - const int finalizeRc = sqlite3_finalize(m_stmt); - if (SQLITE_OK != finalizeRc) { + if (const int finalizeRc = sqlite3_finalize(m_stmt); + SQLITE_OK != finalizeRc) { exception::Exception ex; ex.getMessage() <<"sqlite3_finalize failed: " << Sqlite::rcToStr(finalizeRc); } diff --git a/scheduler/OStoreDB/OStoreDB.cpp b/scheduler/OStoreDB/OStoreDB.cpp index ea51f09a9f..4b9af0ce5b 100644 --- a/scheduler/OStoreDB/OStoreDB.cpp +++ b/scheduler/OStoreDB/OStoreDB.cpp @@ -313,23 +313,25 @@ void OStoreDB::fetchMountInfo(SchedulerDatabase::TapeMountDecisionInfo& tmdi, } // If there are files queued, we create an entry for this tape pool in the // mount candidates list. 
- cta::objectstore::ArchiveQueue::JobsSummary aqueueJobsSummary = aqueue.getJobsSummary(); - if (aqueueJobsSummary.jobs) { - tmdi.potentialMounts.emplace_back(); - auto& m = tmdi.potentialMounts.back(); - m.tapePool = aqp.tapePool; - m.type = cta::common::dataStructures::MountType::ArchiveForUser; - m.bytesQueued = aqueueJobsSummary.bytes; - m.filesQueued = aqueueJobsSummary.jobs; - m.oldestJobStartTime = aqueueJobsSummary.oldestJobStartTime; - m.youngestJobStartTime = aqueueJobsSummary.youngestJobStartTime; - //By default, we get the mountPolicies from the objectstore's queue counters - m.priority = aqueueJobsSummary.priority; - m.minRequestAge = aqueueJobsSummary.minArchiveRequestAge; - m.mountPolicyCountMap = aqueueJobsSummary.mountPolicyCountMap; - m.logicalLibrary = ""; - } else { - tmdi.queueTrimRequired = true; + { + cta::objectstore::ArchiveQueue::JobsSummary aqueueJobsSummary = aqueue.getJobsSummary(); + if (aqueueJobsSummary.jobs) { + tmdi.potentialMounts.emplace_back(); + auto &m = tmdi.potentialMounts.back(); + m.tapePool = aqp.tapePool; + m.type = cta::common::dataStructures::MountType::ArchiveForUser; + m.bytesQueued = aqueueJobsSummary.bytes; + m.filesQueued = aqueueJobsSummary.jobs; + m.oldestJobStartTime = aqueueJobsSummary.oldestJobStartTime; + m.youngestJobStartTime = aqueueJobsSummary.youngestJobStartTime; + //By default, we get the mountPolicies from the objectstore's queue counters + m.priority = aqueueJobsSummary.priority; + m.minRequestAge = aqueueJobsSummary.minArchiveRequestAge; + m.mountPolicyCountMap = aqueueJobsSummary.mountPolicyCountMap; + m.logicalLibrary = ""; + } else { + tmdi.queueTrimRequired = true; + } } if (queueLockTime > 1 || queueFetchTime > 1) { @@ -369,22 +371,24 @@ void OStoreDB::fetchMountInfo(SchedulerDatabase::TapeMountDecisionInfo& tmdi, } // If there are files queued, we create an entry for this tape pool in the // mount candidates list. 
- cta::objectstore::ArchiveQueue::JobsSummary aqueueRepackJobsSummary = aqueue.getJobsSummary(); - if (aqueueRepackJobsSummary.jobs) { - tmdi.potentialMounts.emplace_back(); - auto& m = tmdi.potentialMounts.back(); - m.tapePool = aqp.tapePool; - m.type = cta::common::dataStructures::MountType::ArchiveForRepack; - m.bytesQueued = aqueueRepackJobsSummary.bytes; - m.filesQueued = aqueueRepackJobsSummary.jobs; - m.oldestJobStartTime = aqueueRepackJobsSummary.oldestJobStartTime; - m.youngestJobStartTime = aqueueRepackJobsSummary.youngestJobStartTime; - m.priority = aqueueRepackJobsSummary.priority; - m.minRequestAge = aqueueRepackJobsSummary.minArchiveRequestAge; - m.mountPolicyCountMap = aqueueRepackJobsSummary.mountPolicyCountMap; - m.logicalLibrary = ""; - } else { - tmdi.queueTrimRequired = true; + { + cta::objectstore::ArchiveQueue::JobsSummary aqueueRepackJobsSummary = aqueue.getJobsSummary(); + if (aqueueRepackJobsSummary.jobs) { + tmdi.potentialMounts.emplace_back(); + auto &m = tmdi.potentialMounts.back(); + m.tapePool = aqp.tapePool; + m.type = cta::common::dataStructures::MountType::ArchiveForRepack; + m.bytesQueued = aqueueRepackJobsSummary.bytes; + m.filesQueued = aqueueRepackJobsSummary.jobs; + m.oldestJobStartTime = aqueueRepackJobsSummary.oldestJobStartTime; + m.youngestJobStartTime = aqueueRepackJobsSummary.youngestJobStartTime; + m.priority = aqueueRepackJobsSummary.priority; + m.minRequestAge = aqueueRepackJobsSummary.minArchiveRequestAge; + m.mountPolicyCountMap = aqueueRepackJobsSummary.mountPolicyCountMap; + m.logicalLibrary = ""; + } else { + tmdi.queueTrimRequired = true; + } } if (queueLockTime > 1 || queueFetchTime > 1) { @@ -425,8 +429,7 @@ void OStoreDB::fetchMountInfo(SchedulerDatabase::TapeMountDecisionInfo& tmdi, } // If there are files queued, we create an entry for this retrieve queue in the // mount candidates list. 
- auto rqSummary = rqueue.getJobsSummary(); - if (rqSummary.jobs) { + if (auto rqSummary = rqueue.getJobsSummary(); rqSummary.jobs) { //Getting the default mountPolicies parameters from the queue summary uint64_t minRetrieveRequestAge = rqSummary.minRetrieveRequestAge; uint64_t priority = rqSummary.priority; @@ -4248,8 +4251,7 @@ void OStoreDB::ArchiveMount::setJobBatchTransferred( while (jobsBatchItor != jobsBatch.end()) { try { castFromSchedDBJob(jobsBatchItor->get())->waitAsyncSucceed(); - auto repackInfo = castFromSchedDBJob(jobsBatchItor->get())->getRepackInfoAfterAsyncSuccess(); - if (repackInfo.isRepack) { + if (auto repackInfo = castFromSchedDBJob(jobsBatchItor->get())->getRepackInfoAfterAsyncSuccess(); repackInfo.isRepack) { jobsToQueueForReportingToRepack.insert(castFromSchedDBJob(jobsBatchItor->get())); } else { if (castFromSchedDBJob(jobsBatchItor->get())->isLastAfterAsyncSuccess()) { diff --git a/scheduler/Scheduler.cpp b/scheduler/Scheduler.cpp index 1acec1a692..ec0f52e15c 100644 --- a/scheduler/Scheduler.cpp +++ b/scheduler/Scheduler.cpp @@ -978,8 +978,7 @@ common::dataStructures::DesiredDriveState Scheduler::getDesiredDriveState(const "In Scheduler::getDesiredDriveState(): checking driveName: " + driveName + " against existing: " + driveState.driveName); if (driveState.driveName == driveName) { - const auto schedulerDbTime = t.secs(); - if (schedulerDbTime > 1) { + if (const auto schedulerDbTime = t.secs(); schedulerDbTime > 1) { log::ScopedParamContainer spc(lc); spc.add("drive", driveName).add("schedulerDbTime", schedulerDbTime); lc.log(log::DEBUG, "In Scheduler::getDesiredDriveState(): success."); @@ -2048,9 +2047,7 @@ bool Scheduler::getNextMountDryRun(const std::string& logicalLibraryName, double catalogueTime = 0; double checkLogicalAndPhysicalLibrariesTime = 0; - bool validForMount = - checkLogicalAndPhysicalLibraryValidForMount(logicalLibraryName, checkLogicalAndPhysicalLibrariesTime, lc); - if (!validForMount) { + if 
(!checkLogicalAndPhysicalLibraryValidForMount(logicalLibraryName, checkLogicalAndPhysicalLibrariesTime, lc)) { return false; } @@ -2226,9 +2223,7 @@ std::unique_ptr Scheduler::getNextMount(const std::string& logicalLib double checkLogicalAndPhysicalLibrariesTime = 0; double catalogueTime = 0; - bool validForMount = - checkLogicalAndPhysicalLibraryValidForMount(logicalLibraryName, checkLogicalAndPhysicalLibrariesTime, lc); - if (!validForMount) { + if (!checkLogicalAndPhysicalLibraryValidForMount(logicalLibraryName, checkLogicalAndPhysicalLibrariesTime, lc)) { return std::unique_ptr(); } diff --git a/scheduler/rdbms/RelationalDB.cpp b/scheduler/rdbms/RelationalDB.cpp index be8824b6f3..43cb34ff0c 100644 --- a/scheduler/rdbms/RelationalDB.cpp +++ b/scheduler/rdbms/RelationalDB.cpp @@ -972,8 +972,7 @@ RelationalDB::getNextSuccessfulArchiveRepackReportBatch(log::LogContext& lc) { // ------------------------------------------ // calling the deletion for the jobSrcUrls // ------------------------------------------ - bool deletionOk = deleteDiskFiles(jobSrcUrls, lc); - if (!deletionOk){ + if (!deleteDiskFiles(jobSrcUrls, lc)){ txn.abort(); return ret; } @@ -1148,8 +1147,7 @@ RelationalDB::getNextFailedArchiveRepackReportBatch(log::LogContext& lc) { // ------------------------------------------ // calling the deletion for the jobSrcUrls // ------------------------------------------ - bool deletionOk = deleteDiskFiles(jobSrcUrls, lc); - if (!deletionOk){ + if (!deleteDiskFiles(jobSrcUrls, lc)){ lc.log(cta::log::WARNING, "In RelationalDB::getNextFailedArchiveRepackReportBatch(): Failed to delete files from disk."); } diff --git a/scheduler/rdbms/RepackRequest.cpp b/scheduler/rdbms/RepackRequest.cpp index 4d9a6f5e00..83f59a2f2e 100644 --- a/scheduler/rdbms/RepackRequest.cpp +++ b/scheduler/rdbms/RepackRequest.cpp @@ -115,10 +115,9 @@ namespace cta::schedulerdb { std::vector > rrRowBatchToTransfer; std::vector > rrRowBatchNoRecall; while (subReqItor != repackSubrequests.end() 
&& nbSubReqProcessed < 500) { - auto &rsr = *subReqItor; // Requests marked as deleted are guaranteed to have already been created => we will not re-attempt. - if (!srmap.at(rsr.fSeq)->isSubreqDeleted) { + if (auto &rsr = *subReqItor; !srmap.at(rsr.fSeq)->isSubreqDeleted) { try { auto conn = m_connPool.getConn(); RetrieveRequest rr(conn, lc); @@ -467,16 +466,13 @@ namespace cta::schedulerdb { } common::dataStructures::RepackInfo::Status RepackRequest::getCurrentStatus() const { - bool finishedExpansion = repackInfo.isExpandFinished; - - bool allRetrieveDone = - (repackInfo.retrievedFiles + repackInfo.failedFilesToRetrieve) >= repackInfo.totalFilesToRetrieve; - - bool allArchiveDone = - (repackInfo.archivedFiles + repackInfo.failedFilesToArchive + m_failedToCreateArchiveReq) >= - repackInfo.totalFilesToArchive; - - if (finishedExpansion && allRetrieveDone && allArchiveDone) { + if (bool finishedExpansion = repackInfo.isExpandFinished, + allRetrieveDone = + (repackInfo.retrievedFiles + repackInfo.failedFilesToRetrieve) >= repackInfo.totalFilesToRetrieve, + allArchiveDone = + (repackInfo.archivedFiles + repackInfo.failedFilesToArchive + m_failedToCreateArchiveReq) >= + repackInfo.totalFilesToArchive; + finishedExpansion && allRetrieveDone && allArchiveDone) { if (repackInfo.failedFilesToRetrieve > 0 || repackInfo.failedFilesToArchive > 0) { return common::dataStructures::RepackInfo::Status::Failed; } else { diff --git a/scheduler/rdbms/postgres/ArchiveJobQueue.cpp b/scheduler/rdbms/postgres/ArchiveJobQueue.cpp index 5d44d48c12..c3fdc76f09 100644 --- a/scheduler/rdbms/postgres/ArchiveJobQueue.cpp +++ b/scheduler/rdbms/postgres/ArchiveJobQueue.cpp @@ -368,9 +368,9 @@ uint64_t ArchiveJobQueueRow::updateFailedJobStatus(Transaction& txn, bool isRepa void ArchiveJobQueueRow::updateJobRowFailureLog(const std::string& reason, bool is_report_log) { std::string failureLog = cta::utils::getCurrentLocalTime() + " " + cta::utils::getShortHostname() + " " + reason; - 
auto& logField = is_report_log ? reportFailureLogs : failureLogs; - if (logField.has_value()) { + if (auto& logField = is_report_log ? reportFailureLogs : failureLogs; + logField.has_value()) { logField.value() += failureLog; } else { logField.emplace(failureLog); diff --git a/scheduler/rdbms/postgres/RetrieveJobQueue.cpp b/scheduler/rdbms/postgres/RetrieveJobQueue.cpp index aa0cbe2310..accdfc05d8 100644 --- a/scheduler/rdbms/postgres/RetrieveJobQueue.cpp +++ b/scheduler/rdbms/postgres/RetrieveJobQueue.cpp @@ -324,9 +324,9 @@ uint64_t RetrieveJobQueueRow::updateFailedJobStatus(Transaction& txn, bool isRep void RetrieveJobQueueRow::updateJobRowFailureLog(const std::string& reason, bool is_report_log) { std::string failureLog = cta::utils::getCurrentLocalTime() + " " + cta::utils::getShortHostname() + " " + reason; - auto& logField = is_report_log ? reportFailureLogs : failureLogs; - if (logField.has_value()) { + if (auto& logField = is_report_log ? reportFailureLogs : failureLogs; + logField.has_value()) { logField.value() += failureLog; } else { logField.emplace(failureLog); diff --git a/scheduler/rdbms/schema/CreateSchemaCmdLineArgs.cpp b/scheduler/rdbms/schema/CreateSchemaCmdLineArgs.cpp index 37ce78ce49..9ce634668c 100644 --- a/scheduler/rdbms/schema/CreateSchemaCmdLineArgs.cpp +++ b/scheduler/rdbms/schema/CreateSchemaCmdLineArgs.cpp @@ -79,10 +79,8 @@ CreateSchemaCmdLineArgs::CreateSchemaCmdLineArgs(const int argc, char *const *co } // Calculate the number of non-option ARGV-elements - const int nbArgs = argc - optind; - // Check the number of arguments - if(nbArgs != 1) { + if(const int nbArgs = argc - optind; nbArgs != 1) { exception::CommandLineNotParsed ex; ex.getMessage() << "Wrong number of command-line arguments: expected=1 actual=" << nbArgs; throw ex; diff --git a/scheduler/rdbms/schema/DropSchemaCmd.cpp b/scheduler/rdbms/schema/DropSchemaCmd.cpp index 135723ba64..e4d1cb7976 100644 --- a/scheduler/rdbms/schema/DropSchemaCmd.cpp +++ 
b/scheduler/rdbms/schema/DropSchemaCmd.cpp @@ -183,8 +183,7 @@ bool DropSchemaCmd::isProductionSet(cta::rdbms::Conn& conn) { SELECT CTA_SCHEDULER.IS_PRODUCTION AS IS_PRODUCTION FROM CTA_SCHEDULER )SQL"; auto stmt = conn.createStmt(sql); - auto rset = stmt.executeQuery(); - if (rset.next()) { + if (auto rset = stmt.executeQuery(); rset.next()) { return rset.columnBool("IS_PRODUCTION"); } else { return false; // The table is empty diff --git a/scheduler/rdbms/schema/SchedulerSchema.cpp b/scheduler/rdbms/schema/SchedulerSchema.cpp index a2baaab58a..bb9c68f289 100644 --- a/scheduler/rdbms/schema/SchedulerSchema.cpp +++ b/scheduler/rdbms/schema/SchedulerSchema.cpp @@ -47,8 +47,7 @@ std::map> SchedulerSchema::getSchemaVersion() " ([[:digit:]]+)," " ([[:digit:]]+)\\);" ); - auto version = schemaVersionRegex.exec(sql); - if (3 == version.size()) { + if (auto version = schemaVersionRegex.exec(sql); 3 == version.size()) { schemaVersion.insert(std::make_pair("SCHEMA_VERSION_MAJOR", cta::utils::toUint64(version[1].c_str()))); schemaVersion.insert(std::make_pair("SCHEMA_VERSION_MINOR", cta::utils::toUint64(version[2].c_str()))); } else { diff --git a/statistics/StatisticsUpdateCmdLineArgs.cpp b/statistics/StatisticsUpdateCmdLineArgs.cpp index e17460edfd..b1f7808544 100644 --- a/statistics/StatisticsUpdateCmdLineArgs.cpp +++ b/statistics/StatisticsUpdateCmdLineArgs.cpp @@ -76,10 +76,8 @@ StatisticsUpdateCmdLineArgs::StatisticsUpdateCmdLineArgs(const int argc, char *c } // Calculate the number of non-option ARGV-elements - const int nbArgs = argc - optind; - // Check the number of arguments - if (nbArgs != 1) { + if (const int nbArgs = argc - optind; nbArgs != 1) { exception::CommandLineNotParsed ex; ex.getMessage() << "Wrong number of command-line arguments: expected=1 actual=" << nbArgs; throw ex; diff --git a/tapeserver/castor/tape/tapeserver/SCSI/Device.cpp b/tapeserver/castor/tape/tapeserver/SCSI/Device.cpp index 9f039b2b02..c0d0474866 100644 --- 
a/tapeserver/castor/tape/tapeserver/SCSI/Device.cpp +++ b/tapeserver/castor/tape/tapeserver/SCSI/Device.cpp @@ -86,8 +86,8 @@ std::string SCSI::DeviceVector::readfile(const std::string& path) { SCSI::DeviceInfo::DeviceFile SCSI::DeviceVector::readDeviceFile(const std::string& path) { DeviceInfo::DeviceFile ret; - std::string file = readfile(path); - if (!::sscanf(file.c_str(), "%u:%u\n", &ret.major, &ret.minor)) + if (std::string file = readfile(path); + !::sscanf(file.c_str(), "%u:%u\n", &ret.major, &ret.minor)) throw cta::exception::Exception(std::string("Could not parse file: ") + path); return ret; } @@ -240,8 +240,8 @@ SCSI::DeviceInfo SCSI::DeviceVector::getDeviceInfo(const char * path) { /* Get the major and minor number of the device file */ ret.sg = readDeviceFile(ret.sysfs_entry + "/generic/dev"); /* Check that we have an agreement with the actual device file */ - DeviceInfo::DeviceFile realFile = statDeviceFile(ret.sg_dev); - if (ret.sg != realFile) { + if (DeviceInfo::DeviceFile realFile = statDeviceFile(ret.sg_dev); + ret.sg != realFile) { std::stringstream err; err << "Mismatch between sysfs info and actual device file: " << ret.sysfs_entry + "/generic/dev" << " indicates " diff --git a/tapeserver/castor/tape/tapeserver/daemon/CleanerSession.cpp b/tapeserver/castor/tape/tapeserver/daemon/CleanerSession.cpp index 6b06199b07..f010d2ee66 100644 --- a/tapeserver/castor/tape/tapeserver/daemon/CleanerSession.cpp +++ b/tapeserver/castor/tape/tapeserver/daemon/CleanerSession.cpp @@ -225,8 +225,8 @@ void castor::tape::tapeserver::daemon::CleanerSession::cleanDrive(drive::DriveIn void castor::tape::tapeserver::daemon::CleanerSession::logAndClearTapeAlerts(drive::DriveInterface &drive) noexcept { std::string errorMessage; try { - std::vector tapeAlertCodes = drive.getTapeAlertCodes(); - if (!tapeAlertCodes.empty()) { + if (std::vector tapeAlertCodes = drive.getTapeAlertCodes(); + !tapeAlertCodes.empty()) { size_t alertNumber = 0; // Log tape alerts in the logs. 
std::vector tapeAlerts = drive.getTapeAlerts(tapeAlertCodes); diff --git a/tapeserver/castor/tape/tapeserver/daemon/DataTransferSession.cpp b/tapeserver/castor/tape/tapeserver/daemon/DataTransferSession.cpp index 37ca049b2c..be92665521 100644 --- a/tapeserver/castor/tape/tapeserver/daemon/DataTransferSession.cpp +++ b/tapeserver/castor/tape/tapeserver/daemon/DataTransferSession.cpp @@ -145,8 +145,8 @@ castor::tape::tapeserver::daemon::DataTransferSession::execute() { lc.log(cta::log::INFO, "Transition from down to up detected. Will check if a tape is in the drive."); if (!emptyDriveProbe.driveIsEmpty()) { std::string errorMsg = "A tape was detected in the drive. Putting the drive down."; - std::optional probeErrorMsg = emptyDriveProbe.getProbeErrorMsg(); - if (probeErrorMsg) { + if (std::optional probeErrorMsg = emptyDriveProbe.getProbeErrorMsg(); + probeErrorMsg) { errorMsg = probeErrorMsg.value(); } putDriveDown(errorMsg, nullptr, lc); diff --git a/tapeserver/castor/tape/tapeserver/daemon/MigrationReportPacker.cpp b/tapeserver/castor/tape/tapeserver/daemon/MigrationReportPacker.cpp index 0b264e3d61..620a91fb73 100644 --- a/tapeserver/castor/tape/tapeserver/daemon/MigrationReportPacker.cpp +++ b/tapeserver/castor/tape/tapeserver/daemon/MigrationReportPacker.cpp @@ -560,8 +560,8 @@ void MigrationReportPacker::WorkerThread::run() { // Drain the FIFO if necessary. 
We know that m_continue will be // set by ReportEndofSessionWithErrors or ReportEndofSession // TODO devise a more generic mechanism - uint64_t leftOverReportCount = m_parent.m_fifo.size(); - if (leftOverReportCount != 0) { + if (uint64_t leftOverReportCount = m_parent.m_fifo.size(); + leftOverReportCount != 0) { cta::log::ScopedParamContainer params(lc); params.add("leftOverReportCount", leftOverReportCount); params.add("MigrationReportPacker.m_continue", m_parent.m_continue); diff --git a/tapeserver/castor/tape/tapeserver/daemon/RecallTaskInjector.cpp b/tapeserver/castor/tape/tapeserver/daemon/RecallTaskInjector.cpp index bb04c48508..d30b1e3a1a 100644 --- a/tapeserver/castor/tape/tapeserver/daemon/RecallTaskInjector.cpp +++ b/tapeserver/castor/tape/tapeserver/daemon/RecallTaskInjector.cpp @@ -244,8 +244,7 @@ void RecallTaskInjector::injectBulkRecalls() { m_files--; m_bytes -= job->archiveFile.fileSize; } - bool reservationSuccess = reserveSpaceForNextJobBatch(retrieveJobsBatch); - if (!reservationSuccess) { + if (!reserveSpaceForNextJobBatch(retrieveJobsBatch)) { m_watchdog.addToErrorCount("Info_diskSpaceReservationFailure"); return; } diff --git a/tapeserver/castor/tape/tapeserver/daemon/TapeWriteSingleThread.cpp b/tapeserver/castor/tape/tapeserver/daemon/TapeWriteSingleThread.cpp index 9ab1abc7c3..e48ccdd710 100644 --- a/tapeserver/castor/tape/tapeserver/daemon/TapeWriteSingleThread.cpp +++ b/tapeserver/castor/tape/tapeserver/daemon/TapeWriteSingleThread.cpp @@ -499,8 +499,8 @@ void castor::tape::tapeserver::daemon::TapeWriteSingleThread::run() { // If it's not the error we're looking for, we will go about our business // in the catch section. dynamic cast will throw, and we'll do ourselves // if the error code is not the one we want. 
- const auto& en = dynamic_cast(e); - if (en.errorNumber() != ENOSPC) { + if (const auto& en = dynamic_cast(e); + en.errorNumber() != ENOSPC) { throw 0; } else { isTapeFull = true; diff --git a/tapeserver/castor/tape/tapeserver/daemon/TapeWriteTask.cpp b/tapeserver/castor/tape/tapeserver/daemon/TapeWriteTask.cpp index 67cb5be914..64c32a2a72 100644 --- a/tapeserver/castor/tape/tapeserver/daemon/TapeWriteTask.cpp +++ b/tapeserver/castor/tape/tapeserver/daemon/TapeWriteTask.cpp @@ -257,8 +257,8 @@ void TapeWriteTask::execute(const std::unique_ptr(e); - if (en.errorNumber() != ENOSPC) { + if (const auto& en = dynamic_cast(e); + en.errorNumber() != ENOSPC) { throw; } else { doReportJobError = false; diff --git a/tapeserver/castor/tape/tapeserver/drive/DriveGeneric.cpp b/tapeserver/castor/tape/tapeserver/drive/DriveGeneric.cpp index 9cc10e9804..5665630311 100644 --- a/tapeserver/castor/tape/tapeserver/drive/DriveGeneric.cpp +++ b/tapeserver/castor/tape/tapeserver/drive/DriveGeneric.cpp @@ -2101,8 +2101,7 @@ std::map drive::DriveIBM3592::getQualityStats() { while (logParameter < endPage) { SCSI::Structures::logSenseParameter_t& logPageParam = *(SCSI::Structures::logSenseParameter_t*) logParameter; - const int val = logPageParam.getU64Value(); - if (val != 0) { + if (const int val = logPageParam.getU64Value(); val != 0) { switch (SCSI::Structures::toU16(logPageParam.header.parameterCode)) { case SCSI::performanceCharacteristicsQualitySummaryPage::driveEfficiency: qualityStats["lifetimeDriveEfficiencyPrct"] = 100 - (val - 1) * 100 / 254.0; @@ -2154,8 +2153,7 @@ std::map drive::DriveIBM3592::getQualityStats() { while (logParameter < endPage) { SCSI::Structures::logSenseParameter_t& logPageParam = *(SCSI::Structures::logSenseParameter_t*) logParameter; - const int val = logPageParam.getU64Value(); - if (val != 0) { + if (const int val = logPageParam.getU64Value(); val != 0) { switch (SCSI::Structures::toU16(logPageParam.header.parameterCode)) { case 
SCSI::performanceCharacteristicsQualitySummaryPage::driveEfficiency: qualityStats["mountDriveEfficiencyPrct"] = 100 - (float) (val - 1) * 100 / 254.0; @@ -2301,8 +2299,7 @@ std::map drive::DriveLTO::getQualityStats() { while (logParameter < endPage) { SCSI::Structures::logSenseParameter_t& logPageParam = *(SCSI::Structures::logSenseParameter_t*) logParameter; - const int val = logPageParam.getU64Value(); - if (val != 0) { + if (const int val = logPageParam.getU64Value(); val != 0) { switch (SCSI::Structures::toU16(logPageParam.header.parameterCode)) { case SCSI::performanceCharacteristicsQualitySummaryPage::driveEfficiency: qualityStats["lifetimeDriveEfficiencyPrct"] = 100 - (val - 1) * 100 / 254.0; @@ -2353,8 +2350,7 @@ std::map drive::DriveLTO::getQualityStats() { while (logParameter < endPage) { SCSI::Structures::logSenseParameter_t& logPageParam = *(SCSI::Structures::logSenseParameter_t*) logParameter; - const int val = logPageParam.getU64Value(); - if (val != 0) { + if (const int val = logPageParam.getU64Value(); val != 0) { switch (SCSI::Structures::toU16(logPageParam.header.parameterCode)) { case SCSI::performanceCharacteristicsQualitySummaryPage::driveEfficiency: qualityStats["mountDriveEfficiencyPrct"] = 100 - (float) (val - 1) * 100 / 254.0; diff --git a/tapeserver/castor/tape/tapeserver/file/EnstoreLargeReadSession.cpp b/tapeserver/castor/tape/tapeserver/file/EnstoreLargeReadSession.cpp index 107ab33567..0e5436c1ed 100644 --- a/tapeserver/castor/tape/tapeserver/file/EnstoreLargeReadSession.cpp +++ b/tapeserver/castor/tape/tapeserver/file/EnstoreLargeReadSession.cpp @@ -40,8 +40,8 @@ ReadSession(drive, volInfo, useLbp) { // Throw away the end and validate the beggining as a normal VOL1 size_t blockSize = 256 * 1024; char* data = new char[blockSize + 1]; - size_t bytes_read = m_drive.readBlock(data, blockSize); - if (bytes_read < sizeof(vol1)) { + if (size_t bytes_read = m_drive.readBlock(data, blockSize); + bytes_read < sizeof(vol1)) { delete[] data; 
throw cta::exception::Exception("Too few bytes read from label"); } diff --git a/tapeserver/castor/tape/tapeserver/file/EnstoreReadSession.cpp b/tapeserver/castor/tape/tapeserver/file/EnstoreReadSession.cpp index bc68998336..b186c6e771 100644 --- a/tapeserver/castor/tape/tapeserver/file/EnstoreReadSession.cpp +++ b/tapeserver/castor/tape/tapeserver/file/EnstoreReadSession.cpp @@ -38,8 +38,8 @@ EnstoreReadSession::EnstoreReadSession(tapeserver::drive::DriveInterface &drive, // Throw away the end and validate the beggining as a normal VOL1 size_t blockSize = 256 * 1024; char* data = new char[blockSize + 1]; - size_t bytes_read = m_drive.readBlock(data, blockSize); - if (bytes_read < sizeof(vol1)) { + if (size_t bytes_read = m_drive.readBlock(data, blockSize); + bytes_read < sizeof(vol1)) { delete[] data; throw cta::exception::Exception("Too few bytes read from label"); } diff --git a/tapeserver/castor/tape/tapeserver/file/HeaderChecker.cpp b/tapeserver/castor/tape/tapeserver/file/HeaderChecker.cpp index cd70a15801..34e9d74a7e 100644 --- a/tapeserver/castor/tape/tapeserver/file/HeaderChecker.cpp +++ b/tapeserver/castor/tape/tapeserver/file/HeaderChecker.cpp @@ -141,7 +141,6 @@ std::string HeaderChecker::checkVolumeLabel(tapeserver::drive::DriveInterface &d size_t blockSize = 256 * 1024; auto data = std::make_unique(blockSize + 1); - size_t bytes_read = drive.readBlock(data.get(), blockSize); - if (bytes_read < sizeof(vol1)) { + if (size_t bytes_read = drive.readBlock(data.get(), blockSize); bytes_read < sizeof(vol1)) { throw cta::exception::Exception("Too few bytes read from label"); } memcpy(&vol1, data.get(), sizeof(vol1)); diff --git a/tapeserver/castor/tape/tapeserver/system/FileWrappers.cpp b/tapeserver/castor/tape/tapeserver/system/FileWrappers.cpp index 4b8743d59c..73a84484ab 100644 --- a/tapeserver/castor/tape/tapeserver/system/FileWrappers.cpp +++ b/tapeserver/castor/tape/tapeserver/system/FileWrappers.cpp @@ -205,9 +205,9 @@ int
System::stDeviceFile::ioctlRequestSense(sg_io_hdr_t* sgio_h) { errno = EINVAL; return -1; } - SCSI::Structures::requestSenseData_t &requestSenseData = - *reinterpret_cast(sgio_h->dxferp); - if(sizeof(requestSenseData) > sgio_h->dxfer_len) { + if(SCSI::Structures::requestSenseData_t &requestSenseData = + *reinterpret_cast(sgio_h->dxferp); + sizeof(requestSenseData) > sgio_h->dxfer_len) { errno = EINVAL; return -1; } @@ -220,9 +220,9 @@ int System::stDeviceFile::ioctlLogSelect(sg_io_hdr_t * sgio_h) { return -1; } /* we check CDB structure only and do not need to replay */ - SCSI::Structures::logSelectCDB_t & cdb = + if (SCSI::Structures::logSelectCDB_t & cdb = *(SCSI::Structures::logSelectCDB_t *) sgio_h->cmdp; - if (1 != cdb.PCR || 0x3 != cdb.PC) { + 1 != cdb.PCR || 0x3 != cdb.PC) { errno = EINVAL; return -1; } @@ -953,23 +953,24 @@ int System::stDeviceFile::ioctlModSense6(sg_io_hdr_t * sgio_h) { errno = EINVAL; return -1; } - SCSI::Structures::modeSense6CDB_t & cdb = - *(SCSI::Structures::modeSense6CDB_t *) sgio_h->cmdp; - - switch (cdb.pageCode) { - case SCSI::modeSensePages::deviceConfiguration: - return modeSenseDeviceConfiguration(sgio_h); - case SCSI::modeSensePages::controlDataProtection: - return modeSenseControlDataProtection(sgio_h); + { + SCSI::Structures::modeSense6CDB_t &cdb = + *(SCSI::Structures::modeSense6CDB_t *) sgio_h->cmdp; + switch (cdb.pageCode) { + case SCSI::modeSensePages::deviceConfiguration: + return modeSenseDeviceConfiguration(sgio_h); + case SCSI::modeSensePages::controlDataProtection: + return modeSenseControlDataProtection(sgio_h); + } } errno = EINVAL; return -1; } int System::stDeviceFile::modeSenseDeviceConfiguration(sg_io_hdr_t * sgio_h) { - SCSI::Structures::modeSense6CDB_t & cdb = - *(SCSI::Structures::modeSense6CDB_t *) sgio_h->cmdp; - if (SCSI::modeSensePages::deviceConfiguration != cdb.pageCode) { + if (SCSI::Structures::modeSense6CDB_t & cdb = + *(SCSI::Structures::modeSense6CDB_t *) sgio_h->cmdp; + 
SCSI::modeSensePages::deviceConfiguration != cdb.pageCode) { errno = EINVAL; return -1; } @@ -1037,14 +1038,14 @@ int System::stDeviceFile::ioctlModSelect6(sg_io_hdr_t * sgio_h) { SCSI::Structures::modeParameterBlockDecriptor_t & blockDescriptor = *(SCSI::Structures::modeParameterBlockDecriptor_t *) (data+sizeof(header)); - - unsigned char * modeSelectBlock = data+sizeof(header)+sizeof(blockDescriptor); - - switch (modeSelectBlock[0]&0x3F) { // only 6bits are the page code - case SCSI::modeSensePages::deviceConfiguration: - return modeSelectDeviceConfiguration(sgio_h); - case SCSI::modeSensePages::controlDataProtection: - return modeSelectControlDataProtection(sgio_h); + { + unsigned char *modeSelectBlock = data + sizeof(header) + sizeof(blockDescriptor); + switch (modeSelectBlock[0] & 0x3F) { // only 6bits are the page code + case SCSI::modeSensePages::deviceConfiguration: + return modeSelectDeviceConfiguration(sgio_h); + case SCSI::modeSensePages::controlDataProtection: + return modeSelectControlDataProtection(sgio_h); + } } errno = EINVAL; return -1; @@ -1116,10 +1117,10 @@ int System::stOracleT10000Device::ioctlInquiry(sg_io_hdr_t * sgio_h) { errno = EINVAL; return -1; } - SCSI::Structures::inquiryCDB_t & cdb = - *(SCSI::Structures::inquiryCDB_t *) sgio_h->cmdp; - if (0 == cdb.EVPD && 0 == cdb.pageCode) { + if (SCSI::Structures::inquiryCDB_t & cdb = + *(SCSI::Structures::inquiryCDB_t *) sgio_h->cmdp; + 0 == cdb.EVPD && 0 == cdb.pageCode) { /* the Standard Inquiry Data is returned*/ SCSI::Structures::inquiryData_t & inqData = *(SCSI::Structures::inquiryData_t *) sgio_h->dxferp; @@ -1168,10 +1169,10 @@ int System::stIBM3592DeviceFile::ioctlInquiry(sg_io_hdr_t * sgio_h) { errno = EINVAL; return -1; } - SCSI::Structures::inquiryCDB_t & cdb = - *(SCSI::Structures::inquiryCDB_t *) sgio_h->cmdp; - if (0 == cdb.EVPD && 0 == cdb.pageCode) { + if (SCSI::Structures::inquiryCDB_t & cdb = + *(SCSI::Structures::inquiryCDB_t *) sgio_h->cmdp; + 0 == cdb.EVPD && 0 == 
cdb.pageCode) { /* the Standard Inquiry Data is returned*/ SCSI::Structures::inquiryData_t & inqData = *(SCSI::Structures::inquiryData_t *) sgio_h->dxferp; diff --git a/tapeserver/daemon/DriveHandler.cpp b/tapeserver/daemon/DriveHandler.cpp index 06e066166b..648b58041b 100644 --- a/tapeserver/daemon/DriveHandler.cpp +++ b/tapeserver/daemon/DriveHandler.cpp @@ -300,8 +300,8 @@ SubprocessHandler::ProcessingStatus DriveHandler::processEvent() { // Read from the socket pair try { serializers::WatchdogMessage message; - auto datagram = m_socketPair->receive(server::SocketPair::Side::child); - if (!message.ParseFromString(datagram)) { + if (auto datagram = m_socketPair->receive(server::SocketPair::Side::child); + !message.ParseFromString(datagram)) { // Use the tolerant parser to assess the situation. message.ParsePartialFromString(datagram); throw cta::exception::Exception(std::string("In SubprocessHandler::ProcessingStatus(): could not parse message: ") + @@ -661,10 +661,10 @@ int DriveHandler::runChild() { // 2) If the previous session crashed, we might want to run a cleaner session, depending // on the previous state - const std::set statesRequiringCleaner = { - SessionState::Mounting, SessionState::Running, SessionState::Unmounting - }; - if (m_previousSession == PreviousSession::Crashed && statesRequiringCleaner.count(m_previousState)) { + if (const std::set statesRequiringCleaner = { + SessionState::Mounting, SessionState::Running, SessionState::Unmounting + }; + m_previousSession == PreviousSession::Crashed && statesRequiringCleaner.count(m_previousState)) { // Set session type to cleanup m_sessionType = SessionType::Cleanup; if (m_previousVid.empty()) { @@ -826,9 +826,9 @@ SubprocessHandler::ProcessingStatus DriveHandler::shutdown() { return exitShutdown(); } - std::set statesRequiringCleaner = { SessionState::Mounting, - SessionState::Running, SessionState::Unmounting }; - if (statesRequiringCleaner.count(m_previousState)) { + if (const std::set 
statesRequiringCleaner = { SessionState::Mounting, + SessionState::Running, SessionState::Unmounting }; + statesRequiringCleaner.count(m_previousState)) { if (m_sessionVid.empty()) { m_lc.log(log::ERR, "In DriveHandler::shutdown(): Should run cleaner but VID is missing. Do nothing."); } diff --git a/tapeserver/daemon/DriveHandlerStateReporter.cpp b/tapeserver/daemon/DriveHandlerStateReporter.cpp index 3e0b821790..c3bba63fd7 100644 --- a/tapeserver/daemon/DriveHandlerStateReporter.cpp +++ b/tapeserver/daemon/DriveHandlerStateReporter.cpp @@ -89,10 +89,10 @@ SessionVid DriveHandlerStateReporter::processScheduling(const serializers::Watch // Check the transition is expected. This is non-fatal as the drive session has the last word anyway. log::ScopedParamContainer params(*m_lc); params.add("tapeDrive", m_driveName); - const std::set expectedStates = { - SessionState::StartingUp, SessionState::Scheduling - }; - if (!expectedStates.count(sessionState) || + if (const std::set expectedStates = { + SessionState::StartingUp, SessionState::Scheduling + }; + !expectedStates.count(sessionState) || sessionType != SessionType::Undetermined || static_cast(message.sessiontype()) != SessionType::Undetermined) { params.add("PreviousState", session::toString(sessionState)) @@ -129,10 +129,10 @@ SessionVid DriveHandlerStateReporter::processMounting(const serializers::Watchdo // As usual, subprocess has the last word. 
log::ScopedParamContainer params(*m_lc); params.add("tapeDrive", m_driveName); - const std::set expectedNewTypes = { - SessionType::Archive, SessionType::Retrieve, SessionType::Label - }; - if (sessionState != SessionState::Scheduling || + if (const std::set expectedNewTypes = { + SessionType::Archive, SessionType::Retrieve, SessionType::Label + }; + sessionState != SessionState::Scheduling || sessionType != SessionType::Undetermined || !expectedNewTypes.count(static_cast(message.sessiontype()))) { params.add("PreviousState", session::toString(sessionState)) @@ -155,8 +155,8 @@ SessionVid DriveHandlerStateReporter::processRunning(const serializers::Watchdog const std::set expectedStates = { SessionState::Mounting, SessionState::Running }; - std::set expectedTypes = {SessionType::Archive, SessionType::Retrieve, SessionType::Label}; - if (!expectedStates.count(sessionState) || + if (const std::set expectedTypes = {SessionType::Archive, SessionType::Retrieve, SessionType::Label}; + !expectedStates.count(sessionState) || !expectedTypes.count(sessionType) || (sessionType != static_cast(message.sessiontype()))) { params.add("PreviousState", session::toString(sessionState)) @@ -181,14 +181,14 @@ SessionVid DriveHandlerStateReporter::processUnmounting(const serializers::Watch // As usual, subprocess has the last word. log::ScopedParamContainer params(*m_lc); params.add("tapeDrive", m_driveName); - const std::set> expectedStateTypes = { - std::make_tuple(SessionState::Running, SessionType::Archive), - std::make_tuple(SessionState::Running, SessionType::Retrieve), - std::make_tuple(SessionState::Running, SessionType::Label), - std::make_tuple(SessionState::Checking, SessionType::Cleanup) - }; // (all types of sessions can unmount). 
- if (!expectedStateTypes.count(std::make_tuple(sessionState, sessionType))) { + if (const std::set> expectedStateTypes = { + std::make_tuple(SessionState::Running, SessionType::Archive), + std::make_tuple(SessionState::Running, SessionType::Retrieve), + std::make_tuple(SessionState::Running, SessionType::Label), + std::make_tuple(SessionState::Checking, SessionType::Cleanup) + }; + !expectedStateTypes.count(std::make_tuple(sessionState, sessionType))) { params.add("PreviousState", session::toString(sessionState)) .add("PreviousType", session::toString(sessionType)) .add("NewState", session::toString(static_cast(message.sessionstate()))) @@ -223,10 +223,11 @@ SessionVid DriveHandlerStateReporter::processShuttingDown(const serializers::Wat // As usual, subprocess has the last word. log::ScopedParamContainer params(*m_lc); params.add("tapeDrive", m_driveName); - const std::set expectedStates = { - SessionState::Unmounting, SessionState::DrainingToDisk - }; - if (!expectedStates.count(sessionState)) { + + if (const std::set expectedStates = { + SessionState::Unmounting, SessionState::DrainingToDisk + }; + !expectedStates.count(sessionState)) { params.add("PreviousState", session::toString(sessionState)) .add("PreviousType", session::toString(sessionType)) .add("NewState", session::toString(static_cast(message.sessionstate()))) diff --git a/tapeserver/daemon/ProcessManager.cpp b/tapeserver/daemon/ProcessManager.cpp index 66333c5d55..2a69a2c7f2 100644 --- a/tapeserver/daemon/ProcessManager.cpp +++ b/tapeserver/daemon/ProcessManager.cpp @@ -66,20 +66,20 @@ int ProcessManager::run() { // not need an initialization. while(true) { // Manage sigChild requests - auto sigChildStatus = runSigChildManagement(); - if (sigChildStatus.doExit) return sigChildStatus.exitCode; + if (auto sigChildStatus = runSigChildManagement(); + sigChildStatus.doExit) return sigChildStatus.exitCode; // Manage shutdown requests and completions. 
- auto shutdownStatus = runShutdownManagement(); - if (shutdownStatus.doExit) return shutdownStatus.exitCode; + if (auto shutdownStatus = runShutdownManagement(); + shutdownStatus.doExit) return shutdownStatus.exitCode; // Manage kill requests. - auto killStatus = runKillManagement(); - if (killStatus.doExit) return killStatus.exitCode; + if (auto killStatus = runKillManagement(); + killStatus.doExit) return killStatus.exitCode; // Manage fork requests - auto forkStatus = runForkManagement(); - if (forkStatus.doExit) return forkStatus.exitCode; + if (auto forkStatus = runForkManagement(); + forkStatus.doExit) return forkStatus.exitCode; // Manage refresh logger requests - auto refreshLoggerStatus = runRefreshLoggerManagement(); - if (refreshLoggerStatus.doExit) return refreshLoggerStatus.exitCode; + if (auto refreshLoggerStatus = runRefreshLoggerManagement(); + refreshLoggerStatus.doExit) return refreshLoggerStatus.exitCode; // All subprocesses requests have been handled. We can now switch to the // event handling per se. runEventLoop(); @@ -103,16 +103,16 @@ cta::log::LogContext& ProcessManager::logContext() { ProcessManager::RunPartStatus ProcessManager::runShutdownManagement() { // Check the current statuses for shutdown requests // If any process requests a shutdown, we will trigger it in all. 
- bool anyAskedShutdown = - std::count_if(m_subprocessHandlers.cbegin(), m_subprocessHandlers.cend(), [this](const SubprocessAndStatus& i) { - if(i.status.shutdownRequested) { - cta::log::ScopedParamContainer params(m_logContext); - params.add("SubprocessName", i.handler->index); - m_logContext.log(log::INFO, "Subprocess requested shutdown"); - } - return i.status.shutdownRequested; - }); - if (anyAskedShutdown) { + if (bool anyAskedShutdown = + std::count_if(m_subprocessHandlers.cbegin(), m_subprocessHandlers.cend(), [this](const SubprocessAndStatus& i) { + if(i.status.shutdownRequested) { + cta::log::ScopedParamContainer params(m_logContext); + params.add("SubprocessName", i.handler->index); + m_logContext.log(log::INFO, "Subprocess requested shutdown"); + } + return i.status.shutdownRequested; + }); + anyAskedShutdown) { for(auto & sp: m_subprocessHandlers) { sp.status = sp.handler->shutdown(); cta::log::ScopedParamContainer params(m_logContext); @@ -136,16 +136,16 @@ ProcessManager::RunPartStatus ProcessManager::runShutdownManagement() { ProcessManager::RunPartStatus ProcessManager::runKillManagement() { // If any process asks for a kill, we kill all sub processes and exit - bool anyAskedKill = - std::count_if(m_subprocessHandlers.cbegin(), m_subprocessHandlers.cend(), [this](const SubprocessAndStatus& i) { - if(i.status.killRequested) { - cta::log::ScopedParamContainer params(m_logContext); - params.add("SubprocessName", i.handler->index); - m_logContext.log(log::INFO, "Subprocess requested kill"); - } - return i.status.killRequested; - }); - if (anyAskedKill) { + if (bool anyAskedKill = + std::count_if(m_subprocessHandlers.cbegin(), m_subprocessHandlers.cend(), [this](const SubprocessAndStatus& i) { + if(i.status.killRequested) { + cta::log::ScopedParamContainer params(m_logContext); + params.add("SubprocessName", i.handler->index); + m_logContext.log(log::INFO, "Subprocess requested kill"); + } + return i.status.killRequested; + }); + anyAskedKill) { for 
(auto& sp : m_subprocessHandlers) { sp.handler->kill(); cta::log::ScopedParamContainer params(m_logContext); @@ -206,16 +206,16 @@ ProcessManager::RunPartStatus ProcessManager::runForkManagement() { ProcessManager::RunPartStatus ProcessManager::runSigChildManagement() { // If any process handler received sigChild, we signal it to all processes. Typically, this is // done by the signal handler - bool sigChild = - std::count_if(m_subprocessHandlers.cbegin(), m_subprocessHandlers.cend(), [this](const SubprocessAndStatus& i) { - if(i.status.sigChild) { - cta::log::ScopedParamContainer params(m_logContext); - params.add("SubprocessName", i.handler->index); - m_logContext.log(log::INFO, "Handler received SIGCHILD. Propagating to all handlers."); - } - return i.status.sigChild; - }); - if (sigChild) { + if (bool sigChild = + std::count_if(m_subprocessHandlers.cbegin(), m_subprocessHandlers.cend(), [this](const SubprocessAndStatus& i) { + if(i.status.sigChild) { + cta::log::ScopedParamContainer params(m_logContext); + params.add("SubprocessName", i.handler->index); + m_logContext.log(log::INFO, "Handler received SIGCHILD. 
Propagating to all handlers."); + } + return i.status.sigChild; + }); + sigChild) { for(auto & sp: m_subprocessHandlers) { sp.status = sp.handler->processSigChild(); cta::log::ScopedParamContainer params(m_logContext); diff --git a/tapeserver/readtp/ReadtpCmd.cpp b/tapeserver/readtp/ReadtpCmd.cpp index 0e2f8b9110..4674f162f0 100644 --- a/tapeserver/readtp/ReadtpCmd.cpp +++ b/tapeserver/readtp/ReadtpCmd.cpp @@ -169,8 +169,7 @@ std::list ReadtpCmd::readListFromFile(const std::string &filename) while(std::getline(file, line)) { // Strip out comments - auto pos = line.find('#'); - if(pos != std::string::npos) { + if(auto pos = line.find('#'); pos != std::string::npos) { line.resize(pos); } diff --git a/tests/ImmutableFileTestCmdLineArgs.cpp b/tests/ImmutableFileTestCmdLineArgs.cpp index 74ca6b33cb..c790458c81 100644 --- a/tests/ImmutableFileTestCmdLineArgs.cpp +++ b/tests/ImmutableFileTestCmdLineArgs.cpp @@ -76,10 +76,8 @@ ImmutableFileTestCmdLineArgs::ImmutableFileTestCmdLineArgs(const int argc, char } // Calculate the number of non-option ARGV-elements - const int nbArgs = argc - optind; - // Check the number of arguments - if(nbArgs != 1) { + if(const int nbArgs = argc - optind; nbArgs != 1) { exception::CommandLineNotParsed ex; ex.getMessage() << "Wrong number of command-line arguments: expected=1 actual=" << nbArgs; throw ex; diff --git a/xroot_plugins/XrdSsiCtaServiceProvider.cpp b/xroot_plugins/XrdSsiCtaServiceProvider.cpp index 97292e01d0..bf5007fbc4 100644 --- a/xroot_plugins/XrdSsiCtaServiceProvider.cpp +++ b/xroot_plugins/XrdSsiCtaServiceProvider.cpp @@ -39,8 +39,8 @@ bool XrdSsiCtaServiceProvider::Init(XrdSsiLogger *logP, XrdSsiCluster *clsP, con // Set XRootD SSI Protobuf logging level from config file XrdSsiPb::Config config(cfgFn); - auto loglevel = config.getOptionList("cta.log.ssi"); - if(loglevel.empty()) { + if(auto loglevel = config.getOptionList("cta.log.ssi"); + loglevel.empty()) { XrdSsiPb::Log::SetLogLevel("info"); } else { 
XrdSsiPb::Log::SetLogLevel(loglevel); -- GitLab From af5070322d8c40b83ee258ab65705d888c2ce2b9 Mon Sep 17 00:00:00 2001 From: guenther Date: Thu, 27 Nov 2025 19:48:22 +0100 Subject: [PATCH 02/12] fix typos --- catalogue/rdbms/RdbmsCatalogueGetArchiveFilesItor.cpp | 2 +- catalogue/rdbms/RdbmsCatalogueGetFileRecycleLogItor.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/catalogue/rdbms/RdbmsCatalogueGetArchiveFilesItor.cpp b/catalogue/rdbms/RdbmsCatalogueGetArchiveFilesItor.cpp index cafad4b83a..ee7cf8c719 100644 --- a/catalogue/rdbms/RdbmsCatalogueGetArchiveFilesItor.cpp +++ b/catalogue/rdbms/RdbmsCatalogueGetArchiveFilesItor.cpp @@ -113,7 +113,7 @@ RdbmsCatalogueGetArchiveFilesItor::RdbmsCatalogueGetArchiveFilesItor( searchCriteria.diskInstance.has_value() || searchCriteria.vid.has_value() || searchCriteria.diskFileIds.has_value() || - searchCriteria.fSeq.has_value();) { + searchCriteria.fSeq.has_value()) { sql += R"SQL( WHERE )SQL"; } diff --git a/catalogue/rdbms/RdbmsCatalogueGetFileRecycleLogItor.cpp b/catalogue/rdbms/RdbmsCatalogueGetFileRecycleLogItor.cpp index b24014959d..1372e62839 100644 --- a/catalogue/rdbms/RdbmsCatalogueGetFileRecycleLogItor.cpp +++ b/catalogue/rdbms/RdbmsCatalogueGetFileRecycleLogItor.cpp @@ -73,7 +73,7 @@ RdbmsCatalogueGetFileRecycleLogItor::RdbmsCatalogueGetFileRecycleLogItor( searchCriteria.diskInstance.has_value() || searchCriteria.recycleLogTimeMin.has_value() || searchCriteria.recycleLogTimeMax.has_value() || - searchCriteria.vo.has_value();) { + searchCriteria.vo.has_value()) { sql += R"SQL( WHERE )SQL"; } -- GitLab From fe6e6c76f189b823411839082b35bc0327269f3a Mon Sep 17 00:00:00 2001 From: guenther Date: Thu, 27 Nov 2025 20:01:29 +0100 Subject: [PATCH 03/12] fix bugs in init statements --- objectstore/ArchiveQueue.cpp | 18 ++++++++++-------- objectstore/RetrieveQueue.cpp | 18 ++++++++++-------- 2 files changed, 20 insertions(+), 16 deletions(-) diff --git a/objectstore/ArchiveQueue.cpp 
b/objectstore/ArchiveQueue.cpp index 1e1ef05c0c..6567ce1dfe 100644 --- a/objectstore/ArchiveQueue.cpp +++ b/objectstore/ArchiveQueue.cpp @@ -93,14 +93,16 @@ bool ArchiveQueue::checkMapsAndShardsCoherency() { totalJobs != jobsExpectedFromShardsPointers) return false; // Check that we have coherent queue summaries - if (ValueCountMapUint64 priorityMap(m_payload.mutable_prioritymap()), - ValueCountMapUint64 minArchiveRequestAgeMap(m_payload.mutable_minarchiverequestagemap()), - ValueCountMapString mountPolicyNameMap(m_payload.mutable_mountpolicynamemap()); - priorityMap.total() != m_payload.archivejobscount() || - minArchiveRequestAgeMap.total() != m_payload.archivejobscount() || - mountPolicyNameMap.total() != m_payload.archivejobscount() - ) - return false; + { + ValueCountMapUint64 priorityMap(m_payload.mutable_prioritymap()); + ValueCountMapUint64 minArchiveRequestAgeMap(m_payload.mutable_minarchiverequestagemap()); + ValueCountMapString mountPolicyNameMap(m_payload.mutable_mountpolicynamemap()); + if (priorityMap.total() != m_payload.archivejobscount() || + minArchiveRequestAgeMap.total() != m_payload.archivejobscount() || + mountPolicyNameMap.total() != m_payload.archivejobscount() + ) + return false; + } return true; } diff --git a/objectstore/RetrieveQueue.cpp b/objectstore/RetrieveQueue.cpp index 37498f3ce2..c1ea2fcb1d 100644 --- a/objectstore/RetrieveQueue.cpp +++ b/objectstore/RetrieveQueue.cpp @@ -74,14 +74,16 @@ bool RetrieveQueue::checkMapsAndShardsCoherency() { totalJobs != jobsExpectedFromShardsPointers) return false; // Check that we have coherent queue summaries - if (ValueCountMapUint64 priorityMap(m_payload.mutable_prioritymap()), - ValueCountMapUint64 minRetrieveRequestAgeMap(m_payload.mutable_minretrieverequestagemap()), - ValueCountMapString mountPolicyNameMap(m_payload.mutable_mountpolicynamemap()); - priorityMap.total() != m_payload.retrievejobscount() || - minRetrieveRequestAgeMap.total() != m_payload.retrievejobscount() || - 
mountPolicyNameMap.total() != m_payload.retrievejobscount() - ) - return false; + { + ValueCountMapUint64 priorityMap(m_payload.mutable_prioritymap()); + ValueCountMapUint64 minRetrieveRequestAgeMap(m_payload.mutable_minretrieverequestagemap()); + ValueCountMapString mountPolicyNameMap(m_payload.mutable_mountpolicynamemap()); + if (priorityMap.total() != m_payload.retrievejobscount() || + minRetrieveRequestAgeMap.total() != m_payload.retrievejobscount() || + mountPolicyNameMap.total() != m_payload.retrievejobscount() + ) + return false; + } return true; } -- GitLab From 2ad91506007d96f7ec8e4d5dde73203acadcf2b4 Mon Sep 17 00:00:00 2001 From: guenther Date: Thu, 27 Nov 2025 20:05:38 +0100 Subject: [PATCH 04/12] fix bugs in init statements --- objectstore/RetrieveQueue.cpp | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/objectstore/RetrieveQueue.cpp b/objectstore/RetrieveQueue.cpp index c1ea2fcb1d..2431f14be2 100644 --- a/objectstore/RetrieveQueue.cpp +++ b/objectstore/RetrieveQueue.cpp @@ -68,11 +68,13 @@ bool RetrieveQueue::checkMapsAndShardsCoherency() { jobsExpectedFromShardsPointers += aqs.shardjobscount(); } // The sum of shards should be equal to the summary - if (uint64_t totalBytes = m_payload.retrievejobstotalsize(), - uint64_t totalJobs = m_payload.retrievejobscount(); - totalBytes != bytesFromShardPointers || - totalJobs != jobsExpectedFromShardsPointers) - return false; + { + uint64_t totalBytes = m_payload.retrievejobstotalsize(); + uint64_t totalJobs = m_payload.retrievejobscount(); + if (totalBytes != bytesFromShardPointers || + totalJobs != jobsExpectedFromShardsPointers) + return false; + } // Check that we have coherent queue summaries { ValueCountMapUint64 priorityMap(m_payload.mutable_prioritymap()); -- GitLab From 4199d36b8be4da6b18c8a3a6efeb5eb7d398fe1a Mon Sep 17 00:00:00 2001 From: guenther Date: Thu, 27 Nov 2025 20:16:27 +0100 Subject: [PATCH 05/12] fix bugs in init statements --- common/CRC.cpp | 18 
++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/common/CRC.cpp b/common/CRC.cpp index aba6d1646b..8139abf079 100644 --- a/common/CRC.cpp +++ b/common/CRC.cpp @@ -247,15 +247,17 @@ bool verifyCrc32cForMemoryBlockWithCrc32c( const uint32_t crcInit, const uint32_t cnt, const uint8_t *start) { if (cnt <= 4) return false; //block is too small to be valid, cannot check CRC - - if (const uint32_t crccmp = crc32c(crcInit, cnt-4, start), - const uint32_t crcblk= (start[cnt-4] << 0) | - (start[cnt-3] << 8) | - (start[cnt-2] << 16) | - (start[cnt-1] << 24); crccmp != crcblk) { - return false; //block CRC is incorrect + { + const uint32_t crccmp = crc32c(crcInit, cnt - 4, start), + const uint32_t crcblk = (start[cnt - 4] << 0) | + (start[cnt - 3] << 8) | + (start[cnt - 2] << 16) | + (start[cnt - 1] << 24); + if (crccmp != crcblk) { + return false; //block CRC is incorrect + } } return true; -} +}å } // namespace cta -- GitLab From 495ff80753ae6f522c4d8f95ff84f9738da3ff68 Mon Sep 17 00:00:00 2001 From: guenther Date: Thu, 27 Nov 2025 20:34:11 +0100 Subject: [PATCH 06/12] fix bugs in init statements --- common/CRC.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/CRC.cpp b/common/CRC.cpp index 8139abf079..a3a87870b4 100644 --- a/common/CRC.cpp +++ b/common/CRC.cpp @@ -258,6 +258,6 @@ bool verifyCrc32cForMemoryBlockWithCrc32c( } } return true; -}å +} } // namespace cta -- GitLab From ad03c55792e519834917abdfa44ebd7d3a5f94cd Mon Sep 17 00:00:00 2001 From: guenther Date: Thu, 27 Nov 2025 20:37:24 +0100 Subject: [PATCH 07/12] fix bugs in init statements --- common/CRC.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common/CRC.cpp b/common/CRC.cpp index a3a87870b4..754abd8c49 100644 --- a/common/CRC.cpp +++ b/common/CRC.cpp @@ -248,7 +248,7 @@ bool verifyCrc32cForMemoryBlockWithCrc32c( if (cnt <= 4) return false; //block is too small to be valid, cannot check CRC { - const uint32_t crccmp = 
crc32c(crcInit, cnt - 4, start), + const uint32_t crccmp = crc32c(crcInit, cnt - 4, start); const uint32_t crcblk = (start[cnt - 4] << 0) | (start[cnt - 3] << 8) | (start[cnt - 2] << 16) | -- GitLab From 90ca6779a562e89e21b3c3ca71e97b1c460f730f Mon Sep 17 00:00:00 2001 From: guenther Date: Thu, 27 Nov 2025 20:47:22 +0100 Subject: [PATCH 08/12] fix bugs in init statements --- mediachanger/rmcd/rmc_serv.cpp | 44 ++++++++++++++++++---------------- 1 file changed, 23 insertions(+), 21 deletions(-) diff --git a/mediachanger/rmcd/rmc_serv.cpp b/mediachanger/rmcd/rmc_serv.cpp index 9af7f8499a..f1e0369369 100644 --- a/mediachanger/rmcd/rmc_serv.cpp +++ b/mediachanger/rmcd/rmc_serv.cpp @@ -328,28 +328,30 @@ static int rmc_getreq(const int s, int* const req_type, char* const req_data, ch rmc_logit(func, RMC02, "getpeername", neterror()); return ERMCUNREC; } - if (struct hostent hbuf; - struct hostent* hp = nullptr; - char buffer[1024]; - char client_ip[INET6_ADDRSTRLEN]; - int h_err; - gethostbyaddr_r((void*) (&from.sin_addr), - sizeof(struct in_addr), - from.sin_family, - &hbuf, - buffer, - sizeof(buffer), - &hp, - &h_err) != 0 || - hp == nullptr) { - if (inet_ntop(AF_INET, &from.sin_addr, client_ip, sizeof(client_ip)) == nullptr) { - perror("inet_ntop"); - return ERMCUNREC; + { + struct hostent hbuf; + struct hostent *hp = nullptr; + char buffer[1024]; + char client_ip[INET6_ADDRSTRLEN]; + int h_err; + if (gethostbyaddr_r((void *) (&from.sin_addr), + sizeof(struct in_addr), + from.sin_family, + &hbuf, + buffer, + sizeof(buffer), + &hp, + &h_err) != 0 || + hp == nullptr) { + if (inet_ntop(AF_INET, &from.sin_addr, client_ip, sizeof(client_ip)) == nullptr) { + perror("inet_ntop"); + return ERMCUNREC; + } + // Duplicate the strings to prevent undefined behaviour after exiting function + *clienthost = strdup(client_ip); + } else { + *clienthost = strdup(hp->h_name); } - // Duplicate the strings to prevent undefined behaviour after exiting function - *clienthost = 
strdup(client_ip); - } else { - *clienthost = strdup(hp->h_name); } return 0; } else { -- GitLab From f929efa8e4fe6b83c9ce1bdffee6a43d9d18328b Mon Sep 17 00:00:00 2001 From: guenther Date: Thu, 27 Nov 2025 20:51:49 +0100 Subject: [PATCH 09/12] fix bugs in init statements --- mediachanger/io.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/mediachanger/io.cpp b/mediachanger/io.cpp index 96efe009a3..f9bc0998f2 100644 --- a/mediachanger/io.cpp +++ b/mediachanger/io.cpp @@ -507,8 +507,6 @@ void getSockIpHostnamePort( { char serviceName[SERVICENAMEBUFLEN]; - const int rc = getnameinfo((const struct sockaddr*)&address, addressLen, - hostName, hostNameLen, serviceName, sizeof(serviceName), 0); if (const int rc = getnameinfo((const struct sockaddr *) &address, addressLen, hostName, hostNameLen, serviceName, sizeof(serviceName), 0); -- GitLab From 8e307177125784013b3ae92949e42e523dfcf007 Mon Sep 17 00:00:00 2001 From: guenther Date: Thu, 27 Nov 2025 20:54:15 +0100 Subject: [PATCH 10/12] fix bugs in init statements --- scheduler/rdbms/RepackRequest.cpp | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/scheduler/rdbms/RepackRequest.cpp b/scheduler/rdbms/RepackRequest.cpp index 83f59a2f2e..f0cccb1ee0 100644 --- a/scheduler/rdbms/RepackRequest.cpp +++ b/scheduler/rdbms/RepackRequest.cpp @@ -466,20 +466,21 @@ namespace cta::schedulerdb { } common::dataStructures::RepackInfo::Status RepackRequest::getCurrentStatus() const { - if (bool finishedExpansion = repackInfo.isExpandFinished, - bool allRetrieveDone = - (repackInfo.retrievedFiles + repackInfo.failedFilesToRetrieve) >= repackInfo.totalFilesToRetrieve, - bool allArchiveDone = - (repackInfo.archivedFiles + repackInfo.failedFilesToArchive + m_failedToCreateArchiveReq) >= - repackInfo.totalFilesToArchive; - finishedExpansion && allRetrieveDone && allArchiveDone) { - if (repackInfo.failedFilesToRetrieve > 0 || repackInfo.failedFilesToArchive > 0) { - return 
common::dataStructures::RepackInfo::Status::Failed; - } else { - return common::dataStructures::RepackInfo::Status::Complete; + { + bool finishedExpansion = repackInfo.isExpandFinished; + bool allRetrieveDone = + (repackInfo.retrievedFiles + repackInfo.failedFilesToRetrieve) >= repackInfo.totalFilesToRetrieve; + bool allArchiveDone = + (repackInfo.archivedFiles + repackInfo.failedFilesToArchive + m_failedToCreateArchiveReq) >= + repackInfo.totalFilesToArchive; + if (finishedExpansion && allRetrieveDone && allArchiveDone) { + if (repackInfo.failedFilesToRetrieve > 0 || repackInfo.failedFilesToArchive > 0) { + return common::dataStructures::RepackInfo::Status::Failed; + } else { + return common::dataStructures::RepackInfo::Status::Complete; + } } } - if (repackInfo.retrievedFiles > 0 || repackInfo.failedFilesToRetrieve > 0 || repackInfo.archivedFiles > 0 || repackInfo.failedFilesToArchive > 0) { return common::dataStructures::RepackInfo::Status::Running; -- GitLab From bd3509a7284d156a7493e5d4046e9ce786b25241 Mon Sep 17 00:00:00 2001 From: guenther Date: Fri, 28 Nov 2025 23:37:13 +0100 Subject: [PATCH 11/12] fix bugs in init statements --- catalogue/DatabaseMetadataGetter.cpp | 14 +- catalogue/rdbms/RdbmsSchemaCatalogue.cpp | 12 +- catalogue/rdbms/RdbmsTapeFileCatalogue.cpp | 27 +- cmdline/CtaAdminParsedCmd.cpp | 16 +- disk/DiskSystem.cpp | 20 +- frontend/common/AdminCmd.cpp | 30 +- frontend/common/DriveLsResponseStream.cpp | 59 ++-- objectstore/Algorithms.hpp | 270 +++++++++--------- .../tapeserver/daemon/DataTransferSession.cpp | 5 +- tapeserver/daemon/ProcessManager.cpp | 109 +++---- 10 files changed, 297 insertions(+), 265 deletions(-) diff --git a/catalogue/DatabaseMetadataGetter.cpp b/catalogue/DatabaseMetadataGetter.cpp index ed4230a534..d489807d32 100644 --- a/catalogue/DatabaseMetadataGetter.cpp +++ b/catalogue/DatabaseMetadataGetter.cpp @@ -94,12 +94,14 @@ SchemaVersion DatabaseMetadataGetter::getCatalogueVersion(){ auto rset2 = stmt2.executeQuery(); 
if(rset2.next()){ auto schemaStatus = rset2.columnString("STATUS"); - if (auto schemaVersionMajorNext = rset2.columnOptionalUint64("NEXT_SCHEMA_VERSION_MAJOR"), - schemaVersionMinorNext = rset2.columnOptionalUint64("NEXT_SCHEMA_VERSION_MINOR"); - schemaVersionMajorNext.has_value() && schemaVersionMinorNext.has_value()) { - schemaVersionBuilder.nextSchemaVersionMajor(schemaVersionMajorNext.value()) - .nextSchemaVersionMinor(schemaVersionMinorNext.value()) - .status(schemaStatus); + { + auto schemaVersionMajorNext = rset2.columnOptionalUint64("NEXT_SCHEMA_VERSION_MAJOR"); + auto schemaVersionMinorNext = rset2.columnOptionalUint64("NEXT_SCHEMA_VERSION_MINOR"); + if (schemaVersionMajorNext.has_value() && schemaVersionMinorNext.has_value()) { + schemaVersionBuilder.nextSchemaVersionMajor(schemaVersionMajorNext.value()) + .nextSchemaVersionMinor(schemaVersionMinorNext.value()) + .status(schemaStatus); + } } } } catch (const cta::exception::Exception&){ diff --git a/catalogue/rdbms/RdbmsSchemaCatalogue.cpp b/catalogue/rdbms/RdbmsSchemaCatalogue.cpp index de719abf9a..5ea6001095 100644 --- a/catalogue/rdbms/RdbmsSchemaCatalogue.cpp +++ b/catalogue/rdbms/RdbmsSchemaCatalogue.cpp @@ -54,11 +54,13 @@ SchemaVersion RdbmsSchemaCatalogue::getSchemaVersion() const { schemaVersionBuilder.schemaVersionMajor(rset.columnUint64("SCHEMA_VERSION_MAJOR")) .schemaVersionMinor(rset.columnUint64("SCHEMA_VERSION_MINOR")) .status(rset.columnString("STATUS")); - if (auto schemaVersionMajorNext = rset.columnOptionalUint64("NEXT_SCHEMA_VERSION_MAJOR"), - schemaVersionMinorNext = rset.columnOptionalUint64("NEXT_SCHEMA_VERSION_MINOR"); - schemaVersionMajorNext.has_value() && schemaVersionMinorNext.has_value()) { - schemaVersionBuilder.nextSchemaVersionMajor(schemaVersionMajorNext.value()) - .nextSchemaVersionMinor(schemaVersionMinorNext.value()); + { + auto schemaVersionMajorNext = rset.columnOptionalUint64("NEXT_SCHEMA_VERSION_MAJOR"); + auto schemaVersionMinorNext = 
rset.columnOptionalUint64("NEXT_SCHEMA_VERSION_MINOR"); + if (schemaVersionMajorNext.has_value() && schemaVersionMinorNext.has_value()) { + schemaVersionBuilder.nextSchemaVersionMajor(schemaVersionMajorNext.value()) + .nextSchemaVersionMinor(schemaVersionMinorNext.value()); + } } return schemaVersionBuilder.build(); } else { diff --git a/catalogue/rdbms/RdbmsTapeFileCatalogue.cpp b/catalogue/rdbms/RdbmsTapeFileCatalogue.cpp index 6bea3438b7..2ecc645b0c 100644 --- a/catalogue/rdbms/RdbmsTapeFileCatalogue.cpp +++ b/catalogue/rdbms/RdbmsTapeFileCatalogue.cpp @@ -212,18 +212,21 @@ common::dataStructures::RetrieveFileQueueCriteria RdbmsTapeFileCatalogue::prepar ex.getMessage() << "File with archive file ID " << archiveFileId << " does not exist in CTA namespace"; throw ex; } - if (const auto nonBrokenState = std::find_if( - std::begin(tapeFileStateList), std::end(tapeFileStateList), - [](const std::pair& state) { - return (state.second != "BROKEN") - && (state.second != "BROKEN_PENDING") - && (state.second != "EXPORTED") - && (state.second != "EXPORTED_PENDING"); - }); - nonBrokenState != std::end(tapeFileStateList)) { - ex.getMessage() << "WARNING: The requested file is on tape " << nonBrokenState->first - << ", which is temporarily unavailable (" << nonBrokenState->second << "). Please retry later."; - throw ex; + { + const auto nonBrokenState = std::find_if( + std::begin(tapeFileStateList), std::end(tapeFileStateList), + [](const std::pair &state) { + return (state.second != "BROKEN") + && (state.second != "BROKEN_PENDING") + && (state.second != "EXPORTED") + && (state.second != "EXPORTED_PENDING"); + }); + if (nonBrokenState != std::end(tapeFileStateList)) { + ex.getMessage() << "WARNING: The requested file is on tape " << nonBrokenState->first + << ", which is temporarily unavailable (" << nonBrokenState->second + << "). 
Please retry later."; + throw ex; + } } const auto& [brokenTape, brokenState] = tapeFileStateList.front(); //All tape files are on broken tapes, just generate an error about the first diff --git a/cmdline/CtaAdminParsedCmd.cpp b/cmdline/CtaAdminParsedCmd.cpp index 6b7793ef57..4f9890dc97 100644 --- a/cmdline/CtaAdminParsedCmd.cpp +++ b/cmdline/CtaAdminParsedCmd.cpp @@ -151,14 +151,16 @@ void CtaAdminParsedCmd::parseOptions(int start, int argc, const char* const* con // Check if the value is '--all' if (std::string(argv[i]) == "--all" || std::string(argv[i]) == "-a") { // Find the OPT_FLAG type --all option explicitly - if (auto flag_it = std::find_if( - options.begin(), options.end(), - [](const Option& opt) { + { + auto flag_it = std::find_if( + options.begin(), options.end(), + [](const Option &opt) { return opt.get_type() == Option::OPT_FLAG && (opt == opt_all); - }); - flag_it != options.end()) { - addOption(*flag_it, ""); // Add --all as a flag option - continue; // Move to the next argument + }); + if (flag_it != options.end()) { + addOption(*flag_it, ""); // Add --all as a flag option + continue; // Move to the next argument + } } throwUsage("Invalid use of '--all'"); } diff --git a/disk/DiskSystem.cpp b/disk/DiskSystem.cpp index 9824f97c4e..00aaf465a0 100644 --- a/disk/DiskSystem.cpp +++ b/disk/DiskSystem.cpp @@ -35,7 +35,10 @@ namespace cta::disk { // DiskSystemList::at() //------------------------------------------------------------------------------ const DiskSystem& DiskSystemList::at(const std::string& name) const { - if (auto dsi = std::find_if(begin(), end(), [&name](const DiskSystem& ds){ return ds.name == name; }); dsi != end()) return *dsi; + { + auto dsi = std::find_if(begin(), end(), [&name](const DiskSystem &ds) { return ds.name == name; }); + if (dsi != end()) return *dsi; + } throw std::out_of_range("In DiskSystemList::at(): name " + name + " not found."); } @@ -50,12 +53,15 @@ std::string DiskSystemList::getDSName(const std::string& fileURL) 
const { } } // Try and find the fileURL - if (auto pri = std::find_if(m_pointersAndRegexes.begin(), m_pointersAndRegexes.end(), - [&fileURL](const PointerAndRegex& pr){ return !pr.regex.exec(fileURL).empty(); }); pri != m_pointersAndRegexes.end()) { - // We found a match. Let's move the pointer and regex to the front so next file will be faster (most likely). - if (pri != m_pointersAndRegexes.begin()) - m_pointersAndRegexes.splice(m_pointersAndRegexes.begin(), m_pointersAndRegexes, pri); - return pri->ds.name; + { + auto pri = std::find_if(m_pointersAndRegexes.begin(), m_pointersAndRegexes.end(), + [&fileURL](const PointerAndRegex &pr) { return !pr.regex.exec(fileURL).empty(); }); + if (pri != m_pointersAndRegexes.end()) { + // We found a match. Let's move the pointer and regex to the front so next file will be faster (most likely). + if (pri != m_pointersAndRegexes.begin()) + m_pointersAndRegexes.splice(m_pointersAndRegexes.begin(), m_pointersAndRegexes, pri); + return pri->ds.name; + } } throw std::out_of_range("In DiskSystemList::getDSNAme(): not match for fileURL"); } diff --git a/frontend/common/AdminCmd.cpp b/frontend/common/AdminCmd.cpp index d4bdd59790..41f798b988 100644 --- a/frontend/common/AdminCmd.cpp +++ b/frontend/common/AdminCmd.cpp @@ -855,21 +855,23 @@ void AdminCmd::processRepack_Add(xrd::Response& response) { //Get the mountpolicy from the catalogue common::dataStructures::MountPolicy mountPolicy; - using MountPolicyList = std::list; - MountPolicyList mountPolicies = m_catalogue.MountPolicy()->getMountPolicies(); - if(MountPolicyList::const_iterator repackMountPolicyItor = std::find_if( - mountPolicies.begin(), - mountPolicies.end(), - [&mountPolicyProvidedByUser](const common::dataStructures::MountPolicy& mp) { - return mp.name == mountPolicyProvidedByUser; - }); repackMountPolicyItor != mountPolicies.end()) { - //The mount policy exists - mountPolicy = *repackMountPolicyItor; - } else { - //The mount policy does not exist, throw a user error - 
throw exception::UserError("The mount policy name provided does not match any existing mount policy."); + { + using MountPolicyList = std::list; + MountPolicyList mountPolicies = m_catalogue.MountPolicy()->getMountPolicies(); + MountPolicyList::const_iterator repackMountPolicyItor = std::find_if( + mountPolicies.begin(), + mountPolicies.end(), + [&mountPolicyProvidedByUser](const common::dataStructures::MountPolicy &mp) { + return mp.name == mountPolicyProvidedByUser; + }); + if (repackMountPolicyItor != mountPolicies.end()) { + //The mount policy exists + mountPolicy = *repackMountPolicyItor; + } else { + //The mount policy does not exist, throw a user error + throw exception::UserError("The mount policy name provided does not match any existing mount policy."); + } } - if(const auto buff = getOptional(OptionString::BUFFERURL); buff.has_value()) { //The buffer is provided by the user bufferURL = buff.value(); diff --git a/frontend/common/DriveLsResponseStream.cpp b/frontend/common/DriveLsResponseStream.cpp index c310d2c15d..f98e094f10 100644 --- a/frontend/common/DriveLsResponseStream.cpp +++ b/frontend/common/DriveLsResponseStream.cpp @@ -74,21 +74,24 @@ DriveLsResponseStream::DriveLsResponseStream(cta::catalogue::Catalogue& catalogu // Extract the SchedulerBackendName configuration if it exists std::string driveSchedulerBackendName = "unknown"; - if (auto config_it = - std::find_if(driveConfigs.begin(), - driveConfigs.end(), - [&driveSchedulerBackendName](const cta::catalogue::DriveConfigCatalogue::DriveConfig& config) { - if (config.keyName == "SchedulerBackendName") { - driveSchedulerBackendName = config.value; - return true; - } - return false; - }); config_it == driveConfigs.end()) { - m_lc.log(cta::log::ERR, - "DriveLsStream::fillBuffer could not find SchedulerBackendName configuration for drive " + + { + auto config_it = + std::find_if(driveConfigs.begin(), + driveConfigs.end(), + [&driveSchedulerBackendName]( + const 
cta::catalogue::DriveConfigCatalogue::DriveConfig &config) { + if (config.keyName == "SchedulerBackendName") { + driveSchedulerBackendName = config.value; + return true; + } + return false; + }); + if (config_it == driveConfigs.end()) { + m_lc.log(cta::log::ERR, + "DriveLsStream::fillBuffer could not find SchedulerBackendName configuration for drive " + dr_it->driveName); + } } - if (m_schedulerBackendName.value_or("") != driveSchedulerBackendName) { shouldKeep = false; } @@ -125,21 +128,23 @@ cta::xrd::Data DriveLsResponseStream::next() { // Extract the SchedulerBackendName configuration if it exists std::string driveSchedulerBackendName = "unknown"; - - if (auto it = std::find_if(driveConfigs.begin(), - driveConfigs.end(), - [&driveSchedulerBackendName]( - const cta::catalogue::DriveConfigCatalogue::DriveConfig &config) { - if (config.keyName == "SchedulerBackendName") { - driveSchedulerBackendName = config.value; - return true; - } - return false; - }); it == driveConfigs.end()) { - m_lc.log(cta::log::ERR, - "DriveLsResponseStream::next could not find SchedulerBackendName configuration for drive " + dr.driveName); + { + auto it = std::find_if(driveConfigs.begin(), + driveConfigs.end(), + [&driveSchedulerBackendName]( + const cta::catalogue::DriveConfigCatalogue::DriveConfig &config) { + if (config.keyName == "SchedulerBackendName") { + driveSchedulerBackendName = config.value; + return true; + } + return false; + }); + if (it == driveConfigs.end()) { + m_lc.log(cta::log::ERR, + "DriveLsResponseStream::next could not find SchedulerBackendName configuration for drive " + + dr.driveName); + } } - cta::xrd::Data data; auto driveItem = data.mutable_drls_item(); diff --git a/objectstore/Algorithms.hpp b/objectstore/Algorithms.hpp index 151c805063..aebe245c05 100644 --- a/objectstore/Algorithms.hpp +++ b/objectstore/Algorithms.hpp @@ -222,71 +222,73 @@ public: timingList.insertAndReset("ownershipAdditionTime", t); 
m_agentReference.addBatchToOwnership(candidateElementsAddresses, m_backend); // We can now attempt to switch ownership of elements - if (auto failedOwnershipSwitchElements = ContainerTraits::switchElementsOwnershipAndStatus( + { + auto failedOwnershipSwitchElements = ContainerTraits::switchElementsOwnershipAndStatus( candidateElements, m_agentReference.getAgentAddress(), cont.getAddressIfSet(), timingList, t, lc, newStatus); - failedOwnershipSwitchElements.empty()) { - timingList.insertAndReset("updateResultProcessingTime", t); - // This is the easy case (and most common case). Everything went through fine. - ContainerTraits::removeReferencesAndCommit(cont, candidateElementsAddresses, lc); - timingList.insertAndReset("containerUpdateTime", t); - contSummaryAfter = ContainerTraits::getContainerSummary(cont); - // We skip the container trimming as we do not have the contId. - // trimming might release the lock - if (contLock.isLocked()) contLock.release(); - timingList.insertAndReset("containerUnlockTime", t); - // All jobs are validated - ret.summary += candidateElements.summary; - ret.elements.insertBack(std::move(candidateElements.elements)); - timingList.insertAndReset("structureProcessingTime", t); - } else { - // For the failed files, we have to differentiate the not owned or not existing ones from other error cases. - // For the not owned, not existing and those successfully switched, we have to de reference them form the container. - // For other cases, we will leave the elements referenced in the container, as we cannot ensure de-referencing is safe. 
- std::set::ElementAddress> elementsNotToDereferenceFromContainer; - std::set::ElementAddress> elementsNotToReport; - std::list::ElementAddress> elementsToDereferenceFromAgent; - for (auto &e: failedOwnershipSwitchElements) { - try { - std::rethrow_exception(e.failure); - } catch (cta::exception::NoSuchObject &) { - elementsToDereferenceFromAgent.push_back(ContainerTraits::getElementAddress(*e.element)); - elementsNotToReport.insert(ContainerTraits::getElementAddress(*e.element)); - } catch (Backend::WrongPreviousOwner &) { - elementsToDereferenceFromAgent.push_back(ContainerTraits::getElementAddress(*e.element)); - elementsNotToReport.insert(ContainerTraits::getElementAddress(*e.element)); - } catch (Backend::CouldNotUnlock&) { - // Do nothing, this element was indeed OK. + if (failedOwnershipSwitchElements.empty()) { + timingList.insertAndReset("updateResultProcessingTime", t); + // This is the easy case (and most common case). Everything went through fine. + ContainerTraits::removeReferencesAndCommit(cont, candidateElementsAddresses, lc); + timingList.insertAndReset("containerUpdateTime", t); + contSummaryAfter = ContainerTraits::getContainerSummary(cont); + // We skip the container trimming as we do not have the contId. + // trimming might release the lock + if (contLock.isLocked()) contLock.release(); + timingList.insertAndReset("containerUnlockTime", t); + // All jobs are validated + ret.summary += candidateElements.summary; + ret.elements.insertBack(std::move(candidateElements.elements)); + timingList.insertAndReset("structureProcessingTime", t); + } else { + // For the failed files, we have to differentiate the not owned or not existing ones from other error cases. + // For the not owned, not existing and those successfully switched, we have to de reference them form the container. + // For other cases, we will leave the elements referenced in the container, as we cannot ensure de-referencing is safe. 
+ std::set::ElementAddress> elementsNotToDereferenceFromContainer; + std::set::ElementAddress> elementsNotToReport; + std::list::ElementAddress> elementsToDereferenceFromAgent; + for (auto &e: failedOwnershipSwitchElements) { + try { + std::rethrow_exception(e.failure); + } catch (cta::exception::NoSuchObject &) { + elementsToDereferenceFromAgent.push_back(ContainerTraits::getElementAddress(*e.element)); + elementsNotToReport.insert(ContainerTraits::getElementAddress(*e.element)); + } catch (Backend::WrongPreviousOwner &) { + elementsToDereferenceFromAgent.push_back(ContainerTraits::getElementAddress(*e.element)); + elementsNotToReport.insert(ContainerTraits::getElementAddress(*e.element)); + } catch (Backend::CouldNotUnlock &) { + // Do nothing, this element was indeed OK. + } + catch (...) { + // This is a different error, so we will leave the reference to the element in the container + elementsNotToDereferenceFromContainer.insert(ContainerTraits::getElementAddress(*e.element)); + elementsToDereferenceFromAgent.push_back(ContainerTraits::getElementAddress(*e.element)); + elementsNotToReport.insert(ContainerTraits::getElementAddress(*e.element)); + } } - catch (...) { - // This is a different error, so we will leave the reference to the element in the container - elementsNotToDereferenceFromContainer.insert(ContainerTraits::getElementAddress(*e.element)); - elementsToDereferenceFromAgent.push_back(ContainerTraits::getElementAddress(*e.element)); - elementsNotToReport.insert(ContainerTraits::getElementAddress(*e.element)); + // We are done with the sorting. Apply the decisions... + std::list::ElementAddress> elementsToDereferenceFromContainer; + for (auto &e: candidateElements.elements) { + if (!elementsNotToDereferenceFromContainer.count(ContainerTraits::getElementAddress(e))) { + elementsToDereferenceFromContainer.push_back(ContainerTraits::getElementAddress(e)); + } } - } - // We are done with the sorting. Apply the decisions... 
- std::list::ElementAddress> elementsToDereferenceFromContainer; - for (auto & e: candidateElements.elements) { - if (!elementsNotToDereferenceFromContainer.count(ContainerTraits::getElementAddress(e))) { - elementsToDereferenceFromContainer.push_back(ContainerTraits::getElementAddress(e)); - } - } - timingList.insertAndReset("updateResultProcessingTime", t); - ContainerTraits::removeReferencesAndCommit(cont, elementsToDereferenceFromContainer, lc); - timingList.insertAndReset("containerUpdateTime", t); - contSummaryAfter = ContainerTraits::getContainerSummary(cont); - if (contLock.isLocked()) contLock.release(); - timingList.insertAndReset("containerUnlockTime", t); - m_agentReference.removeBatchFromOwnership(elementsToDereferenceFromAgent, m_backend); - for (auto & e: candidateElements.elements) { - if (!elementsNotToReport.count(ContainerTraits::getElementAddress(e))) { - ret.summary += ContainerTraits::getElementSummary(e); - ret.elements.insertBack(std::move(e)); + timingList.insertAndReset("updateResultProcessingTime", t); + ContainerTraits::removeReferencesAndCommit(cont, elementsToDereferenceFromContainer, lc); + timingList.insertAndReset("containerUpdateTime", t); + contSummaryAfter = ContainerTraits::getContainerSummary(cont); + if (contLock.isLocked()) contLock.release(); + timingList.insertAndReset("containerUnlockTime", t); + m_agentReference.removeBatchFromOwnership(elementsToDereferenceFromAgent, m_backend); + for (auto &e: candidateElements.elements) { + if (!elementsNotToReport.count(ContainerTraits::getElementAddress(e))) { + ret.summary += ContainerTraits::getElementSummary(e); + ret.elements.insertBack(std::move(e)); + } } + timingList.insertAndReset("structureProcessingTime", t); } - timingList.insertAndReset("structureProcessingTime", t); } } { @@ -363,85 +365,87 @@ public: localTimingList.insertAndReset("ownershipAdditionTime", t); m_agentReference.addBatchToOwnership(candidateElementsAddresses, m_backend); // We can now attempt to switch 
ownership of elements - if (auto failedOwnershipSwitchElements = ContainerTraits::switchElementsOwnership(candidateElements, - m_agentReference.getAgentAddress(), - cont.getAddressIfSet(), - localTimingList, t, lc); - failedOwnershipSwitchElements.empty()) { - localTimingList.insertAndReset("updateResultProcessingTime", t); - // This is the easy case (and most common case). Everything went through fine. - ContainerTraits::removeReferencesAndCommit(cont, candidateElementsAddresses, lc); - localTimingList.insertAndReset("containerUpdateTime", t); - contSummaryAfter = ContainerTraits::getContainerSummary(cont); - // If we emptied the container, we have to trim it. - didTrim = ContainerTraits::trimContainerIfNeeded(cont, contLock, contId, lc); - localTimingList.insertAndReset("containerTrimmingTime", t); - // trimming might release the lock - if (contLock.isLocked()) contLock.release(); - localTimingList.insertAndReset("containerUnlockTime", t); - // All jobs are validated - ret.summary += candidateElements.summary; - unfulfilledCriteria -= candidateElements.summary; - ret.elements.insertBack(std::move(candidateElements.elements)); - localTimingList.insertAndReset("structureProcessingTime", t); - } else { - // For the failed files, we have to differentiate the not owned or not existing ones from other error cases. - // For the not owned, not existing and those successfully switched, we have to de reference them form the container. - // For other cases, we will leave the elements referenced in the container, as we cannot ensure de-referencing is safe. 
- std::set::ElementAddress> elementsNotToDereferenceFromContainer; - std::set::ElementAddress> elementsNotToReport; - std::list::ElementAddress> elementsToDereferenceFromAgent; - for (auto &e: failedOwnershipSwitchElements) { - try { - std::rethrow_exception(e.failure); - } catch (cta::exception::NoSuchObject &) { - elementsToDereferenceFromAgent.push_back(ContainerTraits::getElementAddress(*e.element)); - elementsNotToReport.insert(ContainerTraits::getElementAddress(*e.element)); - } catch (Backend::WrongPreviousOwner &) { - elementsToDereferenceFromAgent.push_back(ContainerTraits::getElementAddress(*e.element)); - elementsNotToReport.insert(ContainerTraits::getElementAddress(*e.element)); - } catch (Backend::CouldNotUnlock&) { - // Do nothing, this element was indeed OK. + { + auto failedOwnershipSwitchElements = ContainerTraits::switchElementsOwnership(candidateElements, + m_agentReference.getAgentAddress(), + cont.getAddressIfSet(), + localTimingList, t, lc); + if (failedOwnershipSwitchElements.empty()) { + localTimingList.insertAndReset("updateResultProcessingTime", t); + // This is the easy case (and most common case). Everything went through fine. + ContainerTraits::removeReferencesAndCommit(cont, candidateElementsAddresses, lc); + localTimingList.insertAndReset("containerUpdateTime", t); + contSummaryAfter = ContainerTraits::getContainerSummary(cont); + // If we emptied the container, we have to trim it. 
+ didTrim = ContainerTraits::trimContainerIfNeeded(cont, contLock, contId, lc); + localTimingList.insertAndReset("containerTrimmingTime", t); + // trimming might release the lock + if (contLock.isLocked()) contLock.release(); + localTimingList.insertAndReset("containerUnlockTime", t); + // All jobs are validated + ret.summary += candidateElements.summary; + unfulfilledCriteria -= candidateElements.summary; + ret.elements.insertBack(std::move(candidateElements.elements)); + localTimingList.insertAndReset("structureProcessingTime", t); + } else { + // For the failed files, we have to differentiate the not owned or not existing ones from other error cases. + // For the not owned, not existing and those successfully switched, we have to de reference them form the container. + // For other cases, we will leave the elements referenced in the container, as we cannot ensure de-referencing is safe. + std::set::ElementAddress> elementsNotToDereferenceFromContainer; + std::set::ElementAddress> elementsNotToReport; + std::list::ElementAddress> elementsToDereferenceFromAgent; + for (auto &e: failedOwnershipSwitchElements) { + try { + std::rethrow_exception(e.failure); + } catch (cta::exception::NoSuchObject &) { + elementsToDereferenceFromAgent.push_back(ContainerTraits::getElementAddress(*e.element)); + elementsNotToReport.insert(ContainerTraits::getElementAddress(*e.element)); + } catch (Backend::WrongPreviousOwner &) { + elementsToDereferenceFromAgent.push_back(ContainerTraits::getElementAddress(*e.element)); + elementsNotToReport.insert(ContainerTraits::getElementAddress(*e.element)); + } catch (Backend::CouldNotUnlock &) { + // Do nothing, this element was indeed OK. + } + catch (...) 
{ + // This is a different error, so we will leave the reference to the element in the container + elementsNotToDereferenceFromContainer.insert(ContainerTraits::getElementAddress(*e.element)); + elementsToDereferenceFromAgent.push_back(ContainerTraits::getElementAddress(*e.element)); + elementsNotToReport.insert(ContainerTraits::getElementAddress(*e.element)); + elementsToSkip.insert(ContainerTraits::getElementAddress(*e.element)); + // If we get this kind of situation, we do not try to carry on, as it becomes too complex. + unexpectedException = true; + } } - catch (...) { - // This is a different error, so we will leave the reference to the element in the container - elementsNotToDereferenceFromContainer.insert(ContainerTraits::getElementAddress(*e.element)); - elementsToDereferenceFromAgent.push_back(ContainerTraits::getElementAddress(*e.element)); - elementsNotToReport.insert(ContainerTraits::getElementAddress(*e.element)); - elementsToSkip.insert(ContainerTraits::getElementAddress(*e.element)); - // If we get this kind of situation, we do not try to carry on, as it becomes too complex. - unexpectedException = true; + // We are done with the sorting. Apply the decisions... + std::list::ElementAddress> elementsToDereferenceFromContainer; + for (auto &e: candidateElements.elements) { + if (!elementsNotToDereferenceFromContainer.count(ContainerTraits::getElementAddress(e))) { + elementsToDereferenceFromContainer.push_back(ContainerTraits::getElementAddress(e)); + } } - } - // We are done with the sorting. Apply the decisions... 
- std::list::ElementAddress> elementsToDereferenceFromContainer; - for (auto & e: candidateElements.elements) { - if (!elementsNotToDereferenceFromContainer.count(ContainerTraits::getElementAddress(e))) { - elementsToDereferenceFromContainer.push_back(ContainerTraits::getElementAddress(e)); - } - } - localTimingList.insertAndReset("updateResultProcessingTime", t); - ContainerTraits::removeReferencesAndCommit(cont, elementsToDereferenceFromContainer, lc); - localTimingList.insertAndReset("containerUpdateTime", t); - contSummaryAfter = ContainerTraits::getContainerSummary(cont); - // If we emptied the container, we have to trim it. - ContainerTraits::trimContainerIfNeeded(cont, contLock, contId, lc); - localTimingList.insertAndReset("containerTrimmingTime", t); - // trimming might release the lock - if (contLock.isLocked()) contLock.release(); - localTimingList.insertAndReset("containerUnlockTime", t); - m_agentReference.removeBatchFromOwnership(elementsToDereferenceFromAgent, m_backend); - typename ContainerTraits::PoppedElementsSummary batchSummary = candidateElements.summary; - for (auto & e: candidateElements.elements) { - if (!elementsNotToReport.count(ContainerTraits::getElementAddress(e))) { - ret.elements.insertBack(std::move(e)); - } else { - batchSummary -= ContainerTraits::getElementSummary(e); + localTimingList.insertAndReset("updateResultProcessingTime", t); + ContainerTraits::removeReferencesAndCommit(cont, elementsToDereferenceFromContainer, lc); + localTimingList.insertAndReset("containerUpdateTime", t); + contSummaryAfter = ContainerTraits::getContainerSummary(cont); + // If we emptied the container, we have to trim it. 
+ ContainerTraits::trimContainerIfNeeded(cont, contLock, contId, lc); + localTimingList.insertAndReset("containerTrimmingTime", t); + // trimming might release the lock + if (contLock.isLocked()) contLock.release(); + localTimingList.insertAndReset("containerUnlockTime", t); + m_agentReference.removeBatchFromOwnership(elementsToDereferenceFromAgent, m_backend); + typename ContainerTraits::PoppedElementsSummary batchSummary = candidateElements.summary; + for (auto &e: candidateElements.elements) { + if (!elementsNotToReport.count(ContainerTraits::getElementAddress(e))) { + ret.elements.insertBack(std::move(e)); + } else { + batchSummary -= ContainerTraits::getElementSummary(e); + } } + ret.summary += batchSummary; + unfulfilledCriteria -= batchSummary; + localTimingList.insertAndReset("structureProcessingTime", t); } - ret.summary += batchSummary; - unfulfilledCriteria -= batchSummary; - localTimingList.insertAndReset("structureProcessingTime", t); } log::ScopedParamContainer params(lc); params.add("C", ContainerTraits::c_containerTypeName) diff --git a/tapeserver/castor/tape/tapeserver/daemon/DataTransferSession.cpp b/tapeserver/castor/tape/tapeserver/daemon/DataTransferSession.cpp index be92665521..67549fafd5 100644 --- a/tapeserver/castor/tape/tapeserver/daemon/DataTransferSession.cpp +++ b/tapeserver/castor/tape/tapeserver/daemon/DataTransferSession.cpp @@ -145,10 +145,7 @@ castor::tape::tapeserver::daemon::DataTransferSession::execute() { lc.log(cta::log::INFO, "Transition from down to up detected. Will check if a tape is in the drive."); if (!emptyDriveProbe.driveIsEmpty()) { std::string errorMsg = "A tape was detected in the drive. 
Putting the drive down."; - if (std::optional probeErrorMsg = emptyDriveProbe.getProbeErrorMsg(); - probeErrorMsg) { - errorMsg = probeErrorMsg.value(); - } + errorMsg += emptyDriveProbe.getProbeErrorMsg().value_or(""); putDriveDown(errorMsg, nullptr, lc); continue; } else { diff --git a/tapeserver/daemon/ProcessManager.cpp b/tapeserver/daemon/ProcessManager.cpp index 2a69a2c7f2..67017e0959 100644 --- a/tapeserver/daemon/ProcessManager.cpp +++ b/tapeserver/daemon/ProcessManager.cpp @@ -103,22 +103,25 @@ cta::log::LogContext& ProcessManager::logContext() { ProcessManager::RunPartStatus ProcessManager::runShutdownManagement() { // Check the current statuses for shutdown requests // If any process requests a shutdown, we will trigger it in all. - if (bool anyAskedShutdown = - std::count_if(m_subprocessHandlers.cbegin(), m_subprocessHandlers.cend(), [this](const SubprocessAndStatus& i) { - if(i.status.shutdownRequested) { - cta::log::ScopedParamContainer params(m_logContext); - params.add("SubprocessName", i.handler->index); - m_logContext.log(log::INFO, "Subprocess requested shutdown"); - } - return i.status.shutdownRequested; - }); - anyAskedShutdown) { - for(auto & sp: m_subprocessHandlers) { - sp.status = sp.handler->shutdown(); - cta::log::ScopedParamContainer params(m_logContext); - params.add("SubprocessName", sp.handler->index) - .add("ShutdownComplete", sp.status.shutdownComplete); - m_logContext.log(log::INFO, "Signaled shutdown to subprocess handler"); + { + bool anyAskedShutdown = + std::count_if(m_subprocessHandlers.cbegin(), m_subprocessHandlers.cend(), + [this](const SubprocessAndStatus &i) { + if (i.status.shutdownRequested) { + cta::log::ScopedParamContainer params(m_logContext); + params.add("SubprocessName", i.handler->index); + m_logContext.log(log::INFO, "Subprocess requested shutdown"); + } + return i.status.shutdownRequested; + }); + if (anyAskedShutdown) { + for (auto &sp: m_subprocessHandlers) { + sp.status = sp.handler->shutdown(); + 
cta::log::ScopedParamContainer params(m_logContext); + params.add("SubprocessName", sp.handler->index) + .add("ShutdownComplete", sp.status.shutdownComplete); + m_logContext.log(log::INFO, "Signaled shutdown to subprocess handler"); + } } } // If all processes completed their shutdown, we can exit @@ -136,26 +139,29 @@ ProcessManager::RunPartStatus ProcessManager::runShutdownManagement() { ProcessManager::RunPartStatus ProcessManager::runKillManagement() { // If any process asks for a kill, we kill all sub processes and exit - if (bool anyAskedKill = - std::count_if(m_subprocessHandlers.cbegin(), m_subprocessHandlers.cend(), [this](const SubprocessAndStatus& i) { - if(i.status.killRequested) { - cta::log::ScopedParamContainer params(m_logContext); - params.add("SubprocessName", i.handler->index); - m_logContext.log(log::INFO, "Subprocess requested kill"); - } - return i.status.killRequested; - }); - anyAskedKill) { - for (auto& sp : m_subprocessHandlers) { - sp.handler->kill(); - cta::log::ScopedParamContainer params(m_logContext); - params.add("SubprocessName", sp.handler->index); - m_logContext.log(log::INFO, "Instructed handler to kill subprocess"); + { + bool anyAskedKill = + std::count_if(m_subprocessHandlers.cbegin(), m_subprocessHandlers.cend(), + [this](const SubprocessAndStatus &i) { + if (i.status.killRequested) { + cta::log::ScopedParamContainer params(m_logContext); + params.add("SubprocessName", i.handler->index); + m_logContext.log(log::INFO, "Subprocess requested kill"); + } + return i.status.killRequested; + }); + if (anyAskedKill) { + for (auto &sp: m_subprocessHandlers) { + sp.handler->kill(); + cta::log::ScopedParamContainer params(m_logContext); + params.add("SubprocessName", sp.handler->index); + m_logContext.log(log::INFO, "Instructed handler to kill subprocess"); + } + RunPartStatus ret; + ret.doExit = true; + ret.exitCode = EXIT_SUCCESS; + return ret; } - RunPartStatus ret; - ret.doExit = true; - ret.exitCode = EXIT_SUCCESS; - return ret; } 
return RunPartStatus(); } @@ -206,21 +212,24 @@ ProcessManager::RunPartStatus ProcessManager::runForkManagement() { ProcessManager::RunPartStatus ProcessManager::runSigChildManagement() { // If any process handler received sigChild, we signal it to all processes. Typically, this is // done by the signal handler - if (bool sigChild = - std::count_if(m_subprocessHandlers.cbegin(), m_subprocessHandlers.cend(), [this](const SubprocessAndStatus& i) { - if(i.status.sigChild) { - cta::log::ScopedParamContainer params(m_logContext); - params.add("SubprocessName", i.handler->index); - m_logContext.log(log::INFO, "Handler received SIGCHILD. Propagating to all handlers."); - } - return i.status.sigChild; - }); - sigChild) { - for(auto & sp: m_subprocessHandlers) { - sp.status = sp.handler->processSigChild(); - cta::log::ScopedParamContainer params(m_logContext); - params.add("SubprocessName", sp.handler->index); - m_logContext.log(log::INFO, "Propagated SIGCHILD."); + { + bool sigChild = + std::count_if(m_subprocessHandlers.cbegin(), m_subprocessHandlers.cend(), + [this](const SubprocessAndStatus &i) { + if (i.status.sigChild) { + cta::log::ScopedParamContainer params(m_logContext); + params.add("SubprocessName", i.handler->index); + m_logContext.log(log::INFO, "Handler received SIGCHILD. 
Propagating to all handlers."); + } + return i.status.sigChild; + }); + if (sigChild) { + for (auto &sp: m_subprocessHandlers) { + sp.status = sp.handler->processSigChild(); + cta::log::ScopedParamContainer params(m_logContext); + params.add("SubprocessName", sp.handler->index); + m_logContext.log(log::INFO, "Propagated SIGCHILD."); + } } } // If all processes completed their shutdown, we can exit -- GitLab From 5bd337c268cada6cc7fdca91850e2d2807572be2 Mon Sep 17 00:00:00 2001 From: guenther Date: Sat, 29 Nov 2025 00:08:08 +0100 Subject: [PATCH 12/12] fix bugs in init statements --- tapeserver/castor/tape/tapeserver/file/HeaderChecker.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/tapeserver/castor/tape/tapeserver/file/HeaderChecker.cpp b/tapeserver/castor/tape/tapeserver/file/HeaderChecker.cpp index 34e9d74a7e..c59a6241b2 100644 --- a/tapeserver/castor/tape/tapeserver/file/HeaderChecker.cpp +++ b/tapeserver/castor/tape/tapeserver/file/HeaderChecker.cpp @@ -140,7 +140,6 @@ std::string HeaderChecker::checkVolumeLabel(tapeserver::drive::DriveInterface &d size_t blockSize = 256 * 1024; auto data = std::make_unique(blockSize + 1); - size_t bytes_read = drive.readBlock(data.get(), blockSize); if (size_t bytes_read = drive.readBlock(data.get(), blockSize); bytes_read < sizeof(vol1)) { throw cta::exception::Exception("Too few bytes read from label"); } -- GitLab