From a6f5d06abb0b49d1fdea7b667523380922298ab9 Mon Sep 17 00:00:00 2001 From: pecornilleau Date: Fri, 29 Sep 2023 02:05:45 +0200 Subject: [PATCH 01/10] EVM: Bench: cleanup unecessary tpg calculations --- .../benchmarks/scripts/analysis/analysis.js | 58 +++++++++---------- 1 file changed, 27 insertions(+), 31 deletions(-) diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js index 1f43ac726a58..b3a3cd4086d3 100644 --- a/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js @@ -2,14 +2,12 @@ // // SPDX-License-Identifier: MIT -const { ChartJSNodeCanvas } = require('chartjs-node-canvas'); const { is_transfer, is_create, is_transaction, BASE_GAS } = require('./utils') // const { ChartConfiguration } = require('chart') const fs = require('fs'); const number_formatter_compact = Intl.NumberFormat('en', { notation: 'compact', compactDisplay: 'long' }); const number_formatter = Intl.NumberFormat('en', {}); -const RUN_TRANSACTION_OVERHEAD = 560_000 module.exports = { init_analysis, check_result, process_record } @@ -18,15 +16,13 @@ function init_analysis() { // total amount of gas consumed total_gas: 0, // total amount of ticks used in run_transaction_ticks - total_ticks_tx: 0, - tick_per_gas: [], - run_transaction_overhead: [], + sputnik_ticks: 0, + pure_transfers_ticks: [], init: 0, decode: 0, signatures: [], nb_kernel_run: 0, nb_call: 0, - nb_create: 0, nb_transfer: 0, kernel_runs: [] @@ -35,22 +31,23 @@ function init_analysis() { } function print_analysis(infos) { - const tickPerGas = infos.total_ticks_tx / infos.total_gas console.info(`-------------------------------------------------------`) console.info(`Kernels infos`) - console.info(`Overall tick per gas: ~${tickPerGas.toFixed()}`) - console.info(`Tick per gas: ${pp_avg_max(infos.tick_per_gas)}`) - console.info(`Signature verification: ${pp_avg_max(infos.signatures)}`) + console.info(`----------------------------------`) console.info(`Decoding: ${pp(infos.decode)} ticks`) console.info(`Initialisation: ${pp(infos.init)} ticks`) - console.info(`transfer overhead: ${pp_avg_max(infos.run_transaction_overhead)} `) + console.info(`Signature verification: ${pp_avg_max(infos.signatures)}`) + console.info(`Transfer tick cost: ${pp_avg_max(infos.pure_transfers_ticks)} `) console.info(`-------------------------------------------------------`) - console.info(`Benchmark run infos`) + console.info(`Benchmark run stats`) + console.info(`----------------------------------`) + console.info(`Total gas in execution: ${pp(infos.total_gas)}`) + console.info(`Total ticks in sputnik: ${pp(infos.sputnik_ticks)}`) console.info(`Number of tx: ${infos.signatures.length}`) console.info(`Number of kernel run: ${infos.nb_kernel_run}`) console.info(`Number of transfers: ${infos.nb_transfer}`) - console.info(`Number of create: ${infos.nb_create}`) - console.info(`Number of call: ${infos.nb_call}`) + console.info(`Number of create/call: ${infos.nb_call}`) + console.info(`Number of kernel run: ${infos.nb_kernel_run}`) console.info(`-------------------------------------------------------`) } @@ -61,45 +58,44 @@ function process_record(record, acc) { } function process_bench_record(record, acc) { - if (!isNaN(record.interpreter_decode_ticks)) acc.init = record.interpreter_decode_ticks - if (!isNaN(record.interpreter_init_ticks)) acc.decode = record.interpreter_init_ticks - if (!isNaN(record.interpreter_decode_ticks)) 
acc.nb_kernel_run += 1 + if (!isNaN(record.interpreter_decode_ticks)) { + acc.nb_kernel_run += 1 + acc.decode = Math.max(acc.decode, record.interpreter_decode_ticks) + acc.init = Math.max(acc.init, record.interpreter_init_ticks) + } if (!isNaN(record.kernel_run_ticks)) acc.kernel_runs.push(record.kernel_run_ticks) } function process_transaction_record(record, acc) { acc.signatures.push(record.signature_verification_ticks) if (is_transfer(record)) process_transfer(record, acc) - else if (is_create(record)) process_create(record, acc) - else process_call(record, acc) + else process_execution(record, acc) } function process_transfer(record, acc) { - acc.run_transaction_overhead.push(record.run_transaction_ticks) + acc.pure_transfers_ticks.push(record.run_transaction_ticks) acc.nb_transfer++ } -function process_create(record, acc) { - acc.nb_create++ - -} -function process_call(record, acc) { +function process_execution(record, acc) { acc.nb_call++ let gas = record.gas_cost - BASE_GAS - let ticks = record.run_transaction_ticks - RUN_TRANSACTION_OVERHEAD - acc.total_gas += gas - acc.total_ticks_tx += ticks - acc.tick_per_gas.push(ticks / gas) + if (!isNaN(record.gas_cost)) acc.total_gas += gas + if (!isNaN(record.sputnik_runtime_ticks)) acc.sputnik_ticks += record.sputnik_runtime_ticks } function check_result(infos) { - const tickPerGas = infos.total_ticks_tx / infos.total_gas + const tickPerGas = infos.sputnik_ticks / infos.total_gas print_analysis(infos) const is_error = tickPerGas > 2000 if (is_error) { - console.error(`Tick per gas too high!`) + console.info(`-------------------------------------------------------`) + console.error(`WARNING: tpg too high (${tickPerGas})`) + console.info(`-------------------------------------------------------`) return 1 + } else { + console.log(`Global tpg: ${tickPerGas}`) } return 0 } -- GitLab From 8c5b7d97460362e5391fd63f49994881ea086416 Mon Sep 17 00:00:00 2001 From: pecornilleau Date: Thu, 2 Nov 2023 11:20:28 +0100 Subject: [PATCH 02/10] EVM: Bench: filter data --- etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js index b3a3cd4086d3..6d9b4d70b1a6 100644 --- a/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js @@ -36,8 +36,8 @@ function print_analysis(infos) { console.info(`----------------------------------`) console.info(`Decoding: ${pp(infos.decode)} ticks`) console.info(`Initialisation: ${pp(infos.init)} ticks`) - console.info(`Signature verification: ${pp_avg_max(infos.signatures)}`) - console.info(`Transfer tick cost: ${pp_avg_max(infos.pure_transfers_ticks)} `) + console.info(`Signature verification: ${pp_avg_max(infos.signatures.filter((x) => !!x))}`) + console.info(`Transfer tick cost: ${pp_avg_max(infos.pure_transfers_ticks.filter((x) => !!x))} `) console.info(`-------------------------------------------------------`) console.info(`Benchmark run stats`) console.info(`----------------------------------`) -- GitLab From a0cc8ae73cc74242c839cd3fe79fe0ad656e0172 Mon Sep 17 00:00:00 2001 From: pecornilleau Date: Tue, 7 Nov 2023 18:04:10 +0100 Subject: [PATCH 03/10] EVM: Bench: linear regression utils --- .../benchmarks/scripts/analysis/utils.js | 26 ++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git 
a/etherlink/kernel_evm/benchmarks/scripts/analysis/utils.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/utils.js index 288bfb3d58c4..41935c11f163 100644 --- a/etherlink/kernel_evm/benchmarks/scripts/analysis/utils.js +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/utils.js @@ -5,8 +5,9 @@ const BASE_GAS = 21000 const CREATE_STORAGE_CUTOFF = 600_000 +const MLR = require("ml-regression-multivariate-linear") -module.exports = { is_transfer, is_create, is_transaction, BASE_GAS } +module.exports = { is_transfer, is_create, is_transaction, BASE_GAS, make_lr, print_lr } function is_transfer(record) { return record.gas_cost == BASE_GAS @@ -17,4 +18,27 @@ function is_create(record) { function is_transaction(record) { return !record.benchmark_name.includes("(all)") +} + +function make_lr(data, select_x, select_y) { + + var X = [] + var Y = [] + for (datum of data) { + let x = select_x(datum) + let y = select_y(datum) + if (!!x && !!y) { + X.push([x]) + Y.push([y]) + } + } + if (X.length > 0) { + let mlr = new MLR(X, Y) + return mlr + } +} + +function print_lr(lr, var_name = "size") { + if (!!lr) return `Y = ${lr.weights[1][0].toFixed()} + ${lr.weights[0][0].toFixed()} * ${var_name}` + else return "no linear regression available" } \ No newline at end of file -- GitLab From 81e4be0d5a997a5fa312d613092b06cf1b8aabfc Mon Sep 17 00:00:00 2001 From: pecornilleau Date: Thu, 2 Nov 2023 16:58:46 +0100 Subject: [PATCH 04/10] EVM: Bench: analyze fetch data --- .../kernel_evm/benchmarks/package-lock.json | 97 +++++++++++++++++++ etherlink/kernel_evm/benchmarks/package.json | 3 +- .../benchmarks/scripts/analysis/analysis.js | 26 +++-- .../benchmarks/scripts/analysis/fetch.js | 25 +++++ .../benchmarks/scripts/analysis/utils.js | 14 ++- 5 files changed, 157 insertions(+), 8 deletions(-) create mode 100644 etherlink/kernel_evm/benchmarks/scripts/analysis/fetch.js diff --git a/etherlink/kernel_evm/benchmarks/package-lock.json b/etherlink/kernel_evm/benchmarks/package-lock.json index 193fdacdb279..d905dfb4a51c 100644 --- a/etherlink/kernel_evm/benchmarks/package-lock.json +++ b/etherlink/kernel_evm/benchmarks/package-lock.json @@ -17,6 +17,7 @@ "ethereumjs-wallet": "^1.0.2", "ethers": "^6.7.1", "keccak": "^3.0.3", + "ml-regression-multivariate-linear": "^2.0.4", "rlp": "^2.2.7", "solc": "^0.8.21" }, @@ -705,6 +706,11 @@ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, + "node_modules/is-any-array": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-any-array/-/is-any-array-2.0.1.tgz", + "integrity": "sha512-UtilS7hLRu++wb/WBAw9bNuP1Eg04Ivn1vERJck8zJthEvXCBEBpGR/33u/xLKWEQf95803oalHrVDptcAvFdQ==" + }, "node_modules/is-fullwidth-code-point": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", @@ -867,6 +873,49 @@ "node": ">=10" } }, + "node_modules/ml-array-max": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/ml-array-max/-/ml-array-max-1.2.4.tgz", + "integrity": "sha512-BlEeg80jI0tW6WaPyGxf5Sa4sqvcyY6lbSn5Vcv44lp1I2GR6AWojfUvLnGTNsIXrZ8uqWmo8VcG1WpkI2ONMQ==", + "dependencies": { + "is-any-array": "^2.0.0" + } + }, + "node_modules/ml-array-min": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/ml-array-min/-/ml-array-min-1.2.3.tgz", + "integrity": "sha512-VcZ5f3VZ1iihtrGvgfh/q0XlMobG6GQ8FsNyQXD3T+IlstDv85g8kfV0xUG1QPRO/t21aukaJowDzMTc7j5V6Q==", + 
"dependencies": { + "is-any-array": "^2.0.0" + } + }, + "node_modules/ml-array-rescale": { + "version": "1.3.7", + "resolved": "https://registry.npmjs.org/ml-array-rescale/-/ml-array-rescale-1.3.7.tgz", + "integrity": "sha512-48NGChTouvEo9KBctDfHC3udWnQKNKEWN0ziELvY3KG25GR5cA8K8wNVzracsqSW1QEkAXjTNx+ycgAv06/1mQ==", + "dependencies": { + "is-any-array": "^2.0.0", + "ml-array-max": "^1.2.4", + "ml-array-min": "^1.2.3" + } + }, + "node_modules/ml-matrix": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/ml-matrix/-/ml-matrix-6.11.0.tgz", + "integrity": "sha512-7jr9NmFRkaUxbKslfRu3aZOjJd2LkSitCGv+QH9PF0eJoEG7jIpjXra1Vw8/kgao8+kHCSsJONG6vfWmXQ+/Eg==", + "dependencies": { + "is-any-array": "^2.0.1", + "ml-array-rescale": "^1.3.7" + } + }, + "node_modules/ml-regression-multivariate-linear": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/ml-regression-multivariate-linear/-/ml-regression-multivariate-linear-2.0.4.tgz", + "integrity": "sha512-/vShPAlP+mB7P2mC5TuXwObSJNl/UBI71/bszt9ilTg6yLKy6btDLpAYyJNa6t+JnL5a7q+Yy4dCltfpvqXRIw==", + "dependencies": { + "ml-matrix": "^6.10.1" + } + }, "node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", @@ -1910,6 +1959,11 @@ "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" }, + "is-any-array": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/is-any-array/-/is-any-array-2.0.1.tgz", + "integrity": "sha512-UtilS7hLRu++wb/WBAw9bNuP1Eg04Ivn1vERJck8zJthEvXCBEBpGR/33u/xLKWEQf95803oalHrVDptcAvFdQ==" + }, "is-fullwidth-code-point": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", @@ -2030,6 +2084,49 @@ "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", "optional": true }, + "ml-array-max": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/ml-array-max/-/ml-array-max-1.2.4.tgz", + "integrity": "sha512-BlEeg80jI0tW6WaPyGxf5Sa4sqvcyY6lbSn5Vcv44lp1I2GR6AWojfUvLnGTNsIXrZ8uqWmo8VcG1WpkI2ONMQ==", + "requires": { + "is-any-array": "^2.0.0" + } + }, + "ml-array-min": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/ml-array-min/-/ml-array-min-1.2.3.tgz", + "integrity": "sha512-VcZ5f3VZ1iihtrGvgfh/q0XlMobG6GQ8FsNyQXD3T+IlstDv85g8kfV0xUG1QPRO/t21aukaJowDzMTc7j5V6Q==", + "requires": { + "is-any-array": "^2.0.0" + } + }, + "ml-array-rescale": { + "version": "1.3.7", + "resolved": "https://registry.npmjs.org/ml-array-rescale/-/ml-array-rescale-1.3.7.tgz", + "integrity": "sha512-48NGChTouvEo9KBctDfHC3udWnQKNKEWN0ziELvY3KG25GR5cA8K8wNVzracsqSW1QEkAXjTNx+ycgAv06/1mQ==", + "requires": { + "is-any-array": "^2.0.0", + "ml-array-max": "^1.2.4", + "ml-array-min": "^1.2.3" + } + }, + "ml-matrix": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/ml-matrix/-/ml-matrix-6.11.0.tgz", + "integrity": "sha512-7jr9NmFRkaUxbKslfRu3aZOjJd2LkSitCGv+QH9PF0eJoEG7jIpjXra1Vw8/kgao8+kHCSsJONG6vfWmXQ+/Eg==", + "requires": { + "is-any-array": "^2.0.1", + "ml-array-rescale": "^1.3.7" + } + }, + "ml-regression-multivariate-linear": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/ml-regression-multivariate-linear/-/ml-regression-multivariate-linear-2.0.4.tgz", + "integrity": "sha512-/vShPAlP+mB7P2mC5TuXwObSJNl/UBI71/bszt9ilTg6yLKy6btDLpAYyJNa6t+JnL5a7q+Yy4dCltfpvqXRIw==", + "requires": 
{ + "ml-matrix": "^6.10.1" + } + }, "ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", diff --git a/etherlink/kernel_evm/benchmarks/package.json b/etherlink/kernel_evm/benchmarks/package.json index f9d4f875021c..1d1b6e6c00a0 100644 --- a/etherlink/kernel_evm/benchmarks/package.json +++ b/etherlink/kernel_evm/benchmarks/package.json @@ -12,7 +12,8 @@ "solc": "^0.8.21", "csv-parse": "5.5.0", "csv-stringify": "^6.4.2", - "commander": "^11.0.0" + "commander": "^11.0.0", + "ml-regression-multivariate-linear": "^2.0.4" }, "optionalDependencies": { "chartjs-node-canvas": "^4.1.6", diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js index 6d9b4d70b1a6..4a9c1699fa7b 100644 --- a/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js @@ -4,7 +4,8 @@ const { is_transfer, is_create, is_transaction, BASE_GAS } = require('./utils') // const { ChartConfiguration } = require('chart') -const fs = require('fs'); +const fs = require('fs') +const fetch = require('./fetch') const number_formatter_compact = Intl.NumberFormat('en', { notation: 'compact', compactDisplay: 'long' }); const number_formatter = Intl.NumberFormat('en', {}); @@ -24,13 +25,18 @@ function init_analysis() { nb_kernel_run: 0, nb_call: 0, nb_transfer: 0, - kernel_runs: [] + kernel_runs: [], + fetch_data: [] }; return empty } function print_analysis(infos) { + console.info(`-------------------------------------------------------`) + console.info(`Fetch Analysis`) + console.info(`----------------------------------`) + let error_fetch = fetch.print_fetch_analysis(infos) console.info(`-------------------------------------------------------`) console.info(`Kernels infos`) console.info(`----------------------------------`) @@ -49,7 +55,7 @@ function print_analysis(infos) { console.info(`Number of create/call: ${infos.nb_call}`) console.info(`Number of kernel run: ${infos.nb_kernel_run}`) console.info(`-------------------------------------------------------`) - + return error_fetch } function process_record(record, acc) { @@ -64,6 +70,14 @@ function process_bench_record(record, acc) { acc.init = Math.max(acc.init, record.interpreter_init_ticks) } if (!isNaN(record.kernel_run_ticks)) acc.kernel_runs.push(record.kernel_run_ticks) + if (!isNaN(record.fetch_blueprint_ticks) && !isNaN(record.nb_tx)) { + acc.fetch_data.push({ + ticks: record.fetch_blueprint_ticks, + size: record.inbox_size, + nb_tx: record.nb_tx, + benchmark_name: record.benchmark_name + }) + } } function process_transaction_record(record, acc) { @@ -87,11 +101,11 @@ function process_execution(record, acc) { function check_result(infos) { const tickPerGas = infos.sputnik_ticks / infos.total_gas - print_analysis(infos) - const is_error = tickPerGas > 2000 + let nb_errors = print_analysis(infos) + const is_error = nb_errors > 0 if (is_error) { console.info(`-------------------------------------------------------`) - console.error(`WARNING: tpg too high (${tickPerGas})`) + console.error(`WARNING: too many model underestimation (${nb_errors})`) console.info(`-------------------------------------------------------`) return 1 } else { diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/fetch.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/fetch.js new file mode 100644 index 000000000000..37cee067d0e5 --- /dev/null +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/fetch.js @@ -0,0 
+1,25 @@ +// SPDX-FileCopyrightText: 2023 Marigold +// +// SPDX-License-Identifier: MIT + +const path = require('node:path') +module.exports = { print_fetch_analysis } +const fs = require('fs'); +const csv = require('csv-stringify/sync') +const utils = require("./utils") +const OUTPUT = 'fetch_data.csv' +const UPPER_BOUND = 1_000_000_000 + +function print_fetch_analysis(infos, dir = "analysis_result") { + // prepare data + const csv_config = { + header: true, + columns: ["benchmark_name", "size", "nb_tx", "ticks"] + } + fs.mkdirSync(dir, { recursive: true }) + fs.writeFileSync(path.format({ dir, name: OUTPUT }), csv.stringify(infos.fetch_data, csv_config)) + + // compute errors + console.log(`current model: Y = ${UPPER_BOUND}`) + return utils.print_summary_errors(infos.fetch_data, datum => { return datum.ticks - UPPER_BOUND }) +} diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/utils.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/utils.js index 41935c11f163..5ebbf0305b9f 100644 --- a/etherlink/kernel_evm/benchmarks/scripts/analysis/utils.js +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/utils.js @@ -7,7 +7,7 @@ const CREATE_STORAGE_CUTOFF = 600_000 const MLR = require("ml-regression-multivariate-linear") -module.exports = { is_transfer, is_create, is_transaction, BASE_GAS, make_lr, print_lr } +module.exports = { is_transfer, is_create, is_transaction, BASE_GAS, make_lr, print_lr, print_summary_errors } function is_transfer(record) { return record.gas_cost == BASE_GAS @@ -41,4 +41,16 @@ function make_lr(data, select_x, select_y) { function print_lr(lr, var_name = "size") { if (!!lr) return `Y = ${lr.weights[1][0].toFixed()} + ${lr.weights[0][0].toFixed()} * ${var_name}` else return "no linear regression available" +} + +function print_summary_errors(data, compute_error) { + let max_error_current = 0; + let nb_error = 0 + for (datum of data) { + let error = compute_error(datum) + if (error > 0) nb_error += 1 + max_error_current = Math.max(max_error_current, error) + } + console.log(`nb of errors: ${nb_error} ; maximum error: ${max_error_current} ticks`) + return nb_error } \ No newline at end of file -- GitLab From 9bad6d1e9f3a4618c2cd65e2a0c01ba884d4bf7f Mon Sep 17 00:00:00 2001 From: pecornilleau Date: Thu, 2 Nov 2023 18:51:19 +0100 Subject: [PATCH 05/10] EVM: Bench: analyze block finalization data --- .../benchmarks/scripts/analysis/analysis.js | 29 +++++++++++++++++-- .../scripts/analysis/block_finalization.js | 23 +++++++++++++++ 2 files changed, 50 insertions(+), 2 deletions(-) create mode 100644 etherlink/kernel_evm/benchmarks/scripts/analysis/block_finalization.js diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js index 4a9c1699fa7b..b00afdf13e58 100644 --- a/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js @@ -6,6 +6,7 @@ const { is_transfer, is_create, is_transaction, BASE_GAS } = require('./utils') // const { ChartConfiguration } = require('chart') const fs = require('fs') const fetch = require('./fetch') +const block_finalization = require('./block_finalization') const number_formatter_compact = Intl.NumberFormat('en', { notation: 'compact', compactDisplay: 'long' }); const number_formatter = Intl.NumberFormat('en', {}); @@ -26,7 +27,8 @@ function init_analysis() { nb_call: 0, nb_transfer: 0, kernel_runs: [], - fetch_data: [] + fetch_data: [], + block_finalization: [] }; return empty @@ 
-38,6 +40,10 @@ function print_analysis(infos) { console.info(`----------------------------------`) let error_fetch = fetch.print_fetch_analysis(infos) console.info(`-------------------------------------------------------`) + console.info(`Block Finalization Analysis`) + console.info(`----------------------------------`) + let error_block_finalization = block_finalization.print_analysis(infos) + console.info(`-------------------------------------------------------`) console.info(`Kernels infos`) console.info(`----------------------------------`) console.info(`Decoding: ${pp(infos.decode)} ticks`) @@ -54,8 +60,10 @@ function print_analysis(infos) { console.info(`Number of transfers: ${infos.nb_transfer}`) console.info(`Number of create/call: ${infos.nb_call}`) console.info(`Number of kernel run: ${infos.nb_kernel_run}`) + console.info(`Number of blocks: ${infos.block_finalization.length}`) console.info(`-------------------------------------------------------`) - return error_fetch + return error_fetch + error_block_finalization + } function process_record(record, acc) { @@ -70,6 +78,8 @@ function process_bench_record(record, acc) { acc.init = Math.max(acc.init, record.interpreter_init_ticks) } if (!isNaN(record.kernel_run_ticks)) acc.kernel_runs.push(record.kernel_run_ticks) + + // Adds info needed for fetch analysis if (!isNaN(record.fetch_blueprint_ticks) && !isNaN(record.nb_tx)) { acc.fetch_data.push({ ticks: record.fetch_blueprint_ticks, @@ -78,6 +88,21 @@ function process_bench_record(record, acc) { benchmark_name: record.benchmark_name }) } + + // Adds infos needed for block finalization analysis + if (!isNaN(record.inbox_size)) { + acc.block_finalization.push(record) + } + if (!isNaN(record.block_finalize)) { + // add block_finalize info to last record if same benchmark + let last_record = acc.block_finalization.pop() + if (last_record.benchmark_name == record.benchmark_name) { + last_record.block_finalize = record.block_finalize + acc.block_finalization.push(last_record) + } else { + console.error("[Error] couldn't find correct finalize information") + } + } } function process_transaction_record(record, acc) { diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/block_finalization.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/block_finalization.js new file mode 100644 index 000000000000..0a2ac9b25588 --- /dev/null +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/block_finalization.js @@ -0,0 +1,23 @@ +// SPDX-FileCopyrightText: 2023 Marigold +// +// SPDX-License-Identifier: MIT + +const path = require('node:path') +module.exports = { print_analysis } +const fs = require('fs'); +const csv = require('csv-stringify/sync') +const utils = require("./utils") +const OUTPUT = 'block_finalization_data.csv' +const UPPER_BOUND = 125000000 + +function print_analysis(infos, dir = "analysis_result") { + let mlr = utils.make_lr(infos.block_finalization, (x) => x.nb_tx, (y) => y.block_finalize) + console.log(`Linear Regression: ${utils.print_lr(mlr, "nbtx")} `) + const csv_config = { + header: true, + columns: ["benchmark_name", "inbox_size", "nb_tx", "block_finalize"] + }; + fs.writeFileSync(path.format({ dir, name: OUTPUT }), csv.stringify(infos.block_finalization, csv_config)) + console.log(`current model: Y = ${UPPER_BOUND} `) + return utils.print_summary_errors(infos.block_finalization, datum => { return datum.block_finalize - UPPER_BOUND }) +} -- GitLab From 9f60629a4d34a538ec101f4cf28fda182043666f Mon Sep 17 00:00:00 2001 From: pecornilleau Date: Thu, 2 Nov 2023 
19:07:58 +0100 Subject: [PATCH 06/10] EVM: Bench: analyze tx register data --- .../benchmarks/scripts/analysis/analysis.js | 14 +++++- .../scripts/analysis/tx_register.js | 50 +++++++++++++++++++ .../benchmarks/scripts/analysis/utils.js | 17 +++++-- 3 files changed, 75 insertions(+), 6 deletions(-) create mode 100644 etherlink/kernel_evm/benchmarks/scripts/analysis/tx_register.js diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js index b00afdf13e58..9d99b0a72bfa 100644 --- a/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js @@ -7,6 +7,7 @@ const { is_transfer, is_create, is_transaction, BASE_GAS } = require('./utils') const fs = require('fs') const fetch = require('./fetch') const block_finalization = require('./block_finalization') +const tx_register = require('./tx_register') const number_formatter_compact = Intl.NumberFormat('en', { notation: 'compact', compactDisplay: 'long' }); const number_formatter = Intl.NumberFormat('en', {}); @@ -28,7 +29,8 @@ function init_analysis() { nb_transfer: 0, kernel_runs: [], fetch_data: [], - block_finalization: [] + block_finalization: [], + tx_register: [], }; return empty @@ -44,6 +46,10 @@ function print_analysis(infos) { console.info(`----------------------------------`) let error_block_finalization = block_finalization.print_analysis(infos) console.info(`-------------------------------------------------------`) + console.info(`Transaction Registering Analysis`) + console.info(`----------------------------------`) + let error_register = tx_register.print_analysis(infos) + console.info(`-------------------------------------------------------`) console.info(`Kernels infos`) console.info(`----------------------------------`) console.info(`Decoding: ${pp(infos.decode)} ticks`) @@ -62,7 +68,7 @@ function print_analysis(infos) { console.info(`Number of kernel run: ${infos.nb_kernel_run}`) console.info(`Number of blocks: ${infos.block_finalization.length}`) console.info(`-------------------------------------------------------`) - return error_fetch + error_block_finalization + return error_fetch + error_block_finalization + error_register } @@ -109,6 +115,10 @@ function process_transaction_record(record, acc) { acc.signatures.push(record.signature_verification_ticks) if (is_transfer(record)) process_transfer(record, acc) else process_execution(record, acc) + + // Adds infos for tx registration analysis + if (!isNaN(record.tx_size) && !isNaN(record.store_transaction_object_ticks)) + acc.tx_register.push(record) } function process_transfer(record, acc) { diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/tx_register.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/tx_register.js new file mode 100644 index 000000000000..65b1a34bf8c1 --- /dev/null +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/tx_register.js @@ -0,0 +1,50 @@ +// SPDX-FileCopyrightText: 2023 Marigold +// +// SPDX-License-Identifier: MIT + +const path = require('node:path') +module.exports = { print_analysis } +const fs = require('fs'); +const csv = require('csv-stringify/sync'); +const utils = require("./utils") +const OUTPUT = 'register_tx_data.csv' +const MODEL_OBJ = { intercept: 200000, coef: 880 } +const MODEL_RECEIPT = { intercept: 200000, coef: 960 } +const MODEL_LOGBLOOM = { intercept: 5300, coef: 85000 } + +function print_analysis(infos, dir = "analysis_result") { + + // transaction object + 
console.log(`[object] Current model: ${utils.print_model(MODEL_OBJ, "size")}`) + let obj_lr = utils.make_lr(infos.tx_register, (x) => x.tx_size, (y) => y.store_transaction_object_ticks) + console.log(`[object] Computed LR: ${utils.print_lr(obj_lr)} `) + let error_object = utils.print_summary_errors(infos.tx_register, datum => { return datum.tx_size - utils.predict_linear_model(MODEL_OBJ, datum.receipt_size) }, "[object]") + fs.writeFileSync(path.format({ dir, name: "object_" + OUTPUT }), csv.stringify(infos.tx_register, { + header: true, + columns: ["benchmark_name", "tx_size", "store_transaction_object_ticks",] + })) + + // receipt + console.log(`[receipt] Current model: ${utils.print_model(MODEL_RECEIPT, "size")}`) + let receipt_lr = utils.make_lr(infos.tx_register, (x) => x.receipt_size, (y) => y.store_receipt_ticks) + console.log(`[receipt] Computed LR: ${utils.print_lr(receipt_lr)} `) + let error_receipt = utils.print_summary_errors(infos.tx_register, datum => { return datum.store_receipt_ticks - utils.predict_linear_model(MODEL_RECEIPT, datum.receipt_size) }, "[receipt]") + fs.writeFileSync(path.format({ dir, name: "receipt_" + OUTPUT }), csv.stringify(infos.tx_register, { + header: true, + columns: ["benchmark_name", "receipt_size", "store_receipt_ticks"] + })) + + // bloom + console.log(`[bloom] Current model: ${utils.print_model(MODEL_LOGBLOOM, "size")}`) + let bloom_lr = utils.make_lr(infos.tx_register, (x) => x.bloom_size, (y) => y.logs_to_bloom) + console.log(`[bloom] Computed LR: ${utils.print_lr(bloom_lr)} `) + let error_bloom = utils.print_summary_errors(infos.tx_register, datum => { return datum.logs_to_bloom - utils.predict_linear_model(MODEL_LOGBLOOM, datum.bloom_size) }, "[bloom]") + fs.writeFileSync(path.format({ dir, name: "bloom_" + OUTPUT }), csv.stringify(infos.tx_register, { + header: true, + columns: ["benchmark_name", "bloom_size", "logs_to_bloom"] + })) + + let errors = error_receipt + error_object + error_bloom + console.log(`Total errors: ${errors}`) + return errors +} diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/utils.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/utils.js index 5ebbf0305b9f..f8dc3d9f4b41 100644 --- a/etherlink/kernel_evm/benchmarks/scripts/analysis/utils.js +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/utils.js @@ -7,7 +7,7 @@ const CREATE_STORAGE_CUTOFF = 600_000 const MLR = require("ml-regression-multivariate-linear") -module.exports = { is_transfer, is_create, is_transaction, BASE_GAS, make_lr, print_lr, print_summary_errors } +module.exports = { is_transfer, is_create, is_transaction, BASE_GAS, make_lr, print_lr, print_summary_errors, print_model, predict_linear_model } function is_transfer(record) { return record.gas_cost == BASE_GAS @@ -43,14 +43,23 @@ function print_lr(lr, var_name = "size") { else return "no linear regression available" } -function print_summary_errors(data, compute_error) { +function print_summary_errors(data, compute_error, prefix = "") { let max_error_current = 0; let nb_error = 0 for (datum of data) { let error = compute_error(datum) if (error > 0) nb_error += 1 - max_error_current = Math.max(max_error_current, error) + if (!isNaN(error)) max_error_current = Math.max(max_error_current, error) } - console.log(`nb of errors: ${nb_error} ; maximum error: ${max_error_current} ticks`) + console.log(`${prefix} nb of errors: ${nb_error} ; maximum error: ${max_error_current} ticks`) return nb_error +} + +function print_model(model, var_name) { + return `Y = ${model.intercept} + 
${model.coef} * ${var_name}` +} + +function predict_linear_model(model, x) { + if (isNaN(x)) return model.intercept + return model.intercept + model.coef * x } \ No newline at end of file -- GitLab From cbc81a4dbe3da6843b585e67c25af29d80ba87da Mon Sep 17 00:00:00 2001 From: pecornilleau Date: Thu, 2 Nov 2023 19:14:22 +0100 Subject: [PATCH 07/10] EVM: Bench: analyze tx overhead data --- .../benchmarks/scripts/analysis/analysis.js | 11 +++++++ .../scripts/analysis/tx_overhead.js | 30 +++++++++++++++++++ 2 files changed, 41 insertions(+) create mode 100644 etherlink/kernel_evm/benchmarks/scripts/analysis/tx_overhead.js diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js index 9d99b0a72bfa..cd43b950b338 100644 --- a/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js @@ -8,6 +8,7 @@ const fs = require('fs') const fetch = require('./fetch') const block_finalization = require('./block_finalization') const tx_register = require('./tx_register') +const tx_overhead = require('./tx_overhead') const number_formatter_compact = Intl.NumberFormat('en', { notation: 'compact', compactDisplay: 'long' }); const number_formatter = Intl.NumberFormat('en', {}); @@ -31,6 +32,7 @@ function init_analysis() { fetch_data: [], block_finalization: [], tx_register: [], + tx_overhead: [] }; return empty @@ -50,6 +52,11 @@ function print_analysis(infos) { console.info(`----------------------------------`) let error_register = tx_register.print_analysis(infos) console.info(`-------------------------------------------------------`) + console.info(`Transaction Overhead Analysis`) + console.info(`----------------------------------`) + // model is known to fall short as an overapproximation + let _error_tx_overhead = tx_overhead.print_analysis(infos) + console.info(`-------------------------------------------------------`) console.info(`Kernels infos`) console.info(`----------------------------------`) console.info(`Decoding: ${pp(infos.decode)} ticks`) @@ -119,6 +126,10 @@ function process_transaction_record(record, acc) { // Adds infos for tx registration analysis if (!isNaN(record.tx_size) && !isNaN(record.store_transaction_object_ticks)) acc.tx_register.push(record) + + // Adds infos for transaction overhead analysis + if (!isNaN(record.tx_size) && !isNaN(record.sputnik_runtime_ticks) && !isNaN(record.run_transaction_ticks)) + acc.tx_overhead.push(record) } function process_transfer(record, acc) { diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/tx_overhead.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/tx_overhead.js new file mode 100644 index 000000000000..f6fcfcc00d32 --- /dev/null +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/tx_overhead.js @@ -0,0 +1,30 @@ +// SPDX-FileCopyrightText: 2023 Marigold +// +// SPDX-License-Identifier: MIT + +const path = require('node:path') +module.exports = { print_analysis } +const fs = require('fs'); +const csv = require('csv-stringify/sync'); +const utils = require("./utils") +const OUTPUT = 'tx_overhead_data.csv' +const MODEL = { intercept: 1_150_000, coef: 880 } + +function print_analysis(infos, dir = "analysis_result") { + const data = infos.tx_overhead; + for (datum of data) { + datum.tx_overhead = datum.run_transaction_ticks - datum.sputnik_runtime_ticks + } + console.log(`Current model: ${utils.print_model(MODEL, "tx_size")}`) + let lr = utils.make_lr(infos.tx_register, (x) => x.tx_size, 
(y) => y.tx_overhead) + console.log(`Computed LR: ${utils.print_lr(lr)} `) + let error = utils.print_summary_errors(infos.tx_register, datum => { return datum.tx_overhead - utils.predict_linear_model(MODEL, datum.tx_size) }) + + const csv_config = { + header: true, + columns: ["benchmark_name", "status", "gas_cost", "tx_size", "run_transaction_ticks", "sputnik_runtime_ticks", "tx_overhead"] + }; + fs.writeFileSync(path.format({ dir, name: OUTPUT }), csv.stringify(data, csv_config)) + + return error +} \ No newline at end of file -- GitLab From de47e43a26a18d3557f60e03a6e9fc945d08703f Mon Sep 17 00:00:00 2001 From: pecornilleau Date: Thu, 9 Nov 2023 12:56:32 +0100 Subject: [PATCH 08/10] EVM: Bench: analyze queue store and read --- .../benchmarks/scripts/analysis/analysis.js | 11 ++++-- .../benchmarks/scripts/analysis/queue.js | 34 +++++++++++++++++++ 2 files changed, 43 insertions(+), 2 deletions(-) create mode 100644 etherlink/kernel_evm/benchmarks/scripts/analysis/queue.js diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js index cd43b950b338..4a197806c2c6 100644 --- a/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js @@ -9,6 +9,7 @@ const fetch = require('./fetch') const block_finalization = require('./block_finalization') const tx_register = require('./tx_register') const tx_overhead = require('./tx_overhead') +const queue = require('./queue') const number_formatter_compact = Intl.NumberFormat('en', { notation: 'compact', compactDisplay: 'long' }); const number_formatter = Intl.NumberFormat('en', {}); @@ -32,7 +33,8 @@ function init_analysis() { fetch_data: [], block_finalization: [], tx_register: [], - tx_overhead: [] + tx_overhead: [], + runs_infos: [] }; return empty @@ -48,6 +50,10 @@ function print_analysis(infos) { console.info(`----------------------------------`) let error_block_finalization = block_finalization.print_analysis(infos) console.info(`-------------------------------------------------------`) + console.info(`Queue read and storage analysis`) + console.info(`----------------------------------`) + let error_queue = queue.print_analysis(infos) + console.info(`-------------------------------------------------------`) console.info(`Transaction Registering Analysis`) console.info(`----------------------------------`) let error_register = tx_register.print_analysis(infos) @@ -75,7 +81,7 @@ function print_analysis(infos) { console.info(`Number of kernel run: ${infos.nb_kernel_run}`) console.info(`Number of blocks: ${infos.block_finalization.length}`) console.info(`-------------------------------------------------------`) - return error_fetch + error_block_finalization + error_register + return error_fetch + error_block_finalization + error_register + error_queue } @@ -85,6 +91,7 @@ function process_record(record, acc) { } function process_bench_record(record, acc) { + acc.runs_infos.push(record) if (!isNaN(record.interpreter_decode_ticks)) { acc.nb_kernel_run += 1 acc.decode = Math.max(acc.decode, record.interpreter_decode_ticks) diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/queue.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/queue.js new file mode 100644 index 000000000000..7e6c1f71eb3f --- /dev/null +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/queue.js @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2023 Marigold +// +// SPDX-License-Identifier: MIT + +const path = 
require('node:path') +module.exports = { print_analysis } +const fs = require('fs'); +const csv = require('csv-stringify/sync') +const utils = require("./utils") +const OUTPUT_STORE = 'queue_storing_data.csv' +const OUTPUT_READ = 'queue_reading_data.csv' +const READ_UPPER_BOUND = 500_000_000 +const STORE_UPPER_BOUND = 1_100_000_000 + +function print_analysis(infos, dir = "analysis_result") { + const read_data = infos.runs_infos.filter((d) => !!d.queue_read); + let mlr_read = utils.make_lr(read_data, (x) => x.queue_read, (y) => y.queue_read_ticks) + console.log(`[read] Computed LR: ${utils.print_lr(mlr_read)} `) + let error_read = utils.print_summary_errors(read_data, datum => { return datum.queue_read_ticks - READ_UPPER_BOUND }, "[read]") + fs.writeFileSync(path.format({ dir, name: OUTPUT_READ }), csv.stringify(read_data), { + header: true, + columns: ["benchmark_name", "queue_read", "queue_read_ticks"] + }) + + const store_data = infos.runs_infos.filter((d) => !!d.queue_store); + let mlr_store = utils.make_lr(store_data, (x) => x.queue_store, (y) => y.queue_store_ticks) + console.log(`[store] Computed LR: ${utils.print_lr(mlr_store)} `) + let error_store = utils.print_summary_errors(store_data, datum => { return datum.queue_store_ticks - STORE_UPPER_BOUND }, "[store]") + fs.writeFileSync(path.format({ dir, name: OUTPUT_STORE }), csv.stringify(store_data), { + header: true, + columns: ["benchmark_name", "queue_store", "queue_store_ticks"] + }) + return error_read + error_store; +} \ No newline at end of file -- GitLab From ea775c2ce5acfc4b130b44b0bee5a90ecef21dd9 Mon Sep 17 00:00:00 2001 From: pecornilleau Date: Thu, 30 Nov 2023 16:29:53 +0100 Subject: [PATCH 09/10] EVM: Bench: bit of refactoring --- .../scripts/analysis/block_finalization.js | 10 ++---- .../benchmarks/scripts/analysis/fetch.js | 10 ++---- .../benchmarks/scripts/analysis/queue.js | 15 +++------ .../scripts/analysis/tx_overhead.js | 8 +---- .../scripts/analysis/tx_register.js | 33 +++++++++++-------- .../benchmarks/scripts/analysis/utils.js | 14 +++++++- 6 files changed, 43 insertions(+), 47 deletions(-) diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/block_finalization.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/block_finalization.js index 0a2ac9b25588..fa93e67a5408 100644 --- a/etherlink/kernel_evm/benchmarks/scripts/analysis/block_finalization.js +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/block_finalization.js @@ -4,8 +4,6 @@ const path = require('node:path') module.exports = { print_analysis } -const fs = require('fs'); -const csv = require('csv-stringify/sync') const utils = require("./utils") const OUTPUT = 'block_finalization_data.csv' const UPPER_BOUND = 125000000 @@ -13,11 +11,9 @@ const UPPER_BOUND = 125000000 function print_analysis(infos, dir = "analysis_result") { let mlr = utils.make_lr(infos.block_finalization, (x) => x.nb_tx, (y) => y.block_finalize) console.log(`Linear Regression: ${utils.print_lr(mlr, "nbtx")} `) - const csv_config = { - header: true, - columns: ["benchmark_name", "inbox_size", "nb_tx", "block_finalize"] - }; - fs.writeFileSync(path.format({ dir, name: OUTPUT }), csv.stringify(infos.block_finalization, csv_config)) + + utils.print_csv(dir, OUTPUT, infos.block_finalization, ["benchmark_name", "inbox_size", "nb_tx", "block_finalize"]) + console.log(`current model: Y = ${UPPER_BOUND} `) return utils.print_summary_errors(infos.block_finalization, datum => { return datum.block_finalize - UPPER_BOUND }) } diff --git 
a/etherlink/kernel_evm/benchmarks/scripts/analysis/fetch.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/fetch.js index 37cee067d0e5..18d10a6c3aba 100644 --- a/etherlink/kernel_evm/benchmarks/scripts/analysis/fetch.js +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/fetch.js @@ -4,20 +4,14 @@ const path = require('node:path') module.exports = { print_fetch_analysis } -const fs = require('fs'); -const csv = require('csv-stringify/sync') const utils = require("./utils") const OUTPUT = 'fetch_data.csv' const UPPER_BOUND = 1_000_000_000 function print_fetch_analysis(infos, dir = "analysis_result") { // prepare data - const csv_config = { - header: true, - columns: ["benchmark_name", "size", "nb_tx", "ticks"] - } - fs.mkdirSync(dir, { recursive: true }) - fs.writeFileSync(path.format({ dir, name: OUTPUT }), csv.stringify(infos.fetch_data, csv_config)) + + utils.print_csv(dir, OUTPUT, infos.fetch_data, ["benchmark_name", "size", "nb_tx", "ticks"]) // compute errors console.log(`current model: Y = ${UPPER_BOUND}`) diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/queue.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/queue.js index 7e6c1f71eb3f..d33db6490b5c 100644 --- a/etherlink/kernel_evm/benchmarks/scripts/analysis/queue.js +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/queue.js @@ -4,8 +4,6 @@ const path = require('node:path') module.exports = { print_analysis } -const fs = require('fs'); -const csv = require('csv-stringify/sync') const utils = require("./utils") const OUTPUT_STORE = 'queue_storing_data.csv' const OUTPUT_READ = 'queue_reading_data.csv' @@ -17,18 +15,15 @@ function print_analysis(infos, dir = "analysis_result") { let mlr_read = utils.make_lr(read_data, (x) => x.queue_read, (y) => y.queue_read_ticks) console.log(`[read] Computed LR: ${utils.print_lr(mlr_read)} `) let error_read = utils.print_summary_errors(read_data, datum => { return datum.queue_read_ticks - READ_UPPER_BOUND }, "[read]") - fs.writeFileSync(path.format({ dir, name: OUTPUT_READ }), csv.stringify(read_data), { - header: true, - columns: ["benchmark_name", "queue_read", "queue_read_ticks"] - }) + + utils.print_csv(dir, OUTPUT_READ, read_data, ["benchmark_name", "queue_read", "queue_read_ticks"]) const store_data = infos.runs_infos.filter((d) => !!d.queue_store); let mlr_store = utils.make_lr(store_data, (x) => x.queue_store, (y) => y.queue_store_ticks) console.log(`[store] Computed LR: ${utils.print_lr(mlr_store)} `) let error_store = utils.print_summary_errors(store_data, datum => { return datum.queue_store_ticks - STORE_UPPER_BOUND }, "[store]") - fs.writeFileSync(path.format({ dir, name: OUTPUT_STORE }), csv.stringify(store_data), { - header: true, - columns: ["benchmark_name", "queue_store", "queue_store_ticks"] - }) + + utils.print_csv(dir, OUTPUT_STORE, store_data, ["benchmark_name", "queue_store", "queue_store_ticks"]) + return error_read + error_store; } \ No newline at end of file diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/tx_overhead.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/tx_overhead.js index f6fcfcc00d32..37b18adc37a0 100644 --- a/etherlink/kernel_evm/benchmarks/scripts/analysis/tx_overhead.js +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/tx_overhead.js @@ -4,8 +4,6 @@ const path = require('node:path') module.exports = { print_analysis } -const fs = require('fs'); -const csv = require('csv-stringify/sync'); const utils = require("./utils") const OUTPUT = 'tx_overhead_data.csv' const MODEL = { intercept: 1_150_000, coef: 880 
} @@ -20,11 +18,7 @@ function print_analysis(infos, dir = "analysis_result") { console.log(`Computed LR: ${utils.print_lr(lr)} `) let error = utils.print_summary_errors(infos.tx_register, datum => { return datum.tx_overhead - utils.predict_linear_model(MODEL, datum.tx_size) }) - const csv_config = { - header: true, - columns: ["benchmark_name", "status", "gas_cost", "tx_size", "run_transaction_ticks", "sputnik_runtime_ticks", "tx_overhead"] - }; - fs.writeFileSync(path.format({ dir, name: OUTPUT }), csv.stringify(data, csv_config)) + utils.print_csv(dir, OUTPUT, data, ["benchmark_name", "status", "gas_cost", "tx_size", "run_transaction_ticks", "sputnik_runtime_ticks", "tx_overhead"]) return error } \ No newline at end of file diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/tx_register.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/tx_register.js index 65b1a34bf8c1..942e1753b019 100644 --- a/etherlink/kernel_evm/benchmarks/scripts/analysis/tx_register.js +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/tx_register.js @@ -4,8 +4,6 @@ const path = require('node:path') module.exports = { print_analysis } -const fs = require('fs'); -const csv = require('csv-stringify/sync'); const utils = require("./utils") const OUTPUT = 'register_tx_data.csv' const MODEL_OBJ = { intercept: 200000, coef: 880 } @@ -14,35 +12,42 @@ const MODEL_LOGBLOOM = { intercept: 5300, coef: 85000 } function print_analysis(infos, dir = "analysis_result") { + function print_csv(name, columns) { + utils.print_csv(dir, name + OUTPUT, infos.tx_register, columns) + } + // transaction object console.log(`[object] Current model: ${utils.print_model(MODEL_OBJ, "size")}`) let obj_lr = utils.make_lr(infos.tx_register, (x) => x.tx_size, (y) => y.store_transaction_object_ticks) console.log(`[object] Computed LR: ${utils.print_lr(obj_lr)} `) let error_object = utils.print_summary_errors(infos.tx_register, datum => { return datum.tx_size - utils.predict_linear_model(MODEL_OBJ, datum.receipt_size) }, "[object]") - fs.writeFileSync(path.format({ dir, name: "object_" + OUTPUT }), csv.stringify(infos.tx_register, { - header: true, - columns: ["benchmark_name", "tx_size", "store_transaction_object_ticks",] - })) + + print_csv( + "object_", + ["benchmark_name", "tx_size", "store_transaction_object_ticks",] + ) // receipt console.log(`[receipt] Current model: ${utils.print_model(MODEL_RECEIPT, "size")}`) let receipt_lr = utils.make_lr(infos.tx_register, (x) => x.receipt_size, (y) => y.store_receipt_ticks) console.log(`[receipt] Computed LR: ${utils.print_lr(receipt_lr)} `) let error_receipt = utils.print_summary_errors(infos.tx_register, datum => { return datum.store_receipt_ticks - utils.predict_linear_model(MODEL_RECEIPT, datum.receipt_size) }, "[receipt]") - fs.writeFileSync(path.format({ dir, name: "receipt_" + OUTPUT }), csv.stringify(infos.tx_register, { - header: true, - columns: ["benchmark_name", "receipt_size", "store_receipt_ticks"] - })) + + print_csv( + "receipt_", + ["benchmark_name", "receipt_size", "store_receipt_ticks"] + ) // bloom console.log(`[bloom] Current model: ${utils.print_model(MODEL_LOGBLOOM, "size")}`) let bloom_lr = utils.make_lr(infos.tx_register, (x) => x.bloom_size, (y) => y.logs_to_bloom) console.log(`[bloom] Computed LR: ${utils.print_lr(bloom_lr)} `) let error_bloom = utils.print_summary_errors(infos.tx_register, datum => { return datum.logs_to_bloom - utils.predict_linear_model(MODEL_LOGBLOOM, datum.bloom_size) }, "[bloom]") - fs.writeFileSync(path.format({ dir, name: "bloom_" + 
OUTPUT }), csv.stringify(infos.tx_register, { - header: true, - columns: ["benchmark_name", "bloom_size", "logs_to_bloom"] - })) + + print_csv( + "bloom_", + ["benchmark_name", "bloom_size", "logs_to_bloom"] + ) let errors = error_receipt + error_object + error_bloom console.log(`Total errors: ${errors}`) diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/utils.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/utils.js index f8dc3d9f4b41..68d8e977e924 100644 --- a/etherlink/kernel_evm/benchmarks/scripts/analysis/utils.js +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/utils.js @@ -7,7 +7,11 @@ const CREATE_STORAGE_CUTOFF = 600_000 const MLR = require("ml-regression-multivariate-linear") -module.exports = { is_transfer, is_create, is_transaction, BASE_GAS, make_lr, print_lr, print_summary_errors, print_model, predict_linear_model } +const path = require('node:path') +const fs = require('fs'); +const csv = require('csv-stringify/sync'); + +module.exports = { is_transfer, is_create, is_transaction, BASE_GAS, make_lr, print_lr, print_summary_errors, print_model, predict_linear_model, print_csv } function is_transfer(record) { return record.gas_cost == BASE_GAS @@ -62,4 +66,12 @@ function print_model(model, var_name) { function predict_linear_model(model, x) { if (isNaN(x)) return model.intercept return model.intercept + model.coef * x +} + +function print_csv(dir, name, data_array, columns) { + fs.mkdirSync(dir, { recursive: true }) + fs.writeFileSync(path.format({ dir, name }), csv.stringify(data_array, { + header: true, + columns + })) } \ No newline at end of file -- GitLab From afed9148b05ccd110470ee1d97c523bfaee16b1c Mon Sep 17 00:00:00 2001 From: pecornilleau Date: Mon, 4 Dec 2023 10:39:51 +0100 Subject: [PATCH 10/10] EVM: bench: cleanup --- etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js | 1 - 1 file changed, 1 deletion(-) diff --git a/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js b/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js index 4a197806c2c6..df4847adcd96 100644 --- a/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js +++ b/etherlink/kernel_evm/benchmarks/scripts/analysis/analysis.js @@ -3,7 +3,6 @@ // SPDX-License-Identifier: MIT const { is_transfer, is_create, is_transaction, BASE_GAS } = require('./utils') -// const { ChartConfiguration } = require('chart') const fs = require('fs') const fetch = require('./fetch') const block_finalization = require('./block_finalization') -- GitLab
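
For reference, the ml-regression-multivariate-linear dependency introduced above is consumed through the make_lr/print_lr helpers in scripts/analysis/utils.js. A minimal standalone sketch of that usage pattern, with hypothetical sizes and tick counts for a single regressor, looks like this:

// Sketch only: hypothetical data, mirroring make_lr/print_lr from
// etherlink/kernel_evm/benchmarks/scripts/analysis/utils.js.
const MLR = require('ml-regression-multivariate-linear')

// X: one row per observation, each row holding the regressors (here just [size]).
// Y: one row per observation, each row holding the observed tick count.
const X = [[100], [250], [400], [800]]
const Y = [[210_000], [340_000], [470_000], [820_000]]

const lr = new MLR(X, Y)

// With the default intercept, weights[0][0] is the coefficient of the single
// regressor and weights[1][0] is the intercept, which is what print_lr
// formats as `Y = intercept + coef * size`.
console.log(`Y = ${lr.weights[1][0].toFixed()} + ${lr.weights[0][0].toFixed()} * size`)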