diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml new file mode 100644 index 00000000..27c12674 --- /dev/null +++ b/.github/workflows/deploy.yml @@ -0,0 +1,22 @@ +--- +name: deploy + +on: + pull_request: + push: + branches: [main] + +concurrency: + group: deploy-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Deploy devnet + uses: kurtosis-tech/kurtosis-github-action@v1 + with: + path: . + args: cdk-params.yml diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml new file mode 100644 index 00000000..a4d4d9cd --- /dev/null +++ b/.github/workflows/lint.yml @@ -0,0 +1,54 @@ +--- +name: lint + +on: + pull_request: + push: + branches: [main] + +concurrency: + group: lint-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + starlark: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install kurtosis + run: | + echo "deb [trusted=yes] https://apt.fury.io/kurtosis-tech/ /" | sudo tee /etc/apt/sources.list.d/kurtosis.list + sudo apt update + sudo apt install kurtosis-cli + kurtosis analytics disable + - name: Run kurtosis linter + run: kurtosis lint ${{ github.workspace }} + + yaml: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install yamllint + run: pip install yamllint + - name: Run yamllint + run: yamllint --config-file .yamllint.yml . + + hadolint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install hadolint + run: | + sudo wget -O /usr/local/bin/hadolint https://github.com/hadolint/hadolint/releases/latest/download/hadolint-Linux-x86_64 + sudo chmod +x /usr/local/bin/hadolint + - name: Run hadolint + run: find . -type f -name 'Dockerfile*' | sort | xargs -I {} hadolint --config .hadolint.yml {} + + shellcheck: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Install shellcheck + run: sudo apt install shellcheck + - name: Run shellcheck + run: find . -type f -name '*.sh' | sort | xargs -I {} shellcheck {} diff --git a/.hadolint.yml b/.hadolint.yml new file mode 100644 index 00000000..0f1be395 --- /dev/null +++ b/.hadolint.yml @@ -0,0 +1,5 @@ +--- +strict-labels: true # Do not permit labels other than specified in `label-schema`. +label-schema: + author: email + description: text diff --git a/.yamllint.yml b/.yamllint.yml new file mode 100644 index 00000000..0c01e2b9 --- /dev/null +++ b/.yamllint.yml @@ -0,0 +1,3 @@ +--- +rules: + line-length: disable diff --git a/main.star b/main.star index 4ea8efc4..7eaa3cc4 100644 --- a/main.star +++ b/main.star @@ -1,4 +1,6 @@ -ethereum_package = import_module("github.com/kurtosis-tech/ethereum-package/main.star@2.0.0") +ethereum_package = import_module( + "github.com/kurtosis-tech/ethereum-package/main.star@2.0.0" +) CONTRACTS_IMAGE = "node:20-bookworm" CONTRACTS_BRANCH = "develop" @@ -6,8 +8,8 @@ CONTRACTS_BRANCH = "develop" POSTGRES_IMAGE = "postgres:16.2" POSTGRES_PORT_ID = "postgres" -def run(plan, args): +def run(plan, args): deployment_label = args["deployment_idx"] # Determine system architecture. @@ -23,97 +25,80 @@ def run(plan, args): # Make ethereum package available. 
For now we'll stick with most # of the defaults - ethereum_package.run(plan, { - "additional_services": [], - "network_params": { - "network_id": args["l1_network_id"], - "preregistered_validator_keys_mnemonic": args["l1_preallocated_mnemonic"], - } - }) + ethereum_package.run( + plan, + { + "additional_services": [], + "network_params": { + "network_id": args["l1_network_id"], + "preregistered_validator_keys_mnemonic": args[ + "l1_preallocated_mnemonic" + ], + }, + }, + ) # Deploy Parameters - deploy_parameters_template = read_file( - src = "./templates/deploy_parameters.json" - ) + deploy_parameters_template = read_file(src="./templates/deploy_parameters.json") deploy_parameters_artifact = plan.render_templates( - config = { + config={ "deploy_parameters.json": struct( - template=deploy_parameters_template, - data=args + template=deploy_parameters_template, data=args ) } ) # Create Rollup Paramaters create_rollup_parameters_template = read_file( - src = "./templates/create_rollup_parameters.json" + src="./templates/create_rollup_parameters.json" ) create_rollup_parameters_artifact = plan.render_templates( - config = { + config={ "create_rollup_parameters.json": struct( - template=create_rollup_parameters_template, - data=args + template=create_rollup_parameters_template, data=args ) } ) # Contract Deployment script contract_deployment_script_template = read_file( - src = "./templates/run-contract-setup.sh" + src="./templates/run-contract-setup.sh" ) contract_deployment_script_artifact = plan.render_templates( - config = { + config={ "run-contract-setup.sh": struct( - template=contract_deployment_script_template, - data=args + template=contract_deployment_script_template, data=args ) } ) # Node configuration - node_config_template = read_file( - src = "./templates/node-config.toml" - ) + node_config_template = read_file(src="./templates/node-config.toml") node_config_artifact = plan.render_templates( - config = { - "node-config.toml": struct( - template=node_config_template, - data=args - ) - } + config={"node-config.toml": struct(template=node_config_template, data=args)} ) # Bridge configuration - bridge_config_template = read_file( - src = "./templates/bridge-config.toml" - ) + bridge_config_template = read_file(src="./templates/bridge-config.toml") bridge_config_artifact = plan.render_templates( - config = { - "bridge-config.toml": struct( - template=bridge_config_template, - data=args - ) + config={ + "bridge-config.toml": struct(template=bridge_config_template, data=args) } ) # Prover configuration - prover_config_template = read_file( - src = "./templates/prover-config.json" - ) + prover_config_template = read_file(src="./templates/prover-config.json") prover_config_artifact = plan.render_templates( - config = { - "prover-config.json": struct( - template=prover_config_template, - data=args - ) + config={ + "prover-config.json": struct(template=prover_config_template, data=args) } ) - zkevm_etc_directory = Directory(persistent_key = "zkevm-artifacts") + zkevm_etc_directory = Directory(persistent_key="zkevm-artifacts") plan.add_service( - name = "contracts"+args["deployment_idx"], - config = ServiceConfig( - image = CONTRACTS_IMAGE, - files = { + name="contracts" + args["deployment_idx"], + config=ServiceConfig( + image=CONTRACTS_IMAGE, + files={ "/opt/zkevm": zkevm_etc_directory, "/opt/contract-deploy/": Directory( - artifact_names = [ + artifact_names=[ deploy_parameters_artifact, create_rollup_parameters_artifact, contract_deployment_script_artifact, @@ -121,7 +106,7 @@ def 
run(plan, args): prover_config_artifact, bridge_config_artifact, ] - ) + ), }, ), ) @@ -129,34 +114,46 @@ def run(plan, args): # check if the contracts were already initialized.. I'm leaving # this here for now, but it's not useful contract_init_stat = plan.exec( - service_name = "contracts"+args["deployment_idx"], - acceptable_codes = [0, 1], - recipe = ExecRecipe(command = ["stat", "/opt/zkevm/.init-complete.lock"]) + service_name="contracts" + args["deployment_idx"], + acceptable_codes=[0, 1], + recipe=ExecRecipe(command=["stat", "/opt/zkevm/.init-complete.lock"]), ) plan.exec( - service_name = "contracts"+args["deployment_idx"], - recipe = ExecRecipe(command = ["git", "clone", "--depth", "1", "-b", args["zkevm_contracts_branch"], args["zkevm_contracts_repo"], "/opt/zkevm-contracts"]) + service_name="contracts" + args["deployment_idx"], + recipe=ExecRecipe( + command=[ + "git", + "clone", + "--depth", + "1", + "-b", + args["zkevm_contracts_branch"], + args["zkevm_contracts_repo"], + "/opt/zkevm-contracts", + ] + ), ) plan.exec( - service_name = "contracts"+args["deployment_idx"], - recipe = ExecRecipe(command = ["chmod", "a+x", "/opt/contract-deploy/run-contract-setup.sh"]) + service_name="contracts" + args["deployment_idx"], + recipe=ExecRecipe( + command=["chmod", "a+x", "/opt/contract-deploy/run-contract-setup.sh"] + ), ) plan.print("Running zkEVM contract deployment. This might take some time...") plan.exec( - service_name = "contracts"+args["deployment_idx"], - recipe = ExecRecipe(command = ["/opt/contract-deploy/run-contract-setup.sh"]) + service_name="contracts" + args["deployment_idx"], + recipe=ExecRecipe(command=["/opt/contract-deploy/run-contract-setup.sh"]), ) zkevm_configs = plan.store_service_files( - service_name = "contracts"+args["deployment_idx"], - src = "/opt/zkevm", - name = "zkevm", - description = "These are the files needed to start various node services" + service_name="contracts" + args["deployment_idx"], + src="/opt/zkevm", + name="zkevm", + description="These are the files needed to start various node services", ) - # plan.stop_service( # name = "contracts"+args["deployment_idx"] # ) @@ -165,251 +162,328 @@ def run(plan, args): # TODO do a big sed for all of these hard coded ports and make them configurable plan.add_service( - name = "zkevm-prover"+args["deployment_idx"], - config = ServiceConfig( - image = args["zkevm_prover_image"], - ports = { - "hash-db-server": PortSpec(args["zkevm_hash_db_port"], application_protocol="grpc"), - "executor-server": PortSpec(args["zkevm_executor_port"], application_protocol="grpc"), - }, - files = { + name="zkevm-prover" + args["deployment_idx"], + config=ServiceConfig( + image=args["zkevm_prover_image"], + ports={ + "hash-db-server": PortSpec( + args["zkevm_hash_db_port"], application_protocol="grpc" + ), + "executor-server": PortSpec( + args["zkevm_executor_port"], application_protocol="grpc" + ), + }, + files={ "/etc/": zkevm_configs, }, - entrypoint = [ - "/bin/bash", "-c" - ], - cmd = [ - "[[ \"{0}\" == \"aarch64\" || \"{0}\" == \"arm64\" ]] && export EXPERIMENTAL_DOCKER_DESKTOP_FORCE_QEMU=1; \ - /usr/local/bin/zkProver -c /etc/zkevm/prover-config.json".format(cpu_arch), + entrypoint=["/bin/bash", "-c"], + cmd=[ + '[[ "{0}" == "aarch64" || "{0}" == "arm64" ]] && export EXPERIMENTAL_DOCKER_DESKTOP_FORCE_QEMU=1; \ + /usr/local/bin/zkProver -c /etc/zkevm/prover-config.json'.format( + cpu_arch + ), ], ), ) plan.add_service( - name = "zkevm-node-synchronizer"+args["deployment_idx"], - config = ServiceConfig( - image = 
args["zkevm_node_image"], - files = { + name="zkevm-node-synchronizer" + args["deployment_idx"], + config=ServiceConfig( + image=args["zkevm_node_image"], + files={ "/etc/": zkevm_configs, }, - ports = { - "pprof": PortSpec(args["zkevm_pprof_port"], application_protocol="http"), - "prometheus": PortSpec(args["zkevm_prometheus_port"], application_protocol="http"), + ports={ + "pprof": PortSpec( + args["zkevm_pprof_port"], application_protocol="http" + ), + "prometheus": PortSpec( + args["zkevm_prometheus_port"], application_protocol="http" + ), }, - entrypoint = [ + entrypoint=[ "/app/zkevm-node", ], - cmd = [ + cmd=[ "run", - "--cfg", "/etc/zkevm/node-config.toml", - "--network", "custom", - "--custom-network-file", "/etc/zkevm/genesis.json", - "--components", "synchronizer" + "--cfg", + "/etc/zkevm/node-config.toml", + "--network", + "custom", + "--custom-network-file", + "/etc/zkevm/genesis.json", + "--components", + "synchronizer", ], ), ) plan.add_service( - name = "zkevm-node-sequencer"+args["deployment_idx"], - config = ServiceConfig( - image = args["zkevm_node_image"], - files = { + name="zkevm-node-sequencer" + args["deployment_idx"], + config=ServiceConfig( + image=args["zkevm_node_image"], + files={ "/etc/": zkevm_configs, }, - ports = { - "trusted-rpc": PortSpec(args["zkevm_rpc_http_port"], application_protocol="http"), - "data-streamer": PortSpec(args["zkevm_data_streamer_port"], application_protocol="datastream"), - "pprof": PortSpec(args["zkevm_pprof_port"], application_protocol="http"), - "prometheus": PortSpec(args["zkevm_prometheus_port"], application_protocol="http"), + ports={ + "trusted-rpc": PortSpec( + args["zkevm_rpc_http_port"], application_protocol="http" + ), + "data-streamer": PortSpec( + args["zkevm_data_streamer_port"], application_protocol="datastream" + ), + "pprof": PortSpec( + args["zkevm_pprof_port"], application_protocol="http" + ), + "prometheus": PortSpec( + args["zkevm_prometheus_port"], application_protocol="http" + ), }, - entrypoint = [ + entrypoint=[ "/app/zkevm-node", ], - cmd = [ + cmd=[ "run", - "--cfg", "/etc/zkevm/node-config.toml", - "--network", "custom", - "--custom-network-file", "/etc/zkevm/genesis.json", - "--components", "sequencer,rpc", - "--http.api", "eth,net,debug,zkevm,txpool,web3", + "--cfg", + "/etc/zkevm/node-config.toml", + "--network", + "custom", + "--custom-network-file", + "/etc/zkevm/genesis.json", + "--components", + "sequencer,rpc", + "--http.api", + "eth,net,debug,zkevm,txpool,web3", ], ), ) plan.add_service( - name = "zkevm-node-sequencersender"+args["deployment_idx"], - config = ServiceConfig( - image = args["zkevm_node_image"], - files = { + name="zkevm-node-sequencersender" + args["deployment_idx"], + config=ServiceConfig( + image=args["zkevm_node_image"], + files={ "/etc/": zkevm_configs, }, - ports = { - "pprof": PortSpec(args["zkevm_pprof_port"], application_protocol="http"), - "prometheus": PortSpec(args["zkevm_prometheus_port"], application_protocol="http"), + ports={ + "pprof": PortSpec( + args["zkevm_pprof_port"], application_protocol="http" + ), + "prometheus": PortSpec( + args["zkevm_prometheus_port"], application_protocol="http" + ), }, - entrypoint = [ + entrypoint=[ "/app/zkevm-node", ], - cmd = [ + cmd=[ "run", - "--cfg", "/etc/zkevm/node-config.toml", - "--network", "custom", - "--custom-network-file", "/etc/zkevm/genesis.json", - "--components", "sequencersender", + "--cfg", + "/etc/zkevm/node-config.toml", + "--network", + "custom", + "--custom-network-file", + "/etc/zkevm/genesis.json", + 
"--components", + "sequencersender", ], ), ) plan.add_service( - name = "zkevm-node-aggregator"+args["deployment_idx"], - config = ServiceConfig( - image = args["zkevm_node_image"], - ports = { - "trusted-aggregator": PortSpec(args["zkevm_aggregator_port"], application_protocol="grpc"), - "pprof": PortSpec(args["zkevm_pprof_port"], application_protocol="http"), - "prometheus": PortSpec(args["zkevm_prometheus_port"], application_protocol="http"), - }, - files = { + name="zkevm-node-aggregator" + args["deployment_idx"], + config=ServiceConfig( + image=args["zkevm_node_image"], + ports={ + "trusted-aggregator": PortSpec( + args["zkevm_aggregator_port"], application_protocol="grpc" + ), + "pprof": PortSpec( + args["zkevm_pprof_port"], application_protocol="http" + ), + "prometheus": PortSpec( + args["zkevm_prometheus_port"], application_protocol="http" + ), + }, + files={ "/etc/": zkevm_configs, }, - entrypoint = [ + entrypoint=[ "/app/zkevm-node", ], - cmd = [ + cmd=[ "run", - "--cfg", "/etc/zkevm/node-config.toml", - "--network", "custom", - "--custom-network-file", "/etc/zkevm/genesis.json", - "--components", "aggregator", + "--cfg", + "/etc/zkevm/node-config.toml", + "--network", + "custom", + "--custom-network-file", + "/etc/zkevm/genesis.json", + "--components", + "aggregator", ], ), ) plan.add_service( - name = "zkevm-node-rpc"+args["deployment_idx"], - config = ServiceConfig( - image = args["zkevm_node_image"], - ports = { - "trusted-rpc": PortSpec(args["zkevm_rpc_http_port"], application_protocol="http"), - "trusted-ws": PortSpec(args["zkevm_rpc_ws_port"], application_protocol="ws"), - "pprof": PortSpec(args["zkevm_pprof_port"], application_protocol="http"), - "prometheus": PortSpec(args["zkevm_prometheus_port"], application_protocol="http"), - }, - files = { + name="zkevm-node-rpc" + args["deployment_idx"], + config=ServiceConfig( + image=args["zkevm_node_image"], + ports={ + "trusted-rpc": PortSpec( + args["zkevm_rpc_http_port"], application_protocol="http" + ), + "trusted-ws": PortSpec( + args["zkevm_rpc_ws_port"], application_protocol="ws" + ), + "pprof": PortSpec( + args["zkevm_pprof_port"], application_protocol="http" + ), + "prometheus": PortSpec( + args["zkevm_prometheus_port"], application_protocol="http" + ), + }, + files={ "/etc/": zkevm_configs, }, - entrypoint = [ + entrypoint=[ "/app/zkevm-node", ], - cmd = [ + cmd=[ "run", - "--cfg", "/etc/zkevm/node-config.toml", - "--network", "custom", - "--custom-network-file", "/etc/zkevm/genesis.json", - "--components", "rpc", - "--http.api", "eth,net,debug,zkevm,txpool,web3", + "--cfg", + "/etc/zkevm/node-config.toml", + "--network", + "custom", + "--custom-network-file", + "/etc/zkevm/genesis.json", + "--components", + "rpc", + "--http.api", + "eth,net,debug,zkevm,txpool,web3", ], ), ) plan.add_service( - name = "zkevm-node-eth-tx-manager"+args["deployment_idx"], - config = ServiceConfig( - image = args["zkevm_node_image"], - files = { + name="zkevm-node-eth-tx-manager" + args["deployment_idx"], + config=ServiceConfig( + image=args["zkevm_node_image"], + files={ "/etc/": zkevm_configs, }, - ports = { - "pprof": PortSpec(args["zkevm_pprof_port"], application_protocol="http"), - "prometheus": PortSpec(args["zkevm_prometheus_port"], application_protocol="http"), + ports={ + "pprof": PortSpec( + args["zkevm_pprof_port"], application_protocol="http" + ), + "prometheus": PortSpec( + args["zkevm_prometheus_port"], application_protocol="http" + ), }, - entrypoint = [ + entrypoint=[ "/app/zkevm-node", ], - cmd = [ + cmd=[ "run", - 
"--cfg", "/etc/zkevm/node-config.toml", - "--network", "custom", - "--custom-network-file", "/etc/zkevm/genesis.json", - "--components", "eth-tx-manager", + "--cfg", + "/etc/zkevm/node-config.toml", + "--network", + "custom", + "--custom-network-file", + "/etc/zkevm/genesis.json", + "--components", + "eth-tx-manager", ], ), ) plan.add_service( - name = "zkevm-node-l2-gas-pricer"+args["deployment_idx"], - config = ServiceConfig( - image = args["zkevm_node_image"], - files = { + name="zkevm-node-l2-gas-pricer" + args["deployment_idx"], + config=ServiceConfig( + image=args["zkevm_node_image"], + files={ "/etc/": zkevm_configs, }, - ports = { - "pprof": PortSpec(args["zkevm_pprof_port"], application_protocol="http"), - "prometheus": PortSpec(args["zkevm_prometheus_port"], application_protocol="http"), + ports={ + "pprof": PortSpec( + args["zkevm_pprof_port"], application_protocol="http" + ), + "prometheus": PortSpec( + args["zkevm_prometheus_port"], application_protocol="http" + ), }, - entrypoint = [ + entrypoint=[ "/app/zkevm-node", ], - cmd = [ + cmd=[ "run", - "--cfg", "/etc/zkevm/node-config.toml", - "--network", "custom", - "--custom-network-file", "/etc/zkevm/genesis.json", - "--components", "l2gaspricer", + "--cfg", + "/etc/zkevm/node-config.toml", + "--network", + "custom", + "--custom-network-file", + "/etc/zkevm/genesis.json", + "--components", + "l2gaspricer", ], ), ) plan.add_service( - name = "zkevm-bridge-service"+args["deployment_idx"], - config = ServiceConfig( - image = "hermeznetwork/zkevm-bridge-service:v0.4.2", - ports = { - "bridge-rpc": PortSpec(args["zkevm_bridge_rpc_port"], application_protocol="http"), - "bridge-grpc": PortSpec(args["zkevm_bridge_grpc_port"], application_protocol="grpc"), - }, - files = { + name="zkevm-bridge-service" + args["deployment_idx"], + config=ServiceConfig( + image="hermeznetwork/zkevm-bridge-service:v0.4.2", + ports={ + "bridge-rpc": PortSpec( + args["zkevm_bridge_rpc_port"], application_protocol="http" + ), + "bridge-grpc": PortSpec( + args["zkevm_bridge_grpc_port"], application_protocol="grpc" + ), + }, + files={ "/etc/": zkevm_configs, }, - entrypoint = [ + entrypoint=[ "/app/zkevm-bridge", ], - cmd = [ - "run", - "--cfg", "/etc/zkevm/bridge-config.toml" - ], + cmd=["run", "--cfg", "/etc/zkevm/bridge-config.toml"], ), ) + def add_databases(plan, args): prover_db_init_artifact = plan.upload_files( - src = "./templates/prover-db-init.sql", - name = "prover-db-init.sql" + src="./templates/prover-db-init.sql", name="prover-db-init.sql" ) prover_db = plan.add_service( - name = args["zkevm_db_prover_hostname"]+args["deployment_idx"], - config = ServiceConfig( - image = POSTGRES_IMAGE, - ports = { - POSTGRES_PORT_ID: PortSpec(args["zkevm_db_postgres_port"], application_protocol = "postgresql"), + name=args["zkevm_db_prover_hostname"] + args["deployment_idx"], + config=ServiceConfig( + image=POSTGRES_IMAGE, + ports={ + POSTGRES_PORT_ID: PortSpec( + args["zkevm_db_postgres_port"], application_protocol="postgresql" + ), }, - env_vars = { + env_vars={ "POSTGRES_DB": args["zkevm_db_prover_name"], "POSTGRES_USER": args["zkevm_db_prover_user"], "POSTGRES_PASSWORD": args["zkevm_db_prover_password"], }, - files = { + files={ "/docker-entrypoint-initdb.d/": prover_db_init_artifact, }, ), ) pool_db = plan.add_service( - name = args["zkevm_db_pool_hostname"]+args["deployment_idx"], - config = ServiceConfig( - image = POSTGRES_IMAGE, - ports = { - POSTGRES_PORT_ID: PortSpec(args["zkevm_db_postgres_port"], application_protocol = "postgresql"), + 
name=args["zkevm_db_pool_hostname"] + args["deployment_idx"], + config=ServiceConfig( + image=POSTGRES_IMAGE, + ports={ + POSTGRES_PORT_ID: PortSpec( + args["zkevm_db_postgres_port"], application_protocol="postgresql" + ), }, - env_vars = { + env_vars={ "POSTGRES_DB": args["zkevm_db_pool_name"], "POSTGRES_USER": args["zkevm_db_pool_user"], "POSTGRES_PASSWORD": args["zkevm_db_pool_password"], @@ -417,38 +491,40 @@ def add_databases(plan, args): ), ) - event_db_init_artifact = plan.upload_files( - src = "./templates/event-db-init.sql", - name = "event-db-init.sql" + src="./templates/event-db-init.sql", name="event-db-init.sql" ) event_db = plan.add_service( - name = args["zkevm_db_event_hostname"]+args["deployment_idx"], - config = ServiceConfig( - image = POSTGRES_IMAGE, - ports = { - POSTGRES_PORT_ID: PortSpec(args["zkevm_db_postgres_port"], application_protocol = "postgresql"), + name=args["zkevm_db_event_hostname"] + args["deployment_idx"], + config=ServiceConfig( + image=POSTGRES_IMAGE, + ports={ + POSTGRES_PORT_ID: PortSpec( + args["zkevm_db_postgres_port"], application_protocol="postgresql" + ), }, - env_vars = { + env_vars={ "POSTGRES_DB": args["zkevm_db_event_name"], "POSTGRES_USER": args["zkevm_db_event_user"], "POSTGRES_PASSWORD": args["zkevm_db_event_password"], }, - files = { + files={ "/docker-entrypoint-initdb.d/": event_db_init_artifact, }, ), ) state_db = plan.add_service( - name = args["zkevm_db_state_hostname"]+args["deployment_idx"], - config = ServiceConfig( - image = POSTGRES_IMAGE, - ports = { - POSTGRES_PORT_ID: PortSpec(args["zkevm_db_postgres_port"], application_protocol = "postgresql"), + name=args["zkevm_db_state_hostname"] + args["deployment_idx"], + config=ServiceConfig( + image=POSTGRES_IMAGE, + ports={ + POSTGRES_PORT_ID: PortSpec( + args["zkevm_db_postgres_port"], application_protocol="postgresql" + ), }, - env_vars = { + env_vars={ "POSTGRES_DB": args["zkevm_db_state_name"], "POSTGRES_USER": args["zkevm_db_state_user"], "POSTGRES_PASSWORD": args["zkevm_db_state_password"], @@ -456,17 +532,18 @@ def add_databases(plan, args): ), ) bridge_db = plan.add_service( - name = args["zkevm_db_bridge_hostname"]+args["deployment_idx"], - config = ServiceConfig( - image = POSTGRES_IMAGE, - ports = { - POSTGRES_PORT_ID: PortSpec(args["zkevm_db_postgres_port"], application_protocol = "postgresql"), + name=args["zkevm_db_bridge_hostname"] + args["deployment_idx"], + config=ServiceConfig( + image=POSTGRES_IMAGE, + ports={ + POSTGRES_PORT_ID: PortSpec( + args["zkevm_db_postgres_port"], application_protocol="postgresql" + ), }, - env_vars = { + env_vars={ "POSTGRES_DB": args["zkevm_db_bridge_name"], "POSTGRES_USER": args["zkevm_db_bridge_user"], "POSTGRES_PASSWORD": args["zkevm_db_bridge_password"], }, ), ) - diff --git a/templates/run-contract-setup.sh b/templates/run-contract-setup.sh index 22c66e0f..71ca9340 100755 --- a/templates/run-contract-setup.sh +++ b/templates/run-contract-setup.sh @@ -13,6 +13,7 @@ fi apt update apt-get -y install socat jq yq curl -s -L https://foundry.paradigm.xyz | bash +# shellcheck disable=SC1091 source /root/.bashrc foundryup &> /dev/null @@ -32,14 +33,14 @@ popd 2>&1 echo "Funding important accounts on l1" # FIXME this look might never finish.. 
Add a counter -until cast send --rpc-url {{.l1_rpc_url}} --mnemonic "{{.l1_preallocated_mnemonic}}" --value 0 {{.zkevm_l2_sequencer_address}}; do +until cast send --rpc-url "{{.l1_rpc_url}}" --mnemonic "{{.l1_preallocated_mnemonic}}" --value 0 "{{.zkevm_l2_sequencer_address}}"; do 2>&1 echo "l1 rpc might nto be ready" sleep 5 done -cast send --rpc-url {{.l1_rpc_url}} --mnemonic "{{.l1_preallocated_mnemonic}}" --value 100ether {{.zkevm_l2_sequencer_address}} -cast send --rpc-url {{.l1_rpc_url}} --mnemonic "{{.l1_preallocated_mnemonic}}" --value 100ether {{.zkevm_l2_aggregator_address}} -cast send --rpc-url {{.l1_rpc_url}} --mnemonic "{{.l1_preallocated_mnemonic}}" --value 100ether {{.zkevm_l2_admin_address}} +cast send --rpc-url "{{.l1_rpc_url}}" --mnemonic "{{.l1_preallocated_mnemonic}}" --value 100ether "{{.zkevm_l2_sequencer_address}}" +cast send --rpc-url "{{.l1_rpc_url}}" --mnemonic "{{.l1_preallocated_mnemonic}}" --value 100ether "{{.zkevm_l2_aggregator_address}}" +cast send --rpc-url "{{.l1_rpc_url}}" --mnemonic "{{.l1_preallocated_mnemonic}}" --value 100ether "{{.zkevm_l2_admin_address}}" cp /opt/contract-deploy/deploy_parameters.json /opt/zkevm-contracts/deployment/v2/deploy_parameters.json @@ -90,26 +91,32 @@ jq --slurpfile c combined.json '.L1Config.polTokenAddress = $c[0].polTokenAddres jq --slurpfile c combined.json '.L1Config.polygonZkEVMAddress = $c[0].rollupAddress' genesis.json > g.json; mv g.json genesis.json # note this particular setting is different for the bridge service!! +# shellcheck disable=SC2016 tomlq --slurpfile c combined.json -t '.NetworkConfig.GenBlockNumber = $c[0].deploymentRollupManagerBlockNumber' bridge-config.toml > b.json; mv b.json bridge-config.toml +# shellcheck disable=SC2016 tomlq --slurpfile c combined.json -t '.NetworkConfig.PolygonBridgeAddress = $c[0].polygonZkEVMBridgeAddress' bridge-config.toml > b.json; mv b.json bridge-config.toml +# shellcheck disable=SC2016 tomlq --slurpfile c combined.json -t '.NetworkConfig.PolygonZkEVMGlobalExitRootAddress = $c[0].polygonZkEVMGlobalExitRootAddress' bridge-config.toml > b.json; mv b.json bridge-config.toml +# shellcheck disable=SC2016 tomlq --slurpfile c combined.json -t '.NetworkConfig.PolygonRollupManagerAddress = $c[0].polygonRollupManagerAddress' bridge-config.toml > b.json; mv b.json bridge-config.toml +# shellcheck disable=SC2016 tomlq --slurpfile c combined.json -t '.NetworkConfig.PolygonZkEVMAddress = $c[0].rollupAddress' bridge-config.toml > b.json; mv b.json bridge-config.toml +# shellcheck disable=SC2016 tomlq --slurpfile c combined.json -t '.NetworkConfig.L2PolygonBridgeAddresses = [$c[0].polygonZkEVMBridgeAddress]' bridge-config.toml > b.json; mv b.json bridge-config.toml -cast send --private-key {{.zkevm_l2_sequencer_private_key}} --legacy --rpc-url {{.l1_rpc_url}} "$(jq -r '.polTokenAddress' combined.json)" 'approve(address,uint256)(bool)' "$(jq -r '.rollupAddress' combined.json)" 1000000000000000000000000000 &> approval.out +cast send --private-key "{{.zkevm_l2_sequencer_private_key}}" --legacy --rpc-url "{{.l1_rpc_url}}" "$(jq -r '.polTokenAddress' combined.json)" "approve(address,uint256)(bool)" "$(jq -r '.rollupAddress' combined.json)" 1000000000000000000000000000 &> approval.out -polycli parseethwallet --hexkey {{.zkevm_l2_sequencer_private_key}} --password {{.zkevm_l2_keystore_password}} --keystore tmp.keys +polycli parseethwallet --hexkey "{{.zkevm_l2_sequencer_private_key}}" --password "{{.zkevm_l2_keystore_password}}" --keystore tmp.keys mv tmp.keys/UTC* sequencer.keystore chmod 
a+r sequencer.keystore rm -rf tmp.keys -polycli parseethwallet --hexkey {{.zkevm_l2_aggregator_private_key}} --password {{.zkevm_l2_keystore_password}} --keystore tmp.keys +polycli parseethwallet --hexkey "{{.zkevm_l2_aggregator_private_key}}" --password "{{.zkevm_l2_keystore_password}}" --keystore tmp.keys mv tmp.keys/UTC* aggregator.keystore chmod a+r aggregator.keystore rm -rf tmp.keys -polycli parseethwallet --hexkey {{.zkevm_l2_claimtxmanager_private_key}} --password {{.zkevm_l2_keystore_password}} --keystore tmp.keys +polycli parseethwallet --hexkey "{{.zkevm_l2_claimtxmanager_private_key}}" --password "{{.zkevm_l2_keystore_password}}" --keystore tmp.keys mv tmp.keys/UTC* claimtxmanager.keystore chmod a+r claimtxmanager.keystore rm -rf tmp.keys
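Note: the deploy workflow above simply runs this Kurtosis package against the repository root with an args file (cdk-params.yml is referenced by the workflow but is not included in this diff). For local testing, a rough equivalent is sketched below; it assumes the kurtosis CLI is installed and recent enough to accept --args-file, and the enclave name cdk-devnet is an arbitrary choice, not something defined by this change.

# Lint the Starlark package the same way the lint workflow does.
kurtosis lint .

# Spin up the devnet in a named enclave, passing the same args file the deploy workflow uses.
kurtosis run --enclave cdk-devnet --args-file cdk-params.yml .

# Tear all enclaves down when finished.
kurtosis clean -a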