Compare commits: main...jules_wip_ (6 commits)

| SHA1 |
|---|
| d03c00f9a9 |
| f9ea6d118c |
| 318c4d26f5 |
| ef273ee331 |
| e4f6ca8ee7 |
| 7cf79509e3 |
.gitignore (vendored, 1 change)

```
@@ -1,2 +1 @@
.env
peer-backups/
```

abstract-mainnet.yml (symbolic link, 1 change)

```
@@ -0,0 +1 @@
abstract/external-node/abstract-mainnet-external-node-pruned.yml
```

abstract-testnet.yml (symbolic link, 1 change)

```
@@ -0,0 +1 @@
abstract/external-node/abstract-testnet-external-node-pruned.yml
```
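The two aliases above are plain symlinks into the per-chain directories. A minimal sketch, not part of the diff, of how such links could be recreated by hand from the repository root:

```bash
# Recreate the top-level aliases; targets are taken from the diff above.
ln -sfn abstract/external-node/abstract-mainnet-external-node-pruned.yml abstract-mainnet.yml
ln -sfn abstract/external-node/abstract-testnet-external-node-pruned.yml abstract-testnet.yml

# Verify that both links resolve to real files.
readlink -f abstract-mainnet.yml abstract-testnet.yml
```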
```
@@ -29,8 +29,8 @@ x-logging-defaults: &logging-defaults
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
abstract-mainnet-archive:
image: ${ABSTRACT_EXTERNAL_NODE_IMAGE:-matterlabs/external-node}:${ABSTRACT_MAINNET_EXTERNAL_NODE_VERSION:-v29.7.0}
abstract-mainnet-archive-client:
image: ${ABSTRACT_EXTERNAL_NODE_IMAGE:-matterlabs/external-node}:${ABSTRACT_MAINNET_EXTERNAL_NODE_VERSION:-v27.5.7}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -61,7 +61,7 @@ services:
- EN_L1_CHAIN_ID=1
- EN_L2_CHAIN_ID=2741
- EN_MAIN_NODE_URL=https://api.mainnet.abs.xyz
- EN_MAX_RESPONSE_BODY_SIZE_MB=25
- EN_MAX_RESPONSE_BODY_SIZE_MB=30
- EN_MAX_RESPONSE_BODY_SIZE_OVERRIDES_MB=eth_getLogs=100,eth_getBlockReceipts=None
- EN_MERKLE_TREE_PATH=./db/ext-node/lightweight
- EN_PROMETHEUS_PORT=3322
@@ -83,24 +83,35 @@ services:
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false

abstract-mainnet-archive:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: abstract-mainnet-archive-client
RPC_PATH: ''
RPC_PORT: 8545
WS_PATH: ''
WS_PORT: 8546
restart: unless-stopped
depends_on:
- abstract-mainnet-archive-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.abstract-mainnet-external-node-archive-stripprefix.stripprefix.prefixes=/abstract-mainnet-archive
- traefik.http.services.abstract-mainnet-external-node-archive.loadbalancer.server.port=8545
- traefik.http.services.abstract-mainnet-external-node-archive.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-archive.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-archive.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-archive.rule=Host(`$DOMAIN`) && (Path(`/abstract-mainnet-archive`) || Path(`/abstract-mainnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.abstract-mainnet-external-node-archive.rule=Path(`/abstract-mainnet-archive`) || Path(`/abstract-mainnet-archive/`)}
- traefik.http.routers.abstract-mainnet-external-node-archive.middlewares=abstract-mainnet-external-node-archive-stripprefix, ipallowlist
- traefik.http.routers.abstract-mainnet-external-node-archive.priority=50 # gets any request that is not GET with UPGRADE header
- traefik.http.routers.abstract-mainnet-external-node-archive-ws.priority=100 # answers GET requests first
- traefik.http.services.abstract-mainnet-external-node-archive-ws.loadbalancer.server.port=8546
- traefik.http.routers.abstract-mainnet-external-node-archive-ws.service=abstract-mainnet-external-node-archive-ws
- traefik.http.routers.abstract-mainnet-external-node-archive.service=abstract-mainnet-external-node-archive
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-archive-ws.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-archive-ws.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-archive-ws.rule=Host(`$DOMAIN`) && (Path(`/abstract-mainnet-archive`) || Path(`/abstract-mainnet-archive/`)) && Headers(`Upgrade`, `websocket`)}
- ${NO_SSL:+traefik.http.routers.abstract-mainnet-external-node-archive-ws.rule=(Path(`/abstract-mainnet-archive`) || Path(`/abstract-mainnet-archive/`)) && Headers(`Upgrade`, `websocket`)}
- traefik.http.routers.abstract-mainnet-external-node-archive-ws.middlewares=abstract-mainnet-external-node-archive-stripprefix, ipallowlist

abstract-mainnet-archive-db:
image: postgres:14
```
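The `${NO_SSL:-...}` and `${NO_SSL:+...}` label pairs above toggle between TLS and plain-HTTP router definitions through Compose variable interpolation. A minimal shell sketch of the expansion rule they rely on (the echoed values are illustrative, not from the repository):

```bash
# With NO_SSL unset, ':-' falls back to its default text and ':+' yields
# nothing, so only the TLS-flavored labels survive interpolation.
unset NO_SSL
echo "${NO_SSL:-tls-router-label}"   # -> tls-router-label
echo "${NO_SSL:+plain-router-label}" # -> (empty)

# With NO_SSL set to a non-empty value, ':-' yields that value and ':+'
# yields its alternate text, activating the plain-HTTP labels instead.
NO_SSL=1
echo "${NO_SSL:-tls-router-label}"   # -> 1
echo "${NO_SSL:+plain-router-label}" # -> plain-router-label
```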
```
@@ -29,8 +29,8 @@ x-logging-defaults: &logging-defaults
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
abstract-mainnet:
image: ${ABSTRACT_EXTERNAL_NODE_IMAGE:-matterlabs/external-node}:${ABSTRACT_MAINNET_EXTERNAL_NODE_VERSION:-v29.7.0}
abstract-mainnet-client:
image: ${ABSTRACT_EXTERNAL_NODE_IMAGE:-matterlabs/external-node}:${ABSTRACT_MAINNET_EXTERNAL_NODE_VERSION:-v27.5.7}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -61,7 +61,7 @@ services:
- EN_L1_CHAIN_ID=1
- EN_L2_CHAIN_ID=2741
- EN_MAIN_NODE_URL=https://api.mainnet.abs.xyz
- EN_MAX_RESPONSE_BODY_SIZE_MB=25
- EN_MAX_RESPONSE_BODY_SIZE_MB=30
- EN_MAX_RESPONSE_BODY_SIZE_OVERRIDES_MB=eth_getLogs=100,eth_getBlockReceipts=None
- EN_MERKLE_TREE_PATH=./db/ext-node/lightweight
- EN_PROMETHEUS_PORT=3322
@@ -83,24 +83,35 @@ services:
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false

abstract-mainnet:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: abstract-mainnet-client
RPC_PATH: ''
RPC_PORT: 8545
WS_PATH: ''
WS_PORT: 8546
restart: unless-stopped
depends_on:
- abstract-mainnet-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.abstract-mainnet-external-node-pruned-stripprefix.stripprefix.prefixes=/abstract-mainnet
- traefik.http.services.abstract-mainnet-external-node-pruned.loadbalancer.server.port=8545
- traefik.http.services.abstract-mainnet-external-node-pruned.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-pruned.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-pruned.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-pruned.rule=Host(`$DOMAIN`) && (Path(`/abstract-mainnet`) || Path(`/abstract-mainnet/`))}
- ${NO_SSL:+traefik.http.routers.abstract-mainnet-external-node-pruned.rule=Path(`/abstract-mainnet`) || Path(`/abstract-mainnet/`)}
- traefik.http.routers.abstract-mainnet-external-node-pruned.middlewares=abstract-mainnet-external-node-pruned-stripprefix, ipallowlist
- traefik.http.routers.abstract-mainnet-external-node-pruned.priority=50 # gets any request that is not GET with UPGRADE header
- traefik.http.routers.abstract-mainnet-external-node-pruned-ws.priority=100 # answers GET requests first
- traefik.http.services.abstract-mainnet-external-node-pruned-ws.loadbalancer.server.port=8546
- traefik.http.routers.abstract-mainnet-external-node-pruned-ws.service=abstract-mainnet-external-node-pruned-ws
- traefik.http.routers.abstract-mainnet-external-node-pruned.service=abstract-mainnet-external-node-pruned
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-pruned-ws.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-pruned-ws.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-pruned-ws.rule=Host(`$DOMAIN`) && (Path(`/abstract-mainnet`) || Path(`/abstract-mainnet/`)) && Headers(`Upgrade`, `websocket`)}
- ${NO_SSL:+traefik.http.routers.abstract-mainnet-external-node-pruned-ws.rule=(Path(`/abstract-mainnet`) || Path(`/abstract-mainnet/`)) && Headers(`Upgrade`, `websocket`)}
- traefik.http.routers.abstract-mainnet-external-node-pruned-ws.middlewares=abstract-mainnet-external-node-pruned-stripprefix, ipallowlist

abstract-mainnet-db:
image: postgres:14
```
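The paired `-ws` routers above give requests carrying the WebSocket Upgrade header a higher priority than plain JSON-RPC POSTs on the same path. A hedged sketch of exercising both paths once the stack is up, assuming DOMAIN is set as in the usage comments elsewhere in this repository and that a WebSocket client such as wscat is available (wscat is one option, not something this repo ships):

```bash
# Plain JSON-RPC POST: no Upgrade header, so the priority-50 router answers.
curl -s -X POST "https://$DOMAIN/abstract-mainnet" \
  -H 'Content-Type: application/json' \
  --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

# WebSocket handshake: the Upgrade header matches the priority-100 '-ws'
# router, which forwards to port 8546 instead of 8545.
wscat -c "wss://$DOMAIN/abstract-mainnet"
```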
```
@@ -29,8 +29,8 @@ x-logging-defaults: &logging-defaults
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
abstract-testnet-archive:
image: ${ABSTRACT_EXTERNAL_NODE_IMAGE:-matterlabs/external-node}:${ABSTRACT_TESTNET_EXTERNAL_NODE_VERSION:-v29.7.0}
abstract-testnet-archive-client:
image: ${ABSTRACT_EXTERNAL_NODE_IMAGE:-matterlabs/external-node}:${ABSTRACT_TESTNET_EXTERNAL_NODE_VERSION:-v28.2.1}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -61,7 +61,7 @@ services:
- EN_L1_CHAIN_ID=11155111
- EN_L2_CHAIN_ID=11124
- EN_MAIN_NODE_URL=https://api.testnet.abs.xyz
- EN_MAX_RESPONSE_BODY_SIZE_MB=25
- EN_MAX_RESPONSE_BODY_SIZE_MB=30
- EN_MAX_RESPONSE_BODY_SIZE_OVERRIDES_MB=eth_getLogs=100,eth_getBlockReceipts=None
- EN_MERKLE_TREE_PATH=./db/ext-node/lightweight
- EN_PROMETHEUS_PORT=3322
@@ -83,24 +83,35 @@ services:
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false

abstract-testnet-archive:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: abstract-testnet-archive-client
RPC_PATH: ''
RPC_PORT: 8545
WS_PATH: ''
WS_PORT: 8546
restart: unless-stopped
depends_on:
- abstract-testnet-archive-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.abstract-testnet-external-node-archive-stripprefix.stripprefix.prefixes=/abstract-testnet-archive
- traefik.http.services.abstract-testnet-external-node-archive.loadbalancer.server.port=8545
- traefik.http.services.abstract-testnet-external-node-archive.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-archive.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-archive.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-archive.rule=Host(`$DOMAIN`) && (Path(`/abstract-testnet-archive`) || Path(`/abstract-testnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.abstract-testnet-external-node-archive.rule=Path(`/abstract-testnet-archive`) || Path(`/abstract-testnet-archive/`)}
- traefik.http.routers.abstract-testnet-external-node-archive.middlewares=abstract-testnet-external-node-archive-stripprefix, ipallowlist
- traefik.http.routers.abstract-testnet-external-node-archive.priority=50 # gets any request that is not GET with UPGRADE header
- traefik.http.routers.abstract-testnet-external-node-archive-ws.priority=100 # answers GET requests first
- traefik.http.services.abstract-testnet-external-node-archive-ws.loadbalancer.server.port=8546
- traefik.http.routers.abstract-testnet-external-node-archive-ws.service=abstract-testnet-external-node-archive-ws
- traefik.http.routers.abstract-testnet-external-node-archive.service=abstract-testnet-external-node-archive
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-archive-ws.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-archive-ws.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-archive-ws.rule=Host(`$DOMAIN`) && (Path(`/abstract-testnet-archive`) || Path(`/abstract-testnet-archive/`)) && Headers(`Upgrade`, `websocket`)}
- ${NO_SSL:+traefik.http.routers.abstract-testnet-external-node-archive-ws.rule=(Path(`/abstract-testnet-archive`) || Path(`/abstract-testnet-archive/`)) && Headers(`Upgrade`, `websocket`)}
- traefik.http.routers.abstract-testnet-external-node-archive-ws.middlewares=abstract-testnet-external-node-archive-stripprefix, ipallowlist

abstract-testnet-archive-db:
image: postgres:14
```
```
@@ -29,8 +29,8 @@ x-logging-defaults: &logging-defaults
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
abstract-testnet:
image: ${ABSTRACT_EXTERNAL_NODE_IMAGE:-matterlabs/external-node}:${ABSTRACT_TESTNET_EXTERNAL_NODE_VERSION:-v29.7.0}
abstract-testnet-client:
image: ${ABSTRACT_EXTERNAL_NODE_IMAGE:-matterlabs/external-node}:${ABSTRACT_TESTNET_EXTERNAL_NODE_VERSION:-v28.2.1}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -61,7 +61,7 @@ services:
- EN_L1_CHAIN_ID=11155111
- EN_L2_CHAIN_ID=11124
- EN_MAIN_NODE_URL=https://api.testnet.abs.xyz
- EN_MAX_RESPONSE_BODY_SIZE_MB=25
- EN_MAX_RESPONSE_BODY_SIZE_MB=30
- EN_MAX_RESPONSE_BODY_SIZE_OVERRIDES_MB=eth_getLogs=100,eth_getBlockReceipts=None
- EN_MERKLE_TREE_PATH=./db/ext-node/lightweight
- EN_PROMETHEUS_PORT=3322
@@ -83,24 +83,35 @@ services:
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false

abstract-testnet:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: abstract-testnet-client
RPC_PATH: ''
RPC_PORT: 8545
WS_PATH: ''
WS_PORT: 8546
restart: unless-stopped
depends_on:
- abstract-testnet-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.abstract-testnet-external-node-pruned-stripprefix.stripprefix.prefixes=/abstract-testnet
- traefik.http.services.abstract-testnet-external-node-pruned.loadbalancer.server.port=8545
- traefik.http.services.abstract-testnet-external-node-pruned.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-pruned.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-pruned.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-pruned.rule=Host(`$DOMAIN`) && (Path(`/abstract-testnet`) || Path(`/abstract-testnet/`))}
- ${NO_SSL:+traefik.http.routers.abstract-testnet-external-node-pruned.rule=Path(`/abstract-testnet`) || Path(`/abstract-testnet/`)}
- traefik.http.routers.abstract-testnet-external-node-pruned.middlewares=abstract-testnet-external-node-pruned-stripprefix, ipallowlist
- traefik.http.routers.abstract-testnet-external-node-pruned.priority=50 # gets any request that is not GET with UPGRADE header
- traefik.http.routers.abstract-testnet-external-node-pruned-ws.priority=100 # answers GET requests first
- traefik.http.services.abstract-testnet-external-node-pruned-ws.loadbalancer.server.port=8546
- traefik.http.routers.abstract-testnet-external-node-pruned-ws.service=abstract-testnet-external-node-pruned-ws
- traefik.http.routers.abstract-testnet-external-node-pruned.service=abstract-testnet-external-node-pruned
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-pruned-ws.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-pruned-ws.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-pruned-ws.rule=Host(`$DOMAIN`) && (Path(`/abstract-testnet`) || Path(`/abstract-testnet/`)) && Headers(`Upgrade`, `websocket`)}
- ${NO_SSL:+traefik.http.routers.abstract-testnet-external-node-pruned-ws.rule=(Path(`/abstract-testnet`) || Path(`/abstract-testnet/`)) && Headers(`Upgrade`, `websocket`)}
- traefik.http.routers.abstract-testnet-external-node-pruned-ws.middlewares=abstract-testnet-external-node-pruned-stripprefix, ipallowlist

abstract-testnet-db:
image: postgres:14
```
alephzero-mainnet-archive.yml (symbolic link, 1 change)

```
@@ -0,0 +1 @@
arb/nitro/alephzero-mainnet-nitro-archive-pebble-hash.yml
```

alephzero-mainnet.yml (symbolic link, 1 change)

```
@@ -0,0 +1 @@
arb/nitro/alephzero-mainnet-nitro-pruned-pebble-path.yml
```

alephzero-sepolia-archive.yml (symbolic link, 1 change)

```
@@ -0,0 +1 @@
arb/nitro/alephzero-sepolia-nitro-archive-leveldb-hash.yml
```

alephzero-sepolia.yml (symbolic link, 1 change)

```
@@ -0,0 +1 @@
arb/nitro/alephzero-sepolia-nitro-pruned-pebble-path.yml
```
```
@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
arbitrum-one-arbnode-archive:
image: ${ARBITRUM_ARBNODE_IMAGE:-offchainlabs/arb-node}:${ARBITRUM_ONE_ARBNODE_VERSION:-v1.4.6-551a39b3}
image: ${ARBITRUM_ARBNODE_IMAGE:-offchainlabs/arb-node}:${ARBITRUM_ONE_ARBNODE_VERSION:-v1.4.5-e97c1a4}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
```
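Every image tag in these files is interpolated as `${VAR:-default}`, so a deployment can pin a version without editing the compose file itself. A small sketch, assuming the standard Compose `.env` lookup in the project directory:

```bash
# Pin the arb-node tag instead of relying on the compose default; the
# variable name matches the interpolation in the hunk above.
cat >> .env <<'EOF'
ARBITRUM_ONE_ARBNODE_VERSION=v1.4.5-e97c1a4
EOF

# 'docker compose config' prints the fully interpolated configuration, so
# the effective image tag can be checked before pulling anything.
docker compose config | grep 'offchainlabs/arb-node'
```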
```
@@ -1,166 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"

# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro-erigon/arbitrum-sepolia-nitro-erigon-archive-trace.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-sepolia-nitro-erigon-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
arbitrum-sepolia-nitro-erigon-archive:
image: ${ARBITRUM_NITRO_ERIGON_IMAGE:-erigontech/nitro-erigon}:${ARBITRUM_SEPOLIA_NITRO_ERIGON_VERSION:-main-1a9771c}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
memlock: -1 # Disable memory locking limits (for in-memory DBs like MDBX)
user: root
ports:
- 11387:11387
- 11387:11387/udp
- 31387:31387
- 31387:31387/udp
- 36387:36387
- 36387:36387/udp
expose:
- 8545
entrypoint: [erigon]
command:
- --chain=arb-sepolia
- --datadir=/root/.local/share/erigon
- --http
- --http.addr=0.0.0.0
- --http.api=eth,erigon,web3,net,debug,trace,txpool,admin,ots
- --http.port=8545
- --http.vhosts=*
- --l2rpc=${ARBITRUM_SEPOLIA_EXECUTION_RPC}
- --maxpeers=50
- --metrics
- --metrics.addr=0.0.0.0
- --metrics.port=6060
- --nat=extip:${IP}
- --p2p.allowed-ports=31387
- --p2p.allowed-ports=36387
- --port=11387
- --prune.mode=archive
- --rpc.evmtimeout=${ARBITRUM_SEPOLIA_NITRO_ERIGON_ARCHIVE_TRACE_EVMTIMEOUT:-5m0s}
- --rpc.gascap=6000000000
- --rpc.overlay.getlogstimeout=${ARBITRUM_SEPOLIA_NITRO_ERIGON_ARCHIVE_TRACE_GETLOGSTIMEOUT:-5m0s}
- --rpc.overlay.replayblocktimeout=${ARBITRUM_SEPOLIA_NITRO_ERIGON_ARCHIVE_TRACE_REPLAYBLOCKTIMEOUT:-10s}
- --rpc.returndata.limit=10000000
- --sync.loop.block.limit=100000
- --torrent.port=26387
- --ws
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_SEPOLIA_NITRO_ERIGON_ARCHIVE_TRACE_DATA:-arbitrum-sepolia-nitro-erigon-archive-trace}:/root/.local/share/erigon
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6060
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-sepolia-nitro-erigon-archive-trace-stripprefix.stripprefix.prefixes=/arbitrum-sepolia-nitro-erigon-archive
- traefik.http.services.arbitrum-sepolia-nitro-erigon-archive-trace.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-erigon-archive-trace.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-erigon-archive-trace.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-erigon-archive-trace.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-sepolia-nitro-erigon-archive`) || Path(`/arbitrum-sepolia-nitro-erigon-archive/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-sepolia-nitro-erigon-archive-trace.rule=Path(`/arbitrum-sepolia-nitro-erigon-archive`) || Path(`/arbitrum-sepolia-nitro-erigon-archive/`)}
- traefik.http.routers.arbitrum-sepolia-nitro-erigon-archive-trace.middlewares=arbitrum-sepolia-nitro-erigon-archive-trace-stripprefix, ipallowlist
shm_size: 2gb

volumes:
arbitrum-sepolia-nitro-erigon-archive-trace:

x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum-sepolia
method-groups:
enabled:
- debug
- filter
- trace
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
# non standard erigon only
- name: eth_getBlockReceipts
- name: eth_protocolVersion
- name: eth_callMany
- name: eth_callBundle
- name: debug_accountAt
- name: debug_traceCallMany
- name: erigon_getHeaderByHash
- name: erigon_getBlockReceiptsByBlockHash
- name: erigon_getHeaderByNumber
- name: erigon_getLogsByHash
- name: erigon_forks
- name: erigon_getBlockByTimestamp
- name: erigon_BlockNumber
- name: erigon_getLatestLogs
- name: ots_getInternalOperations
- name: ots_hasCode
- name: ots_getTransactionError
- name: ots_traceTransaction
- name: ots_getBlockDetails
- name: ots_getBlockDetailsByHash
- name: ots_getBlockTransactions
- name: ots_searchTransactionsBefore
- name: ots_searchTransactionsAfter
- name: ots_getTransactionBySenderAndNonce
- name: ots_getContractCreator
...
```
```
@@ -1,167 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"

# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro-erigon/arbitrum-sepolia-nitro-erigon-minimal-trace.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-sepolia-nitro-erigon-minimal \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
arbitrum-sepolia-nitro-erigon-minimal:
image: ${ARBITRUM_NITRO_ERIGON_IMAGE:-erigontech/nitro-erigon}:${ARBITRUM_SEPOLIA_NITRO_ERIGON_VERSION:-main-1a9771c}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
memlock: -1 # Disable memory locking limits (for in-memory DBs like MDBX)
user: root
ports:
- 12072:12072
- 12072:12072/udp
- 32072:32072
- 32072:32072/udp
- 37072:37072
- 37072:37072/udp
expose:
- 8545
entrypoint: [erigon]
command:
- --chain=arb-sepolia
- --datadir=/root/.local/share/erigon
- --http
- --http.addr=0.0.0.0
- --http.api=eth,erigon,web3,net,debug,trace,txpool,admin,ots
- --http.port=8545
- --http.vhosts=*
- --l2rpc=${ARBITRUM_SEPOLIA_EXECUTION_RPC}
- --maxpeers=50
- --metrics
- --metrics.addr=0.0.0.0
- --metrics.port=6060
- --nat=extip:${IP}
- --p2p.allowed-ports=32072
- --p2p.allowed-ports=37072
- --persist.receipts=false
- --port=12072
- --prune.mode=minimal
- --rpc.evmtimeout=${ARBITRUM_SEPOLIA_NITRO_ERIGON_MINIMAL_TRACE_EVMTIMEOUT:-5m0s}
- --rpc.gascap=6000000000
- --rpc.overlay.getlogstimeout=${ARBITRUM_SEPOLIA_NITRO_ERIGON_MINIMAL_TRACE_GETLOGSTIMEOUT:-5m0s}
- --rpc.overlay.replayblocktimeout=${ARBITRUM_SEPOLIA_NITRO_ERIGON_MINIMAL_TRACE_REPLAYBLOCKTIMEOUT:-10s}
- --rpc.returndata.limit=10000000
- --sync.loop.block.limit=100000
- --torrent.port=27072
- --ws
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_SEPOLIA_NITRO_ERIGON_MINIMAL_TRACE_DATA:-arbitrum-sepolia-nitro-erigon-minimal-trace}:/root/.local/share/erigon
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6060
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-sepolia-nitro-erigon-minimal-trace-stripprefix.stripprefix.prefixes=/arbitrum-sepolia-nitro-erigon-minimal
- traefik.http.services.arbitrum-sepolia-nitro-erigon-minimal-trace.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-erigon-minimal-trace.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-erigon-minimal-trace.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-erigon-minimal-trace.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-sepolia-nitro-erigon-minimal`) || Path(`/arbitrum-sepolia-nitro-erigon-minimal/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-sepolia-nitro-erigon-minimal-trace.rule=Path(`/arbitrum-sepolia-nitro-erigon-minimal`) || Path(`/arbitrum-sepolia-nitro-erigon-minimal/`)}
- traefik.http.routers.arbitrum-sepolia-nitro-erigon-minimal-trace.middlewares=arbitrum-sepolia-nitro-erigon-minimal-trace-stripprefix, ipallowlist
shm_size: 2gb

volumes:
arbitrum-sepolia-nitro-erigon-minimal-trace:

x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum-sepolia
method-groups:
enabled:
- debug
- filter
- trace
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
# non standard erigon only
- name: eth_getBlockReceipts
- name: eth_protocolVersion
- name: eth_callMany
- name: eth_callBundle
- name: debug_accountAt
- name: debug_traceCallMany
- name: erigon_getHeaderByHash
- name: erigon_getBlockReceiptsByBlockHash
- name: erigon_getHeaderByNumber
- name: erigon_getLogsByHash
- name: erigon_forks
- name: erigon_getBlockByTimestamp
- name: erigon_BlockNumber
- name: erigon_getLatestLogs
- name: ots_getInternalOperations
- name: ots_hasCode
- name: ots_getTransactionError
- name: ots_traceTransaction
- name: ots_getBlockDetails
- name: ots_getBlockDetailsByHash
- name: ots_getBlockTransactions
- name: ots_searchTransactionsBefore
- name: ots_searchTransactionsAfter
- name: ots_getTransactionBySenderAndNonce
- name: ots_getContractCreator
...
```
```
@@ -1,167 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"

# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro-erigon/arbitrum-sepolia-nitro-erigon-pruned-trace.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-sepolia-nitro-erigon \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
arbitrum-sepolia-nitro-erigon:
image: ${ARBITRUM_NITRO_ERIGON_IMAGE:-erigontech/nitro-erigon}:${ARBITRUM_SEPOLIA_NITRO_ERIGON_VERSION:-main-1a9771c}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
memlock: -1 # Disable memory locking limits (for in-memory DBs like MDBX)
user: root
ports:
- 13369:13369
- 13369:13369/udp
- 33369:33369
- 33369:33369/udp
- 38369:38369
- 38369:38369/udp
expose:
- 8545
entrypoint: [erigon]
command:
- --chain=arb-sepolia
- --datadir=/root/.local/share/erigon
- --http
- --http.addr=0.0.0.0
- --http.api=eth,erigon,web3,net,debug,trace,txpool,admin,ots
- --http.port=8545
- --http.vhosts=*
- --l2rpc=${ARBITRUM_SEPOLIA_EXECUTION_RPC}
- --maxpeers=50
- --metrics
- --metrics.addr=0.0.0.0
- --metrics.port=6060
- --nat=extip:${IP}
- --p2p.allowed-ports=33369
- --p2p.allowed-ports=38369
- --persist.receipts=false
- --port=13369
- --prune.mode=full
- --rpc.evmtimeout=${ARBITRUM_SEPOLIA_NITRO_ERIGON_PRUNED_TRACE_EVMTIMEOUT:-5m0s}
- --rpc.gascap=6000000000
- --rpc.overlay.getlogstimeout=${ARBITRUM_SEPOLIA_NITRO_ERIGON_PRUNED_TRACE_GETLOGSTIMEOUT:-5m0s}
- --rpc.overlay.replayblocktimeout=${ARBITRUM_SEPOLIA_NITRO_ERIGON_PRUNED_TRACE_REPLAYBLOCKTIMEOUT:-10s}
- --rpc.returndata.limit=10000000
- --sync.loop.block.limit=100000
- --torrent.port=28369
- --ws
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_SEPOLIA_NITRO_ERIGON_PRUNED_TRACE_DATA:-arbitrum-sepolia-nitro-erigon-pruned-trace}:/root/.local/share/erigon
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6060
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-sepolia-nitro-erigon-pruned-trace-stripprefix.stripprefix.prefixes=/arbitrum-sepolia-nitro-erigon
- traefik.http.services.arbitrum-sepolia-nitro-erigon-pruned-trace.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-erigon-pruned-trace.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-erigon-pruned-trace.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-erigon-pruned-trace.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-sepolia-nitro-erigon`) || Path(`/arbitrum-sepolia-nitro-erigon/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-sepolia-nitro-erigon-pruned-trace.rule=Path(`/arbitrum-sepolia-nitro-erigon`) || Path(`/arbitrum-sepolia-nitro-erigon/`)}
- traefik.http.routers.arbitrum-sepolia-nitro-erigon-pruned-trace.middlewares=arbitrum-sepolia-nitro-erigon-pruned-trace-stripprefix, ipallowlist
shm_size: 2gb

volumes:
arbitrum-sepolia-nitro-erigon-pruned-trace:

x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum-sepolia
method-groups:
enabled:
- debug
- filter
- trace
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
# non standard erigon only
- name: eth_getBlockReceipts
- name: eth_protocolVersion
- name: eth_callMany
- name: eth_callBundle
- name: debug_accountAt
- name: debug_traceCallMany
- name: erigon_getHeaderByHash
- name: erigon_getBlockReceiptsByBlockHash
- name: erigon_getHeaderByNumber
- name: erigon_getLogsByHash
- name: erigon_forks
- name: erigon_getBlockByTimestamp
- name: erigon_BlockNumber
- name: erigon_getLatestLogs
- name: ots_getInternalOperations
- name: ots_hasCode
- name: ots_getTransactionError
- name: ots_traceTransaction
- name: ots_getBlockDetails
- name: ots_getBlockDetailsByHash
- name: ots_getBlockTransactions
- name: ots_searchTransactionsBefore
- name: ots_searchTransactionsAfter
- name: ots_getTransactionBySenderAndNonce
- name: ots_getContractCreator
...
```
```
@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
alephzero-mainnet-archive:
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_MAINNET_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -50,24 +50,19 @@ services:
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.caching.database-cache=${ALEPHZERO_MAINNET_NITRO_ARCHIVE_PEBBLE_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${ALEPHZERO_MAINNET_NITRO_ARCHIVE_PEBBLE_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${ALEPHZERO_MAINNET_NITRO_ARCHIVE_PEBBLE_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${ALEPHZERO_MAINNET_NITRO_ARCHIVE_PEBBLE_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.forwarding-target=https://rpc.alephzero.raas.gelato.cloud
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.da-provider.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
```
```
@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
alephzero-mainnet:
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_MAINNET_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -49,25 +49,21 @@ services:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=${ALEPHZERO_MAINNET_ARCHIVE_DB:-false}
- --execution.caching.database-cache=${ALEPHZERO_MAINNET_NITRO_PRUNED_PEBBLE_PATH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${ALEPHZERO_MAINNET_NITRO_PRUNED_PEBBLE_PATH_SNAPSHOT_CACHE:-400}
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.caching.trie-clean-cache=${ALEPHZERO_MAINNET_NITRO_PRUNED_PEBBLE_PATH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${ALEPHZERO_MAINNET_NITRO_PRUNED_PEBBLE_PATH_TRIE_DIRTY_CACHE:-1024}
- --execution.forwarding-target=https://rpc.alephzero.raas.gelato.cloud
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.da-provider.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
```
```
@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
alephzero-sepolia-archive:
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_SEPOLIA_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -50,24 +50,19 @@ services:
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.caching.database-cache=${ALEPHZERO_SEPOLIA_NITRO_ARCHIVE_PEBBLE_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${ALEPHZERO_SEPOLIA_NITRO_ARCHIVE_PEBBLE_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${ALEPHZERO_SEPOLIA_NITRO_ARCHIVE_PEBBLE_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${ALEPHZERO_SEPOLIA_NITRO_ARCHIVE_PEBBLE_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.forwarding-target=https://rpc.alephzero-testnet.gelato.digital
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.da-provider.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
```
```
@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
alephzero-sepolia:
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_SEPOLIA_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -49,25 +49,21 @@ services:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=${ALEPHZERO_SEPOLIA_ARCHIVE_DB:-false}
- --execution.caching.database-cache=${ALEPHZERO_SEPOLIA_NITRO_PRUNED_PEBBLE_PATH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${ALEPHZERO_SEPOLIA_NITRO_PRUNED_PEBBLE_PATH_SNAPSHOT_CACHE:-400}
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.caching.trie-clean-cache=${ALEPHZERO_SEPOLIA_NITRO_PRUNED_PEBBLE_PATH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${ALEPHZERO_SEPOLIA_NITRO_PRUNED_PEBBLE_PATH_TRIE_DIRTY_CACHE:-1024}
- --execution.forwarding-target=https://rpc.alephzero-testnet.gelato.digital
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.da-provider.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
```
```
@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
arbitrum-nova-archive:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_NOVA_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_NOVA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -50,18 +50,15 @@ services:
command:
- --chain.id=42170
- --execution.caching.archive=true
- --execution.caching.database-cache=${ARBITRUM_NOVA_NITRO_ARCHIVE_LEVELDB_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${ARBITRUM_NOVA_NITRO_ARCHIVE_LEVELDB_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${ARBITRUM_NOVA_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${ARBITRUM_NOVA_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=archive
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
```
```
@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
arbitrum-nova:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_NOVA_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_NOVA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -49,19 +49,16 @@ services:
- 8545
command:
- --chain.id=42170
- --execution.caching.archive=${ARBITRUM_NOVA_ARCHIVE_DB:-false}
- --execution.caching.database-cache=${ARBITRUM_NOVA_NITRO_PRUNED_PEBBLE_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${ARBITRUM_NOVA_NITRO_PRUNED_PEBBLE_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${ARBITRUM_NOVA_NITRO_PRUNED_PEBBLE_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${ARBITRUM_NOVA_NITRO_PRUNED_PEBBLE_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.caching.archive=false
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=pruned
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
```
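All of these profiles are selected by stacking entries in COMPOSE_FILE, as the usage comments embedded in the files above describe. A quick sketch for validating a selection before starting it; the profile path is one example taken from those comments:

```bash
# Stack the base, routing, and chain profiles, then let Compose parse the
# merged configuration without printing it. A non-zero exit means the
# selection does not merge into a valid file.
export COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-nova-nitro-pruned-pebble-hash.yml
docker compose config --quiet && echo "compose stack parses cleanly"
```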
```
@@ -1,181 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"

# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-one-nitro-archive-erigon.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-one-nitro-archive-erigon \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
arbitrum-one-archive-erigon:
image: ${ARBITRUM_NITRO_IMAGE:-erigontech/nitro-erigon}:${ARBITRUM_ONE_NITRO_VERSION:-main-de68b93}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
memlock: -1 # Disable memory locking limits (for in-memory DBs like MDBX)
user: root
ports:
- 21789:21789
- 21789:21789/udp
- 27891:27891
- 27891:27891/udp
- 38917:38917
- 38917:38917/udp
- 43123:43123
- 43123:43123/udp
- 49231:49231
- 49231:49231/udp
expose:
- 8545
- 5555
entrypoint: [erigon]
command:
- --datadir=/root/.local/share/erigon
- --http
- --http.addr=0.0.0.0
- --http.api=eth,erigon,web3,net,debug,trace,txpool,admin,ots
- --http.port=8545
- --http.vhosts=*
- --maxpeers=50
- --metrics
- --metrics.addr=0.0.0.0
- --metrics.port=6060
- --nat=extip:${IP}
- --p2p.allowed-ports=43123
- --p2p.allowed-ports=49231
- --port=21789
- --prune.mode=archive
- --l2rpc="http://arbitrum-one-archive:8545"
- --torrent.download.rate=${ARBITRUM_ONE_NITRO_ARCHIVE_ERIGON_MAX_DOWNLOAD_RATE:-1000mb}
- --torrent.port=38917
- --ws
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_NITRO_ARCHIVE_ERIGON_DATA:-arbitrum-one-nitro-archive-erigon}:/root/.local/share/erigon
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6060
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-nitro-archive-erigon-stripprefix.stripprefix.prefixes=/arbitrum-one-nitro-archive-erigon
- traefik.http.services.arbitrum-one-nitro-archive-erigon.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-erigon.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-erigon.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-erigon.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one-nitro-archive-erigon`) || Path(`/arbitrum-one-nitro-archive-erigon/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-nitro-archive-erigon.rule=Path(`/arbitrum-one-nitro-archive-erigon`) || Path(`/arbitrum-one-nitro-archive-erigon/`)}
- traefik.http.routers.arbitrum-one-nitro-archive-erigon.middlewares=arbitrum-one-nitro-archive-erigon-stripprefix, ipallowlist
- traefik.http.routers.arbitrum-one-nitro-archive-erigon.service=arbitrum-one-nitro-archive-erigon
- traefik.http.routers.arbitrum-one-nitro-archive-erigon-node.service=arbitrum-one-nitro-archive-erigon-node
- traefik.http.services.arbitrum-one-nitro-archive-erigon-node.loadbalancer.server.port=5555
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-erigon-node.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-erigon-node.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-erigon-node.rule=Host(`$DOMAIN`) && PathPrefix(`/arbitrum-one-nitro-archive-erigon/eth`)}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-nitro-archive-erigon-node.rule=PathPrefix(`/arbitrum-one-nitro-archive-erigon/eth`)}
- traefik.http.routers.arbitrum-one-nitro-archive-erigon-node.middlewares=arbitrum-one-nitro-archive-erigon-stripprefix, ipallowlist
shm_size: 2gb

volumes:
arbitrum-one-nitro-archive-erigon:

x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum-one
method-groups:
enabled:
- debug
- filter
- trace
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
# non standard erigon only
- name: eth_getBlockReceipts
- name: eth_protocolVersion
- name: eth_callMany
- name: eth_callBundle
- name: debug_accountAt
- name: debug_traceCallMany
- name: erigon_getHeaderByHash
- name: erigon_getBlockReceiptsByBlockHash
- name: erigon_getHeaderByNumber
- name: erigon_getLogsByHash
- name: erigon_forks
- name: erigon_getBlockByTimestamp
- name: erigon_BlockNumber
- name: erigon_getLatestLogs
- name: ots_getInternalOperations
- name: ots_hasCode
- name: ots_getTransactionError
- name: ots_traceTransaction
- name: ots_getBlockDetails
- name: ots_getBlockDetailsByHash
- name: ots_getBlockTransactions
- name: ots_searchTransactionsBefore
- name: ots_searchTransactionsAfter
- name: ots_getTransactionBySenderAndNonce
- name: ots_getContractCreator
- id: $${ID}-beacon-chain
chain: eth-beacon-chain
labels:
provider: $${PROVIDER}-beacon-chain
connection:
generic:
rpc:
url: $${RPC_URL}
...
```
```
@@ -1,9 +1,4 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"

# Usage:
#
@@ -20,17 +15,17 @@ x-logging-defaults: &logging-defaults
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-nova-nitro-pruned-pebble-hash.yml
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-one-nitro-archive-leveldb-hash--benchmark.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-nova \
# curl -X POST https://${IP}.traefik.me/arbitrum-one-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
arbitrum-nova:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_NOVA_NITRO_VERSION:-v3.9.5-66e42c4}
arbitrum-one-archive:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.5.5-90ee45c}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -48,31 +43,24 @@ services:
expose:
- 8545
command:
- --chain.id=42170
- --execution.caching.archive=${ARBITRUM_NOVA_ARCHIVE_DB:-false}
- --execution.caching.database-cache=${ARBITRUM_NOVA_NITRO_PRUNED_PEBBLE_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${ARBITRUM_NOVA_NITRO_PRUNED_PEBBLE_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${ARBITRUM_NOVA_NITRO_PRUNED_PEBBLE_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${ARBITRUM_NOVA_NITRO_PRUNED_PEBBLE_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --chain.id=42161
- --execution.caching.archive=true
- --execution.caching.state-scheme=hash
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.prune=full
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --init.latest=archive
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-nova
- --persistent.db-engine=pebble
- --persistent.chain=/root/.arbitrum/arbitrum-one-archive
- --persistent.db-engine=leveldb
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
@@ -81,25 +69,38 @@ services:
networks:
- chains
volumes:
- ${ARBITRUM_NOVA_NITRO_PRUNED_PEBBLE_HASH_DATA:-arbitrum-nova-nitro-pruned-pebble-hash}:/root/.arbitrum
- ./tmp/arbitrum-nova:/tmp
- ${ARBITRUM_ONE_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-arbitrum-one-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./tmp/arbitrum-one-archive:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults

arbitrum-one-archive-benchmark:
build:
context: ./benchmark-proxy
dockerfile: Dockerfile
expose:
- '8545'
environment:
- ENABLE_DETAILED_LOGS=${BENCHMARK_PROXY_VERBOSE:-false}
- LISTEN_ADDR=:8545
- PRIMARY_BACKEND=http://arbitrum-one-archive:8545
- SUMMARY_INTERVAL=60
restart: unless-stopped
depends_on:
- arbitrum-one-archive
networks:
- chains
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-nova-nitro-pruned-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-nova
- traefik.http.services.arbitrum-nova-nitro-pruned-pebble-hash.loadbalancer.server.port=8545
```
|
||||
- ${NO_SSL:-traefik.http.routers.arbitrum-nova-nitro-pruned-pebble-hash.entrypoints=websecure}
|
||||
- ${NO_SSL:-traefik.http.routers.arbitrum-nova-nitro-pruned-pebble-hash.tls.certresolver=myresolver}
|
||||
- ${NO_SSL:-traefik.http.routers.arbitrum-nova-nitro-pruned-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-nova`) || Path(`/arbitrum-nova/`))}
|
||||
- ${NO_SSL:+traefik.http.routers.arbitrum-nova-nitro-pruned-pebble-hash.rule=Path(`/arbitrum-nova`) || Path(`/arbitrum-nova/`)}
|
||||
- traefik.http.routers.arbitrum-nova-nitro-pruned-pebble-hash.middlewares=arbitrum-nova-nitro-pruned-pebble-hash-stripprefix, ipallowlist
|
||||
- traefik.http.middlewares.arbitrum-one-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/arbitrum-one-archive
|
||||
- traefik.http.services.arbitrum-one-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
|
||||
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.entrypoints=websecure}
|
||||
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
|
||||
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one-archive`) || Path(`/arbitrum-one-archive/`))}
|
||||
- ${NO_SSL:+traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.rule=Path(`/arbitrum-one-archive`) || Path(`/arbitrum-one-archive/`)}
|
||||
- traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.middlewares=arbitrum-one-nitro-archive-leveldb-hash-stripprefix, ipallowlist
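
# Note (added for clarity, not in the original labels): the ${NO_SSL:-...} and
# ${NO_SSL:+...} pairs above are plain shell parameter expansion as evaluated
# by docker compose. A minimal sketch of the behavior, assuming NO_SSL is
# either unset or set to some harmless placeholder label:
#
#   unset NO_SSL
#   echo "${NO_SSL:-tls.label}"    # -> tls.label   (HTTPS router labels emitted)
#   echo "${NO_SSL:+plain.label}"  # -> (empty)     (plain-HTTP rule suppressed)
#
#   NO_SSL=some.placeholder.label
#   echo "${NO_SSL:-tls.label}"    # -> some.placeholder.label
#   echo "${NO_SSL:+plain.label}"  # -> plain.label (plain-HTTP rule active)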

volumes:
arbitrum-nova-nitro-pruned-pebble-hash:
arbitrum-one-nitro-archive-leveldb-hash:

x-upstreams:
- id: $${ID}
@@ -113,7 +114,7 @@ x-upstreams:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum-nova
chain: arbitrum
method-groups:
enabled:
- debug
@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
arbitrum-one-archive:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -50,19 +50,16 @@ services:
command:
- --chain.id=42161
- --execution.caching.archive=true
- --execution.caching.database-cache=${ARBITRUM_ONE_NITRO_ARCHIVE_LEVELDB_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${ARBITRUM_ONE_NITRO_ARCHIVE_LEVELDB_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${ARBITRUM_ONE_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${ARBITRUM_ONE_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.rpc.classic-redirect=http://arbitrum-one-arbnode-archive:8545
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=archive
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
@@ -98,7 +95,7 @@ services:
- traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.middlewares=arbitrum-one-nitro-archive-leveldb-hash-stripprefix, ipallowlist

arbitrum-one-arbnode-archive:
image: ${ARBITRUM_ARBNODE_IMAGE:-offchainlabs/arb-node}:${ARBITRUM_ONE_ARBNODE_VERSION:-v1.4.6-551a39b3}
image: ${ARBITRUM_ARBNODE_IMAGE:-offchainlabs/arb-node}:${ARBITRUM_ONE_ARBNODE_VERSION:-v1.4.5-e97c1a4}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle

@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
arbitrum-one-archive:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -50,19 +50,16 @@ services:
command:
- --chain.id=42161
- --execution.caching.archive=true
- --execution.caching.database-cache=${ARBITRUM_ONE_NITRO_ARCHIVE_PEBBLE_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${ARBITRUM_ONE_NITRO_ARCHIVE_PEBBLE_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${ARBITRUM_ONE_NITRO_ARCHIVE_PEBBLE_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${ARBITRUM_ONE_NITRO_ARCHIVE_PEBBLE_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.rpc.classic-redirect=http://arbitrum-one-arbnode-archive:8545
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=archive
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
@@ -99,7 +96,7 @@ services:
- traefik.http.routers.arbitrum-one-nitro-archive-pebble-hash.middlewares=arbitrum-one-nitro-archive-pebble-hash-stripprefix, ipallowlist

arbitrum-one-arbnode-archive:
image: ${ARBITRUM_ARBNODE_IMAGE:-offchainlabs/arb-node}:${ARBITRUM_ONE_ARBNODE_VERSION:-v1.4.6-551a39b3}
image: ${ARBITRUM_ARBNODE_IMAGE:-offchainlabs/arb-node}:${ARBITRUM_ONE_ARBNODE_VERSION:-v1.4.5-e97c1a4}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle

@@ -20,7 +20,7 @@ x-logging-defaults: &logging-defaults
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-one-nitro-pruned-pebble-hash.yml
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-one-nitro-pruned-pebble-hash--benchmark.yml
#
# docker compose up -d
#
@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
arbitrum-one:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -49,20 +49,16 @@ services:
- 8545
command:
- --chain.id=42161
- --execution.caching.archive=${ARBITRUM_ONE_ARCHIVE_DB:-false}
- --execution.caching.database-cache=${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.caching.archive=false
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.prune=full
- --init.download-path=/tmp
- --init.latest=pruned
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
@@ -89,6 +85,26 @@ services:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus

arbitrum-one-benchmark:
build:
context: ./benchmark-proxy
dockerfile: Dockerfile
expose:
- '8545'
environment:
- ENABLE_DETAILED_LOGS=${BENCHMARK_PROXY_VERBOSE:-false}
- LISTEN_ADDR=:8545
- PRIMARY_BACKEND=http://arbitrum-one:8545
- SUMMARY_INTERVAL=60
restart: unless-stopped
depends_on:
- arbitrum-one
networks:
- chains
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-nitro-pruned-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-one
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash.loadbalancer.server.port=8545
@@ -1,205 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"

# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-one-nitro-pruned-pebble-hash--fireeth.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-one \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
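#
# A successful call returns the usual JSON-RPC envelope with the head block
# number as a hex quantity; the value below is illustrative only:
#
# {"jsonrpc":"2.0","id":1,"result":"0x12d68a1"}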

services:
arbitrum-one:
image: ${ARBITRUM_FIREETH_IMAGE:-ghcr.io/streamingfast/go-ethereum}:${ARBITRUM_ONE_FIREETH_VERSION:-v2.12.4-nitro-nitro-v3.6.7-fh3.0}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
entrypoint: [sh, -c, exec fireeth start reader-node --log-to-file=false --reader-node-arguments "$*", _]
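# Added note: the wrapper above relies on sh positional arguments. The literal
# `_` becomes $0, the compose `command:` items below arrive as "$1".."$n", and
# "$*" joins them into the single string handed to --reader-node-arguments.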
command:
- --chain.id=42161
- --execution.caching.archive=${ARBITRUM_ONE_ARCHIVE_DB:-false}
- --execution.caching.database-cache=${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.prune=full
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-one
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_FIREETH_DATA:-arbitrum-one-fireeth}:/app/firehose-data
- ${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_DATA:-arbitrum-one-nitro-pruned-pebble-hash}:/root/.arbitrum
- ./tmp/arbitrum-one:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-nitro-pruned-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-one
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one`) || Path(`/arbitrum-one/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.rule=Path(`/arbitrum-one`) || Path(`/arbitrum-one/`)}
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.middlewares=arbitrum-one-nitro-pruned-pebble-hash-stripprefix, ipallowlist

arbitrum-one-firehose:
image: ${ARBITRUM_FIREETH_IMAGE:-ghcr.io/streamingfast/go-ethereum}:${ARBITRUM_ONE_FIREETH_VERSION:-v2.12.4-nitro-nitro-v3.6.7-fh3.0}
expose:
- 10015
- 10014
environment:
- ${ARBITRUM_ONE_FIREETH_BLOCKS_STORE:-/app/firehose-data/storage/merged-blocks}
entrypoint: [sh, -c, exec fireeth --config-file="" --log-to-file=false start firehose index-builder relayer merger $@, _]
command:
- --firehose-rate-limit-bucket-fill-rate=${ARBITRUM_ONE_FIREHOSE_RATE_LIMIT_BUCKET_FILL_RATE:-1s}
- --firehose-rate-limit-bucket-size=${ARBITRUM_ONE_FIREHOSE_RATE_LIMIT_BUCKET_SIZE:-200}
- --log-to-file=false
- --relayer-source=arbitrum-one:10010
restart: unless-stopped
depends_on:
- arbitrum-one
networks:
- chains
volumes:
- ${ARBITRUM_ONE_FIREETH_DATA:-arbitrum-one-fireeth}:/app/firehose-data
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash-firehose.loadbalancer.server.scheme=h2c
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-firehose.service=arbitrum-one-nitro-pruned-pebble-hash-firehose
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash-firehose.loadbalancer.server.port=10015
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-firehose.entrypoints=grpc
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-firehose.tls.certresolver=myresolver}
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-firehose.rule=Host(`arbitrum-one-firehose.${DOMAIN}`)
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-firehose.middlewares=ipallowlist
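
# A hedged probe (not part of the original file): once DNS for
# arbitrum-one-firehose.${DOMAIN} resolves, the gRPC endpoint can be inspected
# with grpcurl, assuming the firehose server has reflection enabled and speaks
# the sf.firehose.v2 protocol:
#
# grpcurl arbitrum-one-firehose.${DOMAIN}:443 list
# grpcurl arbitrum-one-firehose.${DOMAIN}:443 describe sf.firehose.v2.Stream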

arbitrum-one-events:
image: ${ARBITRUM_FIREETH_IMAGE:-ghcr.io/streamingfast/go-ethereum}:${ARBITRUM_ONE_FIREETH_VERSION:-v2.12.4-nitro-nitro-v3.6.7-fh3.0}
expose:
- 10016
entrypoint: [sh, -c, exec fireeth --config-file="" --log-to-file=false start substreams-tier1 substreams-tier2 $@, _]
command:
- --common-live-blocks-addr=arbitrum-one-firehose:10014
- --log-to-file=false
- --substreams-block-execution-timeout=${ARBITRUM_ONE_SUBSTREAMS_BLOCK_EXECUTION_TIMEOUT:-3m0s}
- --substreams-rpc-endpoints=${ARBITRUM_ONE_EXECUTION_ARCHIVE_RPC}
- --substreams-tier1-max-subrequests=${ARBITRUM_ONE_SUBSTREAMS_TIER1_MAX_SUBREQUESTS:-4}
restart: unless-stopped
depends_on:
- arbitrum-one
networks:
- chains
volumes:
- ${ARBITRUM_ONE_FIREETH_DATA:-arbitrum-one-fireeth}:/app/firehose-data
logging: *logging-defaults
labels:
- traefik.enable=true
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash-events.loadbalancer.server.scheme=h2c
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-events.service=arbitrum-one-nitro-pruned-pebble-hash-events
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash-events.loadbalancer.server.port=10016
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-events.entrypoints=grpc
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-events.tls.certresolver=myresolver}
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-events.rule=Host(`arbitrum-one-events.${DOMAIN}`)
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-events.middlewares=ipallowlist

volumes:
arbitrum-one-nitro-pruned-pebble-hash:
arbitrum-one-nitro-pruned-pebble-hash_fireeth:

x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
arbitrum-one:
image: ${ARBITRUM_FIREETH_IMAGE:-ghcr.io/streamingfast/go-ethereum}:${ARBITRUM_ONE_FIREETH_VERSION:-v2.12.4-nitro-nitro-v3.6.7-fh3.0}
image: ${ARBITRUM_FIREETH_IMAGE:-ghcr.io/streamingfast/firehose-ethereum}:${ARBITRUM_ONE_FIREETH_VERSION:-v2.11.7-nitro-nitro-v3.5.5-fh3.0}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -50,19 +50,16 @@ services:
entrypoint: [sh, -c, exec fireeth start reader-node --log-to-file=false --reader-node-arguments "$*", _]
command:
- --chain.id=42161
- --execution.caching.archive=${ARBITRUM_ONE_ARCHIVE_DB:-false}
- --execution.caching.database-cache=${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.caching.archive=false
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=pruned
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
@@ -100,7 +97,7 @@ services:
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.middlewares=arbitrum-one-nitro-pruned-pebble-hash-stripprefix, ipallowlist

arbitrum-one-firehose:
image: ${ARBITRUM_FIREETH_IMAGE:-ghcr.io/streamingfast/go-ethereum}:${ARBITRUM_ONE_FIREETH_VERSION:-v2.12.4-nitro-nitro-v3.6.7-fh3.0}
image: ${ARBITRUM_FIREETH_IMAGE:-ghcr.io/streamingfast/firehose-ethereum}:${ARBITRUM_ONE_FIREETH_VERSION:-v2.11.7-nitro-nitro-v3.5.5-fh3.0}
expose:
- 10015
- 10014
@@ -132,7 +129,7 @@ services:
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-firehose.middlewares=ipallowlist

arbitrum-one-events:
image: ${ARBITRUM_FIREETH_IMAGE:-ghcr.io/streamingfast/go-ethereum}:${ARBITRUM_ONE_FIREETH_VERSION:-v2.12.4-nitro-nitro-v3.6.7-fh3.0}
image: ${ARBITRUM_FIREETH_IMAGE:-ghcr.io/streamingfast/firehose-ethereum}:${ARBITRUM_ONE_FIREETH_VERSION:-v2.11.7-nitro-nitro-v3.5.5-fh3.0}
expose:
- 10016
entrypoint: [sh, -c, exec fireeth --config-file="" --log-to-file=false start substreams-tier1 substreams-tier2 $@, _]
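# Added note: unlike the reader-node wrapper, this entrypoint uses $@, so each
# flag in the command list stays a separate argv entry for fireeth instead of
# being collapsed into one string.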

@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
arbitrum-one:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -49,19 +49,16 @@ services:
- 8545
command:
- --chain.id=42161
- --execution.caching.archive=${ARBITRUM_ONE_ARCHIVE_DB:-false}
- --execution.caching.database-cache=${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.caching.archive=false
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=pruned
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070

@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
arbitrum-sepolia-archive:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_SEPOLIA_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -50,18 +50,15 @@ services:
command:
- --chain.id=421614
- --execution.caching.archive=true
- --execution.caching.database-cache=${ARBITRUM_SEPOLIA_NITRO_ARCHIVE_PEBBLE_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${ARBITRUM_SEPOLIA_NITRO_ARCHIVE_PEBBLE_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${ARBITRUM_SEPOLIA_NITRO_ARCHIVE_PEBBLE_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${ARBITRUM_SEPOLIA_NITRO_ARCHIVE_PEBBLE_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=archive
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070

@@ -1,141 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"

# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-sepolia-nitro-pruned-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-sepolia \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
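#
# The node also serves WebSocket upgrades on the same port (--ws.port=8545
# below); a hedged sketch using wscat (not part of this repo), assuming the
# router passes upgrade requests through:
#
# wscat -c wss://${IP}.traefik.me/arbitrum-sepolia
# > {"jsonrpc":"2.0","method":"eth_subscribe","params":["newHeads"],"id":1}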

services:
arbitrum-sepolia:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_SEPOLIA_NITRO_VERSION:-v3.9.5-66e42c4}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=421614
- --execution.caching.archive=${ARBITRUM_SEPOLIA_ARCHIVE_DB:-false}
- --execution.caching.database-cache=${ARBITRUM_SEPOLIA_NITRO_PRUNED_PEBBLE_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${ARBITRUM_SEPOLIA_NITRO_PRUNED_PEBBLE_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${ARBITRUM_SEPOLIA_NITRO_PRUNED_PEBBLE_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${ARBITRUM_SEPOLIA_NITRO_PRUNED_PEBBLE_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.prune=full
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-sepolia
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_SEPOLIA_NITRO_PRUNED_PEBBLE_HASH_DATA:-arbitrum-sepolia-nitro-pruned-pebble-hash}:/root/.arbitrum
- ./tmp/arbitrum-sepolia:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-sepolia-nitro-pruned-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-sepolia
- traefik.http.services.arbitrum-sepolia-nitro-pruned-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-sepolia`) || Path(`/arbitrum-sepolia/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-hash.rule=Path(`/arbitrum-sepolia`) || Path(`/arbitrum-sepolia/`)}
- traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-hash.middlewares=arbitrum-sepolia-nitro-pruned-pebble-hash-stripprefix, ipallowlist

volumes:
arbitrum-sepolia-nitro-pruned-pebble-hash:

x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
arbitrum-sepolia:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_SEPOLIA_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -49,19 +49,16 @@ services:
- 8545
command:
- --chain.id=421614
- --execution.caching.archive=${ARBITRUM_SEPOLIA_ARCHIVE_DB:-false}
- --execution.caching.database-cache=${ARBITRUM_SEPOLIA_NITRO_PRUNED_PEBBLE_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${ARBITRUM_SEPOLIA_NITRO_PRUNED_PEBBLE_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${ARBITRUM_SEPOLIA_NITRO_PRUNED_PEBBLE_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${ARBITRUM_SEPOLIA_NITRO_PRUNED_PEBBLE_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.caching.archive=false
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=pruned
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070

@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
connext-sepolia-archive:
image: ${CONNEXT_NITRO_IMAGE:-offchainlabs/nitro-node}:${CONNEXT_SEPOLIA_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${CONNEXT_NITRO_IMAGE:-offchainlabs/nitro-node}:${CONNEXT_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -50,24 +50,19 @@ services:
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.caching.database-cache=${CONNEXT_SEPOLIA_NITRO_ARCHIVE_LEVELDB_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${CONNEXT_SEPOLIA_NITRO_ARCHIVE_LEVELDB_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${CONNEXT_SEPOLIA_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${CONNEXT_SEPOLIA_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.forwarding-target=https://rpc.connext-sepolia.gelato.digital
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.da-provider.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true

@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
connext-sepolia:
image: ${CONNEXT_NITRO_IMAGE:-offchainlabs/nitro-node}:${CONNEXT_SEPOLIA_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${CONNEXT_NITRO_IMAGE:-offchainlabs/nitro-node}:${CONNEXT_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -49,25 +49,21 @@ services:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=${CONNEXT_SEPOLIA_ARCHIVE_DB:-false}
- --execution.caching.database-cache=${CONNEXT_SEPOLIA_NITRO_PRUNED_PEBBLE_PATH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${CONNEXT_SEPOLIA_NITRO_PRUNED_PEBBLE_PATH_SNAPSHOT_CACHE:-400}
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.caching.trie-clean-cache=${CONNEXT_SEPOLIA_NITRO_PRUNED_PEBBLE_PATH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${CONNEXT_SEPOLIA_NITRO_PRUNED_PEBBLE_PATH_TRIE_DIRTY_CACHE:-1024}
- --execution.forwarding-target=https://rpc.connext-sepolia.gelato.digital
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.da-provider.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true

@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
everclear-mainnet-archive:
image: ${EVERCLEAR_NITRO_IMAGE:-offchainlabs/nitro-node}:${EVERCLEAR_MAINNET_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${EVERCLEAR_NITRO_IMAGE:-offchainlabs/nitro-node}:${EVERCLEAR_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -50,24 +50,19 @@ services:
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.caching.database-cache=${EVERCLEAR_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${EVERCLEAR_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${EVERCLEAR_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${EVERCLEAR_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.forwarding-target=https://rpc.everclear.raas.gelato.cloud
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.da-provider.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true

@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
everclear-mainnet:
image: ${EVERCLEAR_NITRO_IMAGE:-offchainlabs/nitro-node}:${EVERCLEAR_MAINNET_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${EVERCLEAR_NITRO_IMAGE:-offchainlabs/nitro-node}:${EVERCLEAR_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -49,25 +49,21 @@ services:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=${EVERCLEAR_MAINNET_ARCHIVE_DB:-false}
- --execution.caching.database-cache=${EVERCLEAR_MAINNET_NITRO_PRUNED_PEBBLE_PATH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${EVERCLEAR_MAINNET_NITRO_PRUNED_PEBBLE_PATH_SNAPSHOT_CACHE:-400}
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.caching.trie-clean-cache=${EVERCLEAR_MAINNET_NITRO_PRUNED_PEBBLE_PATH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${EVERCLEAR_MAINNET_NITRO_PRUNED_PEBBLE_PATH_TRIE_DIRTY_CACHE:-1024}
- --execution.forwarding-target=https://rpc.everclear.raas.gelato.cloud
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.da-provider.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true

@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
opencampuscodex-sepolia-archive:
image: ${OPENCAMPUSCODEX_NITRO_IMAGE:-offchainlabs/nitro-node}:${OPENCAMPUSCODEX_SEPOLIA_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${OPENCAMPUSCODEX_NITRO_IMAGE:-offchainlabs/nitro-node}:${OPENCAMPUSCODEX_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -50,24 +50,19 @@ services:
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.caching.database-cache=${OPENCAMPUSCODEX_SEPOLIA_NITRO_ARCHIVE_LEVELDB_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${OPENCAMPUSCODEX_SEPOLIA_NITRO_ARCHIVE_LEVELDB_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${OPENCAMPUSCODEX_SEPOLIA_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${OPENCAMPUSCODEX_SEPOLIA_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.forwarding-target=https://rpc.open-campus-codex.gelato.digital
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.da-provider.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ARBITRUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true

@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
opencampuscodex-sepolia:
image: ${OPENCAMPUSCODEX_NITRO_IMAGE:-offchainlabs/nitro-node}:${OPENCAMPUSCODEX_SEPOLIA_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${OPENCAMPUSCODEX_NITRO_IMAGE:-offchainlabs/nitro-node}:${OPENCAMPUSCODEX_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -49,25 +49,21 @@ services:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=${OPENCAMPUSCODEX_SEPOLIA_ARCHIVE_DB:-false}
- --execution.caching.database-cache=${OPENCAMPUSCODEX_SEPOLIA_NITRO_PRUNED_PEBBLE_PATH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${OPENCAMPUSCODEX_SEPOLIA_NITRO_PRUNED_PEBBLE_PATH_SNAPSHOT_CACHE:-400}
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.caching.trie-clean-cache=${OPENCAMPUSCODEX_SEPOLIA_NITRO_PRUNED_PEBBLE_PATH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${OPENCAMPUSCODEX_SEPOLIA_NITRO_PRUNED_PEBBLE_PATH_TRIE_DIRTY_CACHE:-1024}
- --execution.forwarding-target=https://rpc.open-campus-codex.gelato.digital
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.da-provider.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ARBITRUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true

@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
playblock-mainnet-archive:
image: ${PLAYBLOCK_NITRO_IMAGE:-offchainlabs/nitro-node}:${PLAYBLOCK_MAINNET_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${PLAYBLOCK_NITRO_IMAGE:-offchainlabs/nitro-node}:${PLAYBLOCK_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -50,24 +50,19 @@ services:
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.caching.database-cache=${PLAYBLOCK_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${PLAYBLOCK_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${PLAYBLOCK_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${PLAYBLOCK_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.forwarding-target=https://rpc.playblock.io
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.da-provider.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ARBITRUM_NOVA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true

@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
playblock-mainnet:
image: ${PLAYBLOCK_NITRO_IMAGE:-offchainlabs/nitro-node}:${PLAYBLOCK_MAINNET_NITRO_VERSION:-v3.9.5-66e42c4}
image: ${PLAYBLOCK_NITRO_IMAGE:-offchainlabs/nitro-node}:${PLAYBLOCK_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -49,25 +49,21 @@ services:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=${PLAYBLOCK_MAINNET_ARCHIVE_DB:-false}
- --execution.caching.database-cache=${PLAYBLOCK_MAINNET_NITRO_PRUNED_PEBBLE_PATH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${PLAYBLOCK_MAINNET_NITRO_PRUNED_PEBBLE_PATH_SNAPSHOT_CACHE:-400}
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.caching.trie-clean-cache=${PLAYBLOCK_MAINNET_NITRO_PRUNED_PEBBLE_PATH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${PLAYBLOCK_MAINNET_NITRO_PRUNED_PEBBLE_PATH_TRIE_DIRTY_CACHE:-1024}
- --execution.forwarding-target=https://rpc.playblock.io
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.da-provider.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ARBITRUM_NOVA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true

@@ -1,147 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"

# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
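#
# The resulting .jwtsecret is "0x" followed by 64 hex characters (32 random
# bytes); a quick sanity check (illustrative):
#
# tr -d '\n' < .jwtsecret | wc -c   # -> 66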
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/plume-mainnet-nitro-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/plume-mainnet-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
plume-mainnet-archive:
image: ${PLUME_NITRO_IMAGE:-offchainlabs/nitro-node}:${PLUME_MAINNET_NITRO_VERSION:-v3.9.5-66e42c4}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.caching.database-cache=${PLUME_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${PLUME_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${PLUME_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${PLUME_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.forwarding-target=https://rpc.plume.org
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.da-provider.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das-plume-mainnet-1.t.conduit.xyz
- --node.data-availability.sequencer-inbox-address=0x85eC1b9138a8b9659A51e2b51bb0861901040b59
- --node.feed.input.url=wss://relay-plume-mainnet-1.t.conduit.xyz
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/plume-mainnet-archive
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${PLUME_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-plume-mainnet-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./arb/plume/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.plume-mainnet-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/plume-mainnet-archive
- traefik.http.services.plume-mainnet-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.plume-mainnet-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.plume-mainnet-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.plume-mainnet-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/plume-mainnet-archive`) || Path(`/plume-mainnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.plume-mainnet-nitro-archive-leveldb-hash.rule=Path(`/plume-mainnet-archive`) || Path(`/plume-mainnet-archive/`)}
- traefik.http.routers.plume-mainnet-nitro-archive-leveldb-hash.middlewares=plume-mainnet-nitro-archive-leveldb-hash-stripprefix, ipallowlist

volumes:
plume-mainnet-nitro-archive-leveldb-hash:

x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: plume
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
|
||||
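A quick check that the archive flag above is doing its job: ask for state at an early block, which a pruned node would refuse but an archive node can serve. A sketch, assuming a deployment as in the usage comments with $DOMAIN set (the address and block number are arbitrary):

curl -X POST https://$DOMAIN/plume-mainnet-archive \
  -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_getBalance","params":["0x0000000000000000000000000000000000000000","0x1"],"id":1}'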
@@ -1,150 +0,0 @@
---
x-logging-defaults: &logging-defaults
  driver: json-file
  options:
    max-size: "10m"
    max-file: "3"

# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/plume-mainnet-nitro-pruned-pebble-path.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/plume-mainnet \
#   -H "Content-Type: application/json" \
#   --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
  plume-mainnet:
    image: ${PLUME_NITRO_IMAGE:-offchainlabs/nitro-node}:${PLUME_MAINNET_NITRO_VERSION:-v3.9.5-66e42c4}
    sysctls:
      # TCP Performance
      net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
      net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
      net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
      net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
      net.core.somaxconn: 32768 # Higher connection queue
      # Memory/Connection Management
      # net.core.netdev_max_backlog: 50000 # Increase network buffer
      net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
      net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
    ulimits:
      nofile: 1048576 # Max open files (for RPC/WS connections)
    user: root
    expose:
      - 8545
    command:
      - --conf.file=/config/baseConfig.json
      - --execution.caching.archive=${PLUME_MAINNET_ARCHIVE_DB:-false}
      - --execution.caching.database-cache=${PLUME_MAINNET_NITRO_PRUNED_PEBBLE_PATH_DATABASE_CACHE:-2048}
      - --execution.caching.snapshot-cache=${PLUME_MAINNET_NITRO_PRUNED_PEBBLE_PATH_SNAPSHOT_CACHE:-400}
      - --execution.caching.state-scheme=path
      - --execution.caching.trie-clean-cache=${PLUME_MAINNET_NITRO_PRUNED_PEBBLE_PATH_TRIE_CLEAN_CACHE:-600}
      - --execution.caching.trie-dirty-cache=${PLUME_MAINNET_NITRO_PRUNED_PEBBLE_PATH_TRIE_DIRTY_CACHE:-1024}
      - --execution.forwarding-target=https://rpc.plume.org
      - --execution.rpc.gas-cap=5500000000
      - --execution.sequencer.enable=false
      - --http.addr=0.0.0.0
      - --http.api=eth,net,web3,arb,txpool,debug
      - --http.corsdomain=*
      - --http.port=8545
      - --http.vhosts=*
      - --metrics
      - --metrics-server.addr=0.0.0.0
      - --metrics-server.port=6070
      - --node.batch-poster.enable=false
      - --node.da-provider.enable=false
      - --node.data-availability.enable=true
      - --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
      - --node.data-availability.rest-aggregator.enable=true
      - --node.data-availability.rest-aggregator.urls=https://das-plume-mainnet-1.t.conduit.xyz
      - --node.data-availability.sequencer-inbox-address=0x85eC1b9138a8b9659A51e2b51bb0861901040b59
      - --node.feed.input.url=wss://relay-plume-mainnet-1.t.conduit.xyz
      - --node.sequencer=false
      - --node.staker.enable=false
      - --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
      - --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
      - --persistent.chain=/root/.arbitrum/plume-mainnet
      - --persistent.db-engine=pebble
      - --ws.addr=0.0.0.0
      - --ws.origins=*
      - --ws.port=8545
    restart: unless-stopped
    stop_grace_period: 5m
    networks:
      - chains
    volumes:
      - ${PLUME_MAINNET_NITRO_PRUNED_PEBBLE_PATH_DATA:-plume-mainnet-nitro-pruned-pebble-path}:/root/.arbitrum
      - ./arb/plume/mainnet:/config
      - /slowdisk:/slowdisk
    logging: *logging-defaults
    labels:
      - prometheus-scrape.enabled=true
      - prometheus-scrape.port=6070
      - prometheus-scrape.path=/debug/metrics/prometheus
      - traefik.enable=true
      - traefik.http.middlewares.plume-mainnet-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/plume-mainnet
      - traefik.http.services.plume-mainnet-nitro-pruned-pebble-path.loadbalancer.server.port=8545
      - ${NO_SSL:-traefik.http.routers.plume-mainnet-nitro-pruned-pebble-path.entrypoints=websecure}
      - ${NO_SSL:-traefik.http.routers.plume-mainnet-nitro-pruned-pebble-path.tls.certresolver=myresolver}
      - ${NO_SSL:-traefik.http.routers.plume-mainnet-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && (Path(`/plume-mainnet`) || Path(`/plume-mainnet/`))}
      - ${NO_SSL:+traefik.http.routers.plume-mainnet-nitro-pruned-pebble-path.rule=Path(`/plume-mainnet`) || Path(`/plume-mainnet/`)}
      - traefik.http.routers.plume-mainnet-nitro-pruned-pebble-path.middlewares=plume-mainnet-nitro-pruned-pebble-path-stripprefix, ipallowlist

volumes:
  plume-mainnet-nitro-pruned-pebble-path:

x-upstreams:
  - id: $${ID}
    labels:
      provider: $${PROVIDER}
    connection:
      generic:
        rpc:
          url: $${RPC_URL}
        ws:
          frameSize: 20Mb
          msgSize: 50Mb
          url: $${WS_URL}
    chain: plume
    method-groups:
      enabled:
        - debug
        - filter
    methods:
      disabled:
        # not compatible with path state scheme
        - name: debug_traceBlockByHash
      enabled:
        - name: txpool_content # TODO: should be disabled for rollup nodes
        # standard geth only
        - name: debug_getRawBlock
        - name: debug_getRawTransaction
        - name: debug_getRawReceipts
        - name: debug_getRawHeader
        - name: debug_getBadBlocks
        # non-standard, geth only, slightly dangerous
        - name: debug_intermediateRoots
        - name: debug_dumpBlock
        # standard geth and erigon
        - name: debug_accountRange
        - name: debug_getModifiedAccountsByNumber
        - name: debug_getModifiedAccountsByHash
        # non-standard, geth and erigon
        - name: eth_getRawTransactionByHash
        - name: eth_getRawTransactionByBlockHashAndIndex
...
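Unlike the archive file above, this pruned variant reads the archive switch from an environment variable, so the same compose file can be repointed without editing YAML. A sketch using docker compose's standard .env mechanism (note that flipping archive mode on an existing datadir generally means a resync, so this is only safe on a fresh volume):

echo "PLUME_MAINNET_ARCHIVE_DB=true" >> .env
docker compose up -d plume-mainnet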
@@ -1,147 +0,0 @@
---
x-logging-defaults: &logging-defaults
  driver: json-file
  options:
    max-size: "10m"
    max-file: "3"

# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/plume-testnet-nitro-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/plume-testnet-archive \
#   -H "Content-Type: application/json" \
#   --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
  plume-testnet-archive:
    image: ${PLUME_NITRO_IMAGE:-offchainlabs/nitro-node}:${PLUME_TESTNET_NITRO_VERSION:-v3.9.5-66e42c4}
    sysctls:
      # TCP Performance
      net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
      net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
      net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
      net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
      net.core.somaxconn: 32768 # Higher connection queue
      # Memory/Connection Management
      # net.core.netdev_max_backlog: 50000 # Increase network buffer
      net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
      net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
    ulimits:
      nofile: 1048576 # Max open files (for RPC/WS connections)
    user: root
    expose:
      - 8545
    command:
      - --conf.file=/config/baseConfig.json
      - --execution.caching.archive=true
      - --execution.caching.database-cache=${PLUME_TESTNET_NITRO_ARCHIVE_LEVELDB_HASH_DATABASE_CACHE:-2048}
      - --execution.caching.snapshot-cache=${PLUME_TESTNET_NITRO_ARCHIVE_LEVELDB_HASH_SNAPSHOT_CACHE:-400}
      - --execution.caching.trie-clean-cache=${PLUME_TESTNET_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_CLEAN_CACHE:-600}
      - --execution.caching.trie-dirty-cache=${PLUME_TESTNET_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_DIRTY_CACHE:-1024}
      - --execution.forwarding-target=https://testnet-rpc.plume.org
      - --execution.rpc.gas-cap=5500000000
      - --execution.rpc.log-history=0
      - --execution.sequencer.enable=false
      - --http.addr=0.0.0.0
      - --http.api=eth,net,web3,arb,txpool,debug
      - --http.corsdomain=*
      - --http.port=8545
      - --http.vhosts=*
      - --metrics
      - --metrics-server.addr=0.0.0.0
      - --metrics-server.port=6070
      - --node.batch-poster.enable=false
      - --node.da-provider.enable=false
      - --node.data-availability.enable=true
      - --node.data-availability.parent-chain-node-url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
      - --node.data-availability.rest-aggregator.enable=true
      - --node.data-availability.rest-aggregator.urls=https://das-plume-testnet-1.t.conduit.xyz
      - --node.data-availability.sequencer-inbox-address=0xbCa991f1831bE1F1E7e5576d5F84A645e70F3E4d
      - --node.feed.input.url=wss://relay-plume-testnet-1.t.conduit.xyz
      - --node.sequencer=false
      - --node.staker.enable=false
      - --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
      - --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
      - --persistent.chain=/root/.arbitrum/plume-testnet-archive
      - --ws.addr=0.0.0.0
      - --ws.origins=*
      - --ws.port=8545
    restart: unless-stopped
    stop_grace_period: 5m
    networks:
      - chains
    volumes:
      - ${PLUME_TESTNET_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-plume-testnet-nitro-archive-leveldb-hash}:/root/.arbitrum
      - ./arb/plume/testnet:/config
      - /slowdisk:/slowdisk
    logging: *logging-defaults
    labels:
      - prometheus-scrape.enabled=true
      - prometheus-scrape.port=6070
      - prometheus-scrape.path=/debug/metrics/prometheus
      - traefik.enable=true
      - traefik.http.middlewares.plume-testnet-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/plume-testnet-archive
      - traefik.http.services.plume-testnet-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
      - ${NO_SSL:-traefik.http.routers.plume-testnet-nitro-archive-leveldb-hash.entrypoints=websecure}
      - ${NO_SSL:-traefik.http.routers.plume-testnet-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
      - ${NO_SSL:-traefik.http.routers.plume-testnet-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/plume-testnet-archive`) || Path(`/plume-testnet-archive/`))}
      - ${NO_SSL:+traefik.http.routers.plume-testnet-nitro-archive-leveldb-hash.rule=Path(`/plume-testnet-archive`) || Path(`/plume-testnet-archive/`)}
      - traefik.http.routers.plume-testnet-nitro-archive-leveldb-hash.middlewares=plume-testnet-nitro-archive-leveldb-hash-stripprefix, ipallowlist

volumes:
  plume-testnet-nitro-archive-leveldb-hash:

x-upstreams:
  - id: $${ID}
    labels:
      provider: $${PROVIDER}
    connection:
      generic:
        rpc:
          url: $${RPC_URL}
        ws:
          frameSize: 20Mb
          msgSize: 50Mb
          url: $${WS_URL}
    chain: plume-testnet
    method-groups:
      enabled:
        - debug
        - filter
    methods:
      disabled:
      enabled:
        - name: txpool_content # TODO: should be disabled for rollup nodes
        # standard geth only
        - name: debug_getRawBlock
        - name: debug_getRawTransaction
        - name: debug_getRawReceipts
        - name: debug_getRawHeader
        - name: debug_getBadBlocks
        # non-standard, geth only, slightly dangerous
        - name: debug_intermediateRoots
        - name: debug_dumpBlock
        # standard geth and erigon
        - name: debug_accountRange
        - name: debug_getModifiedAccountsByNumber
        - name: debug_getModifiedAccountsByHash
        # non-standard, geth and erigon
        - name: eth_getRawTransactionByHash
        - name: eth_getRawTransactionByBlockHashAndIndex
...
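Since the node serves WebSocket on the same port as HTTP (8545), the /plume-testnet-archive path also accepts subscription traffic once the ws router matches the Upgrade header. A smoke-test sketch; websocat is an arbitrary client choice, not part of this stack:

echo '{"jsonrpc":"2.0","id":1,"method":"eth_subscribe","params":["newHeads"]}' \
  | websocat "wss://$DOMAIN/plume-testnet-archive"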
@@ -1,150 +0,0 @@
---
x-logging-defaults: &logging-defaults
  driver: json-file
  options:
    max-size: "10m"
    max-file: "3"

# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/plume-testnet-nitro-pruned-pebble-path.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/plume-testnet \
#   -H "Content-Type: application/json" \
#   --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
  plume-testnet:
    image: ${PLUME_NITRO_IMAGE:-offchainlabs/nitro-node}:${PLUME_TESTNET_NITRO_VERSION:-v3.9.5-66e42c4}
    sysctls:
      # TCP Performance
      net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
      net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
      net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
      net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
      net.core.somaxconn: 32768 # Higher connection queue
      # Memory/Connection Management
      # net.core.netdev_max_backlog: 50000 # Increase network buffer
      net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
      net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
    ulimits:
      nofile: 1048576 # Max open files (for RPC/WS connections)
    user: root
    expose:
      - 8545
    command:
      - --conf.file=/config/baseConfig.json
      - --execution.caching.archive=${PLUME_TESTNET_ARCHIVE_DB:-false}
      - --execution.caching.database-cache=${PLUME_TESTNET_NITRO_PRUNED_PEBBLE_PATH_DATABASE_CACHE:-2048}
      - --execution.caching.snapshot-cache=${PLUME_TESTNET_NITRO_PRUNED_PEBBLE_PATH_SNAPSHOT_CACHE:-400}
      - --execution.caching.state-scheme=path
      - --execution.caching.trie-clean-cache=${PLUME_TESTNET_NITRO_PRUNED_PEBBLE_PATH_TRIE_CLEAN_CACHE:-600}
      - --execution.caching.trie-dirty-cache=${PLUME_TESTNET_NITRO_PRUNED_PEBBLE_PATH_TRIE_DIRTY_CACHE:-1024}
      - --execution.forwarding-target=https://testnet-rpc.plume.org
      - --execution.rpc.gas-cap=5500000000
      - --execution.sequencer.enable=false
      - --http.addr=0.0.0.0
      - --http.api=eth,net,web3,arb,txpool,debug
      - --http.corsdomain=*
      - --http.port=8545
      - --http.vhosts=*
      - --metrics
      - --metrics-server.addr=0.0.0.0
      - --metrics-server.port=6070
      - --node.batch-poster.enable=false
      - --node.da-provider.enable=false
      - --node.data-availability.enable=true
      - --node.data-availability.parent-chain-node-url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
      - --node.data-availability.rest-aggregator.enable=true
      - --node.data-availability.rest-aggregator.urls=https://das-plume-testnet-1.t.conduit.xyz
      - --node.data-availability.sequencer-inbox-address=0xbCa991f1831bE1F1E7e5576d5F84A645e70F3E4d
      - --node.feed.input.url=wss://relay-plume-testnet-1.t.conduit.xyz
      - --node.sequencer=false
      - --node.staker.enable=false
      - --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
      - --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
      - --persistent.chain=/root/.arbitrum/plume-testnet
      - --persistent.db-engine=pebble
      - --ws.addr=0.0.0.0
      - --ws.origins=*
      - --ws.port=8545
    restart: unless-stopped
    stop_grace_period: 5m
    networks:
      - chains
    volumes:
      - ${PLUME_TESTNET_NITRO_PRUNED_PEBBLE_PATH_DATA:-plume-testnet-nitro-pruned-pebble-path}:/root/.arbitrum
      - ./arb/plume/testnet:/config
      - /slowdisk:/slowdisk
    logging: *logging-defaults
    labels:
      - prometheus-scrape.enabled=true
      - prometheus-scrape.port=6070
      - prometheus-scrape.path=/debug/metrics/prometheus
      - traefik.enable=true
      - traefik.http.middlewares.plume-testnet-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/plume-testnet
      - traefik.http.services.plume-testnet-nitro-pruned-pebble-path.loadbalancer.server.port=8545
      - ${NO_SSL:-traefik.http.routers.plume-testnet-nitro-pruned-pebble-path.entrypoints=websecure}
      - ${NO_SSL:-traefik.http.routers.plume-testnet-nitro-pruned-pebble-path.tls.certresolver=myresolver}
      - ${NO_SSL:-traefik.http.routers.plume-testnet-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && (Path(`/plume-testnet`) || Path(`/plume-testnet/`))}
      - ${NO_SSL:+traefik.http.routers.plume-testnet-nitro-pruned-pebble-path.rule=Path(`/plume-testnet`) || Path(`/plume-testnet/`)}
      - traefik.http.routers.plume-testnet-nitro-pruned-pebble-path.middlewares=plume-testnet-nitro-pruned-pebble-path-stripprefix, ipallowlist

volumes:
  plume-testnet-nitro-pruned-pebble-path:

x-upstreams:
  - id: $${ID}
    labels:
      provider: $${PROVIDER}
    connection:
      generic:
        rpc:
          url: $${RPC_URL}
        ws:
          frameSize: 20Mb
          msgSize: 50Mb
          url: $${WS_URL}
    chain: plume-testnet
    method-groups:
      enabled:
        - debug
        - filter
    methods:
      disabled:
        # not compatible with path state scheme
        - name: debug_traceBlockByHash
      enabled:
        - name: txpool_content # TODO: should be disabled for rollup nodes
        # standard geth only
        - name: debug_getRawBlock
        - name: debug_getRawTransaction
        - name: debug_getRawReceipts
        - name: debug_getRawHeader
        - name: debug_getBadBlocks
        # non-standard, geth only, slightly dangerous
        - name: debug_intermediateRoots
        - name: debug_dumpBlock
        # standard geth and erigon
        - name: debug_accountRange
        - name: debug_getModifiedAccountsByNumber
        - name: debug_getModifiedAccountsByHash
        # non-standard, geth and erigon
        - name: eth_getRawTransactionByHash
        - name: eth_getRawTransactionByBlockHashAndIndex
...
@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
  real-mainnet-archive:
    image: ${REAL_NITRO_IMAGE:-offchainlabs/nitro-node}:${REAL_MAINNET_NITRO_VERSION:-v3.9.5-66e42c4}
    image: ${REAL_NITRO_IMAGE:-offchainlabs/nitro-node}:${REAL_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
    sysctls:
      # TCP Performance
      net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -50,24 +50,19 @@ services:
    command:
      - --conf.file=/config/baseConfig.json
      - --execution.caching.archive=true
      - --execution.caching.database-cache=${REAL_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_DATABASE_CACHE:-2048}
      - --execution.caching.snapshot-cache=${REAL_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_SNAPSHOT_CACHE:-400}
      - --execution.caching.trie-clean-cache=${REAL_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_CLEAN_CACHE:-600}
      - --execution.caching.trie-dirty-cache=${REAL_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_DIRTY_CACHE:-1024}
      - --execution.forwarding-target=https://rpc.realforreal.gelato.digital
      - --execution.rpc.gas-cap=5500000000
      - --execution.rpc.log-history=0
      - --execution.rpc.gas-cap=600000000
      - --execution.sequencer.enable=false
      - --http.addr=0.0.0.0
      - --http.api=eth,net,web3,arb,txpool,debug
      - --http.corsdomain=*
      - --http.port=8545
      - --http.vhosts=*
      - --init.download-path=/tmp
      - --metrics
      - --metrics-server.addr=0.0.0.0
      - --metrics-server.port=6070
      - --node.batch-poster.enable=false
      - --node.da-provider.enable=false
      - --node.data-availability.enable=true
      - --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
      - --node.data-availability.rest-aggregator.enable=true

@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
  real-mainnet-archive:
    image: ${REAL_NITRO_IMAGE:-offchainlabs/nitro-node}:${REAL_MAINNET_NITRO_VERSION:-v3.9.5-66e42c4}
    image: ${REAL_NITRO_IMAGE:-offchainlabs/nitro-node}:${REAL_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
    sysctls:
      # TCP Performance
      net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -50,24 +50,19 @@ services:
    command:
      - --conf.file=/config/baseConfig.json
      - --execution.caching.archive=true
      - --execution.caching.database-cache=${REAL_MAINNET_NITRO_ARCHIVE_PEBBLE_HASH_DATABASE_CACHE:-2048}
      - --execution.caching.snapshot-cache=${REAL_MAINNET_NITRO_ARCHIVE_PEBBLE_HASH_SNAPSHOT_CACHE:-400}
      - --execution.caching.trie-clean-cache=${REAL_MAINNET_NITRO_ARCHIVE_PEBBLE_HASH_TRIE_CLEAN_CACHE:-600}
      - --execution.caching.trie-dirty-cache=${REAL_MAINNET_NITRO_ARCHIVE_PEBBLE_HASH_TRIE_DIRTY_CACHE:-1024}
      - --execution.forwarding-target=https://rpc.realforreal.gelato.digital
      - --execution.rpc.gas-cap=5500000000
      - --execution.rpc.log-history=0
      - --execution.rpc.gas-cap=600000000
      - --execution.sequencer.enable=false
      - --http.addr=0.0.0.0
      - --http.api=eth,net,web3,arb,txpool,debug
      - --http.corsdomain=*
      - --http.port=8545
      - --http.vhosts=*
      - --init.download-path=/tmp
      - --metrics
      - --metrics-server.addr=0.0.0.0
      - --metrics-server.port=6070
      - --node.batch-poster.enable=false
      - --node.da-provider.enable=false
      - --node.data-availability.enable=true
      - --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
      - --node.data-availability.rest-aggregator.enable=true

@@ -30,7 +30,7 @@ x-logging-defaults: &logging-defaults

services:
  real-mainnet:
    image: ${REAL_NITRO_IMAGE:-offchainlabs/nitro-node}:${REAL_MAINNET_NITRO_VERSION:-v3.9.5-66e42c4}
    image: ${REAL_NITRO_IMAGE:-offchainlabs/nitro-node}:${REAL_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
    sysctls:
      # TCP Performance
      net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -49,25 +49,21 @@ services:
      - 8545
    command:
      - --conf.file=/config/baseConfig.json
      - --execution.caching.archive=${REAL_MAINNET_ARCHIVE_DB:-false}
      - --execution.caching.database-cache=${REAL_MAINNET_NITRO_PRUNED_PEBBLE_PATH_DATABASE_CACHE:-2048}
      - --execution.caching.snapshot-cache=${REAL_MAINNET_NITRO_PRUNED_PEBBLE_PATH_SNAPSHOT_CACHE:-400}
      - --execution.caching.archive=false
      - --execution.caching.state-scheme=path
      - --execution.caching.trie-clean-cache=${REAL_MAINNET_NITRO_PRUNED_PEBBLE_PATH_TRIE_CLEAN_CACHE:-600}
      - --execution.caching.trie-dirty-cache=${REAL_MAINNET_NITRO_PRUNED_PEBBLE_PATH_TRIE_DIRTY_CACHE:-1024}
      - --execution.forwarding-target=https://rpc.realforreal.gelato.digital
      - --execution.rpc.gas-cap=5500000000
      - --execution.rpc.gas-cap=600000000
      - --execution.sequencer.enable=false
      - --http.addr=0.0.0.0
      - --http.api=eth,net,web3,arb,txpool,debug
      - --http.corsdomain=*
      - --http.port=8545
      - --http.vhosts=*
      - --init.download-path=/tmp
      - --metrics
      - --metrics-server.addr=0.0.0.0
      - --metrics-server.port=6070
      - --node.batch-poster.enable=false
      - --node.da-provider.enable=false
      - --node.data-availability.enable=true
      - --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
      - --node.data-availability.rest-aggregator.enable=true

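Besides the image pin, the notable change across these real-mainnet hunks is the gas cap dropping from 5500000000 to 600000000: the node limits how much gas a single eth_call or eth_estimateGas may consume server-side. A sketch probing it (the endpoint path and zero-address call target are assumptions; geth-lineage nodes typically clamp an oversized gas request to the cap rather than erroring):

curl -X POST https://$DOMAIN/real-mainnet-archive \
  -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_call","params":[{"to":"0x0000000000000000000000000000000000000000","gas":"0x2540be400"},"latest"],"id":1}'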
@@ -1,5 +0,0 @@
{
  "chain": {
    "info-json": "[{\"chain-id\":98866,\"parent-chain-id\":1,\"chain-name\":\"conduit-orbit-deployer\",\"chain-config\":{\"chainId\":98866,\"homesteadBlock\":0,\"daoForkBlock\":null,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"berlinBlock\":0,\"londonBlock\":0,\"clique\":{\"period\":0,\"epoch\":0},\"arbitrum\":{\"EnableArbOS\":true,\"AllowDebugPrecompiles\":false,\"DataAvailabilityCommittee\":true,\"InitialArbOSVersion\":32,\"InitialChainOwner\":\"0x5Ec32984332eaB190cA431545664320259D755d8\",\"GenesisBlockNum\":0}},\"rollup\":{\"bridge\":\"0x35381f63091926750F43b2A7401B083263aDEF83\",\"inbox\":\"0x943fc691242291B74B105e8D19bd9E5DC2fcBa1D\",\"sequencer-inbox\":\"0x85eC1b9138a8b9659A51e2b51bb0861901040b59\",\"rollup\":\"0x35c60Cc77b0A8bf6F938B11bd3E9D319a876c2aC\",\"validator-utils\":\"0x84eA2523b271029FFAeB58fc6E6F1435a280db44\",\"validator-wallet-creator\":\"0x0A5eC2286bB15893d5b8f320aAbc823B2186BA09\",\"deployed-at\":21887008}}]"
  }
}
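The deleted file above is the chain metadata that --conf.file=/config/baseConfig.json points at, with the whole rollup description packed into the escaped info-json string. To read it comfortably, one can unescape it with jq (a sketch; the on-disk path follows from the ./arb/plume/mainnet:/config mount and is otherwise an assumption):

jq -r '.chain."info-json"' arb/plume/mainnet/baseConfig.json | jq .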
1
arbitrum-nova.yml
Symbolic link
@@ -0,0 +1 @@
arb/nitro/arbitrum-nova-nitro-pruned-pebble-hash.yml
1
arbitrum-one-nitro-archive.yml
Symbolic link
@@ -0,0 +1 @@
arb/nitro/alephzero-mainnet-nitro-archive-leveldb-hash.yml
1
arbitrum-one.yml
Symbolic link
@@ -0,0 +1 @@
arb/nitro/arbitrum-one-nitro-pruned-pebble-hash.yml
1
arbitrum-sepolia-archive.yml
Symbolic link
@@ -0,0 +1 @@
arb/nitro/arbitrum-sepolia-nitro-archive-pebble-hash.yml
1
arbitrum-sepolia-nitro-pruned-pebble-hash.yml
Symbolic link
@@ -0,0 +1 @@
arb/nitro/arbitrum-sepolia-nitro-pruned-pebble-hash.yml
1
arbitrum-sepolia.yml
Symbolic link
@@ -0,0 +1 @@
arb/nitro/arbitrum-sepolia-nitro-pruned-pebble-hash.yml
1
avalanche-fuji.yml
Symbolic link
@@ -0,0 +1 @@
avalanche/go/avalanche-fuji-go-pruned-pebbledb.yml
1
avalanche-mainnet-archive.yml
Symbolic link
@@ -0,0 +1 @@
avalanche/go/avalanche-mainnet-go-archive-leveldb.yml
1
avalanche-mainnet.yml
Symbolic link
@@ -0,0 +1 @@
avalanche/go/avalanche-mainnet-go-pruned-pebbledb.yml
4
avalanche/configs/chains/C/archive-config.json
Normal file
@@ -0,0 +1,4 @@
{
  "state-sync-enabled": false,
  "pruning-enabled": false
}
43
avalanche/configs/chains/C/config.json
Normal file
@@ -0,0 +1,43 @@
{
  "snowman-api-enabled": false,
  "coreth-admin-api-enabled": false,
  "coreth-admin-api-dir": "",
  "eth-apis": [
    "public-eth",
    "public-eth-filter",
    "net",
    "web3",
    "internal-public-eth",
    "internal-public-blockchain",
    "internal-public-transaction-pool",
    "internal-public-account"
  ],
  "continuous-profiler-dir": "",
  "continuous-profiler-frequency": 900000000000,
  "continuous-profiler-max-files": 5,
  "rpc-gas-cap": 50000000,
  "rpc-tx-fee-cap": 100,
  "preimages-enabled": false,
  "pruning-enabled": true,
  "snapshot-async": true,
  "snapshot-verification-enabled": false,
  "metrics-enabled": false,
  "metrics-expensive-enabled": false,
  "local-txs-enabled": false,
  "api-max-duration": 0,
  "ws-cpu-refill-rate": 0,
  "ws-cpu-max-stored": 0,
  "api-max-blocks-per-request": 0,
  "allow-unfinalized-queries": false,
  "allow-unprotected-txs": false,
  "keystore-directory": "",
  "keystore-external-signer": "",
  "keystore-insecure-unlock-allowed": false,
  "remote-tx-gossip-only-enabled": false,
  "tx-regossip-frequency": 60000000000,
  "tx-regossip-max-size": 15,
  "log-level": "debug",
  "offline-pruning-enabled": false,
  "offline-pruning-bloom-filter-size": 512,
  "offline-pruning-data-directory": ""
}
@@ -0,0 +1,6 @@
{
  "state-sync-enabled": true,
  "pruning-enabled": true,
  "offline-pruning-enabled": true,
  "offline-pruning-data-directory": "/root/.avalanchego/offline-pruning"
}
7
avalanche/configs/chains/C/fullnode-config.json
Normal file
@@ -0,0 +1,7 @@
{
  "state-sync-enabled": true,
  "pruning-enabled": true,
  "offline-pruning-enabled": false,
  "offline-pruning-data-directory": "/root/.avalanchego/offline-pruning",
  "rpc-gas-cap": 600000000
}
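Of the new C-chain configs, the 6-line one enables offline pruning (coreth then prunes on the next start, keeping its bloom filter under /root/.avalanchego/offline-pruning), while fullnode-config.json keeps it disabled for normal operation. A sketch of triggering a one-off prune; the service name is an assumption based on the compose files below, and per the upstream docs the flag is typically set back to false once the run completes:

# point the node at the config that sets "offline-pruning-enabled": true,
# then bounce it and follow progress in the logs
docker compose restart avalanche-mainnet-client
docker compose logs -f avalanche-mainnet-client | grep -i pruning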
@@ -1,6 +1,4 @@
{
  "state-sync-enabled": false,
  "pruning-enabled": false,
  "rpc-gas-cap": 2500000000,
  "eth-rpc-gas-limit": 2500000000
  "pruning-enabled": false
}

@@ -1,4 +0,0 @@
{
  "rpc-gas-cap": 2500000000,
  "eth-rpc-gas-limit": 2500000000
}
@@ -29,8 +29,8 @@ x-logging-defaults: &logging-defaults
#   --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
  avalanche-fuji-archive:
    image: ${AVALANCHE_GO_IMAGE:-avaplatform/avalanchego}:${AVALANCHE_FUJI_GO_VERSION:-v1.14.1}
  avalanche-fuji-archive-client:
    image: ${AVALANCHE_GO_IMAGE:-avaplatform/avalanchego}:${AVALANCHE_FUJI_GO_VERSION:-v1.13.0-fuji}
    sysctls:
      # TCP Performance
      net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -50,6 +50,7 @@ services:
      - 10046:10046/udp
    expose:
      - 9650
    entrypoint: [/avalanchego/build/avalanchego]
    command:
      - --chain-config-dir=/config/archive
@@ -70,26 +71,35 @@ services:
    logging: *logging-defaults
    labels:
      - prometheus-scrape.enabled=false

  avalanche-fuji-archive:
    image: nginx
    expose:
      - '80'
    environment:
      PROXY_HOST: avalanche-fuji-archive-client
      RPC_PATH: /ext/bc/C/rpc
      RPC_PORT: 9650
      WS_PATH: /ext/bc/C/ws
      WS_PORT: 9650
    restart: unless-stopped
    depends_on:
      - avalanche-fuji-archive-client
    networks:
      - chains
    volumes:
      - ./nginx-proxy:/etc/nginx/templates
    logging: *logging-defaults
    labels:
      - prometheus-scrape.enabled=false
      - traefik.enable=true
      - traefik.http.middlewares.avalanche-fuji-go-archive-leveldb-set-path.replacepath.path=/ext/bc/C/rpc
      - traefik.http.middlewares.avalanche-fuji-go-archive-leveldb-stripprefix.stripprefix.prefixes=/avalanche-fuji-archive
      - traefik.http.services.avalanche-fuji-go-archive-leveldb.loadbalancer.server.port=9650
      - traefik.http.services.avalanche-fuji-go-archive-leveldb.loadbalancer.server.port=80
      - ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-archive-leveldb.entrypoints=websecure}
      - ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-archive-leveldb.tls.certresolver=myresolver}
      - ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-archive-leveldb.rule=Host(`$DOMAIN`) && (Path(`/avalanche-fuji-archive`) || Path(`/avalanche-fuji-archive/`))}
      - ${NO_SSL:+traefik.http.routers.avalanche-fuji-go-archive-leveldb.rule=Path(`/avalanche-fuji-archive`) || Path(`/avalanche-fuji-archive/`)}
      - traefik.http.routers.avalanche-fuji-go-archive-leveldb.middlewares=avalanche-fuji-go-archive-leveldb-stripprefix, avalanche-fuji-go-archive-leveldb-set-path, ipallowlist
      - traefik.http.routers.avalanche-fuji-go-archive-leveldb.priority=50 # gets any request that is not GET with UPGRADE header
      - traefik.http.routers.avalanche-fuji-go-archive-leveldb-ws.priority=100 # answers GET requests first
      - traefik.http.middlewares.avalanche-fuji-go-archive-leveldb-set-ws-path.replacepath.path=/ext/bc/C/ws
      - traefik.http.services.avalanche-fuji-go-archive-leveldb-ws.loadbalancer.server.port=9650
      - traefik.http.routers.avalanche-fuji-go-archive-leveldb-ws.service=avalanche-fuji-go-archive-leveldb-ws
      - traefik.http.routers.avalanche-fuji-go-archive-leveldb.service=avalanche-fuji-go-archive-leveldb
      - ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-archive-leveldb-ws.entrypoints=websecure}
      - ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-archive-leveldb-ws.tls.certresolver=myresolver}
      - ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-archive-leveldb-ws.rule=Host(`$DOMAIN`) && (Path(`/avalanche-fuji-archive`) || Path(`/avalanche-fuji-archive/`)) && Headers(`Upgrade`, `websocket`)}
      - ${NO_SSL:+traefik.http.routers.avalanche-fuji-go-archive-leveldb-ws.rule=(Path(`/avalanche-fuji-archive`) || Path(`/avalanche-fuji-archive/`)) && Headers(`Upgrade`, `websocket`)}
      - traefik.http.routers.avalanche-fuji-go-archive-leveldb-ws.middlewares=avalanche-fuji-go-archive-leveldb-stripprefix, avalanche-fuji-go-archive-leveldb-set-ws-path, ipallowlist
      - traefik.http.routers.avalanche-fuji-go-archive-leveldb.middlewares=avalanche-fuji-go-archive-leveldb-stripprefix, ipallowlist

volumes:
  avalanche-fuji-go-archive-leveldb:

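The new nginx service reuses the stock nginx image's template mechanism: files under /etc/nginx/templates have their ${VAR} placeholders substituted from the container environment at startup, which is how PROXY_HOST, RPC_PATH, and WS_PATH reach the proxy and replace the old traefik replacepath middlewares. The repository's ./nginx-proxy template itself is not part of this diff; what follows is only a hypothetical sketch of such a template, switching paths on the Upgrade header (it also assumes RPC_PORT and WS_PORT are equal, as they are in these files):

mkdir -p nginx-proxy
cat > nginx-proxy/default.conf.template <<'EOF'
# route WebSocket upgrades to the WS path, everything else to the RPC path
map $http_upgrade $backend_path {
    default    ${RPC_PATH};
    websocket  ${WS_PATH};
}

server {
    listen 80;

    location / {
        proxy_pass http://${PROXY_HOST}:${RPC_PORT}$backend_path;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $http_connection;
    }
}
EOF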
@@ -1,118 +0,0 @@
---
x-logging-defaults: &logging-defaults
  driver: json-file
  options:
    max-size: "10m"
    max-file: "3"

# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:avalanche/go/avalanche-fuji-go-pruned-leveldb.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/avalanche-fuji \
#   -H "Content-Type: application/json" \
#   --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
  avalanche-fuji:
    image: ${AVALANCHE_GO_IMAGE:-avaplatform/avalanchego}:${AVALANCHE_FUJI_GO_VERSION:-v1.14.1}
    sysctls:
      # TCP Performance
      net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
      net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
      net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
      net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
      net.core.somaxconn: 32768 # Higher connection queue
      # Memory/Connection Management
      # net.core.netdev_max_backlog: 50000 # Increase network buffer
      net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
      net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
    ulimits:
      nofile: 1048576 # Max open files (for RPC/WS connections)
    user: root
    ports:
      - 12059:12059
      - 12059:12059/udp
    expose:
      - 9650
    entrypoint: [/avalanchego/build/avalanchego]
    command:
      - --chain-config-dir=/config/pruned
      - --db-type=leveldb
      - --http-allowed-hosts=*
      - --http-host=
      - --network-id=fuji
      - --public-ip=${IP}
      - --staking-port=12059
    restart: unless-stopped
    stop_grace_period: 5m
    networks:
      - chains
    volumes:
      - ${AVALANCHE_FUJI_GO_PRUNED_LEVELDB_DATA:-avalanche-fuji-go-pruned-leveldb}:/root/.avalanchego
      - ./avalanche/fuji:/config
      - /slowdisk:/slowdisk
    logging: *logging-defaults
    labels:
      - prometheus-scrape.enabled=false
      - traefik.enable=true
      - traefik.http.middlewares.avalanche-fuji-go-pruned-leveldb-set-path.replacepath.path=/ext/bc/C/rpc
      - traefik.http.middlewares.avalanche-fuji-go-pruned-leveldb-stripprefix.stripprefix.prefixes=/avalanche-fuji
      - traefik.http.services.avalanche-fuji-go-pruned-leveldb.loadbalancer.server.port=9650
      - ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-pruned-leveldb.entrypoints=websecure}
      - ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-pruned-leveldb.tls.certresolver=myresolver}
      - ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-pruned-leveldb.rule=Host(`$DOMAIN`) && (Path(`/avalanche-fuji`) || Path(`/avalanche-fuji/`))}
      - ${NO_SSL:+traefik.http.routers.avalanche-fuji-go-pruned-leveldb.rule=Path(`/avalanche-fuji`) || Path(`/avalanche-fuji/`)}
      - traefik.http.routers.avalanche-fuji-go-pruned-leveldb.middlewares=avalanche-fuji-go-pruned-leveldb-stripprefix, avalanche-fuji-go-pruned-leveldb-set-path, ipallowlist
      - traefik.http.routers.avalanche-fuji-go-pruned-leveldb.priority=50 # gets any request that is not GET with UPGRADE header
      - traefik.http.routers.avalanche-fuji-go-pruned-leveldb-ws.priority=100 # answers GET requests first
      - traefik.http.middlewares.avalanche-fuji-go-pruned-leveldb-set-ws-path.replacepath.path=/ext/bc/C/ws
      - traefik.http.services.avalanche-fuji-go-pruned-leveldb-ws.loadbalancer.server.port=9650
      - traefik.http.routers.avalanche-fuji-go-pruned-leveldb-ws.service=avalanche-fuji-go-pruned-leveldb-ws
      - traefik.http.routers.avalanche-fuji-go-pruned-leveldb.service=avalanche-fuji-go-pruned-leveldb
      - ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-pruned-leveldb-ws.entrypoints=websecure}
      - ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-pruned-leveldb-ws.tls.certresolver=myresolver}
      - ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-pruned-leveldb-ws.rule=Host(`$DOMAIN`) && (Path(`/avalanche-fuji`) || Path(`/avalanche-fuji/`)) && Headers(`Upgrade`, `websocket`)}
      - ${NO_SSL:+traefik.http.routers.avalanche-fuji-go-pruned-leveldb-ws.rule=(Path(`/avalanche-fuji`) || Path(`/avalanche-fuji/`)) && Headers(`Upgrade`, `websocket`)}
      - traefik.http.routers.avalanche-fuji-go-pruned-leveldb-ws.middlewares=avalanche-fuji-go-pruned-leveldb-stripprefix, avalanche-fuji-go-pruned-leveldb-set-ws-path, ipallowlist

volumes:
  avalanche-fuji-go-pruned-leveldb:

x-upstreams:
  - id: $${ID}
    labels:
      provider: $${PROVIDER}
    connection:
      generic:
        rpc:
          url: $${RPC_URL}
        ws:
          frameSize: 20Mb
          msgSize: 50Mb
          url: $${WS_URL}
    chain: avalanche
    method-groups:
      enabled:
        - debug
        - filter
    methods:
      disabled:
      enabled:
        - name: txpool_content # TODO: should be disabled for rollup nodes
...
@@ -29,8 +29,8 @@ x-logging-defaults: &logging-defaults
#   --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
  avalanche-fuji:
    image: ${AVALANCHE_GO_IMAGE:-avaplatform/avalanchego}:${AVALANCHE_FUJI_GO_VERSION:-v1.14.1}
  avalanche-fuji-client:
    image: ${AVALANCHE_GO_IMAGE:-avaplatform/avalanchego}:${AVALANCHE_FUJI_GO_VERSION:-v1.13.0-fuji}
    sysctls:
      # TCP Performance
      net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -50,6 +50,7 @@ services:
      - 10350:10350/udp
    expose:
      - 9650
    entrypoint: [/avalanchego/build/avalanchego]
    command:
      - --chain-config-dir=/config/pruned
@@ -70,26 +71,35 @@ services:
    logging: *logging-defaults
    labels:
      - prometheus-scrape.enabled=false

  avalanche-fuji:
    image: nginx
    expose:
      - '80'
    environment:
      PROXY_HOST: avalanche-fuji-client
      RPC_PATH: /ext/bc/C/rpc
      RPC_PORT: 9650
      WS_PATH: /ext/bc/C/ws
      WS_PORT: 9650
    restart: unless-stopped
    depends_on:
      - avalanche-fuji-client
    networks:
      - chains
    volumes:
      - ./nginx-proxy:/etc/nginx/templates
    logging: *logging-defaults
    labels:
      - prometheus-scrape.enabled=false
      - traefik.enable=true
      - traefik.http.middlewares.avalanche-fuji-go-pruned-pebbledb-set-path.replacepath.path=/ext/bc/C/rpc
      - traefik.http.middlewares.avalanche-fuji-go-pruned-pebbledb-stripprefix.stripprefix.prefixes=/avalanche-fuji
      - traefik.http.services.avalanche-fuji-go-pruned-pebbledb.loadbalancer.server.port=9650
      - traefik.http.services.avalanche-fuji-go-pruned-pebbledb.loadbalancer.server.port=80
      - ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-pruned-pebbledb.entrypoints=websecure}
      - ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-pruned-pebbledb.tls.certresolver=myresolver}
      - ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-pruned-pebbledb.rule=Host(`$DOMAIN`) && (Path(`/avalanche-fuji`) || Path(`/avalanche-fuji/`))}
      - ${NO_SSL:+traefik.http.routers.avalanche-fuji-go-pruned-pebbledb.rule=Path(`/avalanche-fuji`) || Path(`/avalanche-fuji/`)}
      - traefik.http.routers.avalanche-fuji-go-pruned-pebbledb.middlewares=avalanche-fuji-go-pruned-pebbledb-stripprefix, avalanche-fuji-go-pruned-pebbledb-set-path, ipallowlist
      - traefik.http.routers.avalanche-fuji-go-pruned-pebbledb.priority=50 # gets any request that is not GET with UPGRADE header
      - traefik.http.routers.avalanche-fuji-go-pruned-pebbledb-ws.priority=100 # answers GET requests first
      - traefik.http.middlewares.avalanche-fuji-go-pruned-pebbledb-set-ws-path.replacepath.path=/ext/bc/C/ws
      - traefik.http.services.avalanche-fuji-go-pruned-pebbledb-ws.loadbalancer.server.port=9650
      - traefik.http.routers.avalanche-fuji-go-pruned-pebbledb-ws.service=avalanche-fuji-go-pruned-pebbledb-ws
      - traefik.http.routers.avalanche-fuji-go-pruned-pebbledb.service=avalanche-fuji-go-pruned-pebbledb
      - ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-pruned-pebbledb-ws.entrypoints=websecure}
      - ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-pruned-pebbledb-ws.tls.certresolver=myresolver}
      - ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-pruned-pebbledb-ws.rule=Host(`$DOMAIN`) && (Path(`/avalanche-fuji`) || Path(`/avalanche-fuji/`)) && Headers(`Upgrade`, `websocket`)}
      - ${NO_SSL:+traefik.http.routers.avalanche-fuji-go-pruned-pebbledb-ws.rule=(Path(`/avalanche-fuji`) || Path(`/avalanche-fuji/`)) && Headers(`Upgrade`, `websocket`)}
      - traefik.http.routers.avalanche-fuji-go-pruned-pebbledb-ws.middlewares=avalanche-fuji-go-pruned-pebbledb-stripprefix, avalanche-fuji-go-pruned-pebbledb-set-ws-path, ipallowlist
      - traefik.http.routers.avalanche-fuji-go-pruned-pebbledb.middlewares=avalanche-fuji-go-pruned-pebbledb-stripprefix, ipallowlist

volumes:
  avalanche-fuji-go-pruned-pebbledb:

@@ -29,8 +29,8 @@ x-logging-defaults: &logging-defaults
#   --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
  avalanche-mainnet-archive:
    image: ${AVALANCHE_GO_IMAGE:-avaplatform/avalanchego}:${AVALANCHE_MAINNET_GO_VERSION:-v1.14.1}
  avalanche-mainnet-archive-client:
    image: ${AVALANCHE_GO_IMAGE:-avaplatform/avalanchego}:${AVALANCHE_MAINNET_GO_VERSION:-v1.13.0}
    sysctls:
      # TCP Performance
      net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -50,6 +50,7 @@ services:
      - 12934:12934/udp
    expose:
      - 9650
    entrypoint: [/avalanchego/build/avalanchego]
    command:
      - --chain-config-dir=/config/archive
@@ -70,26 +71,35 @@ services:
    logging: *logging-defaults
    labels:
      - prometheus-scrape.enabled=false

  avalanche-mainnet-archive:
    image: nginx
    expose:
      - '80'
    environment:
      PROXY_HOST: avalanche-mainnet-archive-client
      RPC_PATH: /ext/bc/C/rpc
      RPC_PORT: 9650
      WS_PATH: /ext/bc/C/ws
      WS_PORT: 9650
    restart: unless-stopped
    depends_on:
      - avalanche-mainnet-archive-client
    networks:
      - chains
    volumes:
      - ./nginx-proxy:/etc/nginx/templates
    logging: *logging-defaults
    labels:
      - prometheus-scrape.enabled=false
      - traefik.enable=true
      - traefik.http.middlewares.avalanche-mainnet-go-archive-leveldb-set-path.replacepath.path=/ext/bc/C/rpc
      - traefik.http.middlewares.avalanche-mainnet-go-archive-leveldb-stripprefix.stripprefix.prefixes=/avalanche-mainnet-archive
      - traefik.http.services.avalanche-mainnet-go-archive-leveldb.loadbalancer.server.port=9650
      - traefik.http.services.avalanche-mainnet-go-archive-leveldb.loadbalancer.server.port=80
      - ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-archive-leveldb.entrypoints=websecure}
      - ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-archive-leveldb.tls.certresolver=myresolver}
      - ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-archive-leveldb.rule=Host(`$DOMAIN`) && (Path(`/avalanche-mainnet-archive`) || Path(`/avalanche-mainnet-archive/`))}
      - ${NO_SSL:+traefik.http.routers.avalanche-mainnet-go-archive-leveldb.rule=Path(`/avalanche-mainnet-archive`) || Path(`/avalanche-mainnet-archive/`)}
      - traefik.http.routers.avalanche-mainnet-go-archive-leveldb.middlewares=avalanche-mainnet-go-archive-leveldb-stripprefix, avalanche-mainnet-go-archive-leveldb-set-path, ipallowlist
      - traefik.http.routers.avalanche-mainnet-go-archive-leveldb.priority=50 # gets any request that is not GET with UPGRADE header
      - traefik.http.routers.avalanche-mainnet-go-archive-leveldb-ws.priority=100 # answers GET requests first
      - traefik.http.middlewares.avalanche-mainnet-go-archive-leveldb-set-ws-path.replacepath.path=/ext/bc/C/ws
      - traefik.http.services.avalanche-mainnet-go-archive-leveldb-ws.loadbalancer.server.port=9650
      - traefik.http.routers.avalanche-mainnet-go-archive-leveldb-ws.service=avalanche-mainnet-go-archive-leveldb-ws
      - traefik.http.routers.avalanche-mainnet-go-archive-leveldb.service=avalanche-mainnet-go-archive-leveldb
      - ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-archive-leveldb-ws.entrypoints=websecure}
      - ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-archive-leveldb-ws.tls.certresolver=myresolver}
      - ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-archive-leveldb-ws.rule=Host(`$DOMAIN`) && (Path(`/avalanche-mainnet-archive`) || Path(`/avalanche-mainnet-archive/`)) && Headers(`Upgrade`, `websocket`)}
      - ${NO_SSL:+traefik.http.routers.avalanche-mainnet-go-archive-leveldb-ws.rule=(Path(`/avalanche-mainnet-archive`) || Path(`/avalanche-mainnet-archive/`)) && Headers(`Upgrade`, `websocket`)}
      - traefik.http.routers.avalanche-mainnet-go-archive-leveldb-ws.middlewares=avalanche-mainnet-go-archive-leveldb-stripprefix, avalanche-mainnet-go-archive-leveldb-set-ws-path, ipallowlist
      - traefik.http.routers.avalanche-mainnet-go-archive-leveldb.middlewares=avalanche-mainnet-go-archive-leveldb-stripprefix, ipallowlist

volumes:
  avalanche-mainnet-go-archive-leveldb:

@@ -1,118 +0,0 @@
---
x-logging-defaults: &logging-defaults
  driver: json-file
  options:
    max-size: "10m"
    max-file: "3"

# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:avalanche/go/avalanche-mainnet-go-pruned-leveldb.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/avalanche-mainnet \
#   -H "Content-Type: application/json" \
#   --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
  avalanche-mainnet:
    image: ${AVALANCHE_GO_IMAGE:-avaplatform/avalanchego}:${AVALANCHE_MAINNET_GO_VERSION:-v1.14.1}
    sysctls:
      # TCP Performance
      net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
      net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
      net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
      net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
      net.core.somaxconn: 32768 # Higher connection queue
      # Memory/Connection Management
      # net.core.netdev_max_backlog: 50000 # Increase network buffer
      net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
      net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
    ulimits:
      nofile: 1048576 # Max open files (for RPC/WS connections)
    user: root
    ports:
      - 12757:12757
      - 12757:12757/udp
    expose:
      - 9650
    entrypoint: [/avalanchego/build/avalanchego]
    command:
      - --chain-config-dir=/config/pruned
      - --db-type=leveldb
      - --http-allowed-hosts=*
      - --http-host=
      - --network-id=mainnet
      - --public-ip=${IP}
      - --staking-port=12757
    restart: unless-stopped
    stop_grace_period: 5m
    networks:
      - chains
    volumes:
      - ${AVALANCHE_MAINNET_GO_PRUNED_LEVELDB_DATA:-avalanche-mainnet-go-pruned-leveldb}:/root/.avalanchego
      - ./avalanche/mainnet:/config
      - /slowdisk:/slowdisk
    logging: *logging-defaults
    labels:
      - prometheus-scrape.enabled=false
      - traefik.enable=true
      - traefik.http.middlewares.avalanche-mainnet-go-pruned-leveldb-set-path.replacepath.path=/ext/bc/C/rpc
      - traefik.http.middlewares.avalanche-mainnet-go-pruned-leveldb-stripprefix.stripprefix.prefixes=/avalanche-mainnet
      - traefik.http.services.avalanche-mainnet-go-pruned-leveldb.loadbalancer.server.port=9650
      - ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-pruned-leveldb.entrypoints=websecure}
      - ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-pruned-leveldb.tls.certresolver=myresolver}
      - ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-pruned-leveldb.rule=Host(`$DOMAIN`) && (Path(`/avalanche-mainnet`) || Path(`/avalanche-mainnet/`))}
      - ${NO_SSL:+traefik.http.routers.avalanche-mainnet-go-pruned-leveldb.rule=Path(`/avalanche-mainnet`) || Path(`/avalanche-mainnet/`)}
      - traefik.http.routers.avalanche-mainnet-go-pruned-leveldb.middlewares=avalanche-mainnet-go-pruned-leveldb-stripprefix, avalanche-mainnet-go-pruned-leveldb-set-path, ipallowlist
      - traefik.http.routers.avalanche-mainnet-go-pruned-leveldb.priority=50 # gets any request that is not GET with UPGRADE header
      - traefik.http.routers.avalanche-mainnet-go-pruned-leveldb-ws.priority=100 # answers GET requests first
      - traefik.http.middlewares.avalanche-mainnet-go-pruned-leveldb-set-ws-path.replacepath.path=/ext/bc/C/ws
      - traefik.http.services.avalanche-mainnet-go-pruned-leveldb-ws.loadbalancer.server.port=9650
      - traefik.http.routers.avalanche-mainnet-go-pruned-leveldb-ws.service=avalanche-mainnet-go-pruned-leveldb-ws
      - traefik.http.routers.avalanche-mainnet-go-pruned-leveldb.service=avalanche-mainnet-go-pruned-leveldb
      - ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-pruned-leveldb-ws.entrypoints=websecure}
      - ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-pruned-leveldb-ws.tls.certresolver=myresolver}
      - ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-pruned-leveldb-ws.rule=Host(`$DOMAIN`) && (Path(`/avalanche-mainnet`) || Path(`/avalanche-mainnet/`)) && Headers(`Upgrade`, `websocket`)}
      - ${NO_SSL:+traefik.http.routers.avalanche-mainnet-go-pruned-leveldb-ws.rule=(Path(`/avalanche-mainnet`) || Path(`/avalanche-mainnet/`)) && Headers(`Upgrade`, `websocket`)}
      - traefik.http.routers.avalanche-mainnet-go-pruned-leveldb-ws.middlewares=avalanche-mainnet-go-pruned-leveldb-stripprefix, avalanche-mainnet-go-pruned-leveldb-set-ws-path, ipallowlist

volumes:
  avalanche-mainnet-go-pruned-leveldb:

x-upstreams:
  - id: $${ID}
    labels:
      provider: $${PROVIDER}
    connection:
      generic:
        rpc:
          url: $${RPC_URL}
        ws:
          frameSize: 20Mb
          msgSize: 50Mb
          url: $${WS_URL}
    chain: avalanche
    method-groups:
      enabled:
        - debug
        - filter
    methods:
      disabled:
      enabled:
        - name: txpool_content # TODO: should be disabled for rollup nodes
...
@@ -29,8 +29,8 @@ x-logging-defaults: &logging-defaults
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
  avalanche-mainnet:
    image: ${AVALANCHE_GO_IMAGE:-avaplatform/avalanchego}:${AVALANCHE_MAINNET_GO_VERSION:-v1.14.1}
  avalanche-mainnet-client:
    image: ${AVALANCHE_GO_IMAGE:-avaplatform/avalanchego}:${AVALANCHE_MAINNET_GO_VERSION:-v1.13.0}
    sysctls:
      # TCP Performance
      net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
@@ -50,6 +50,7 @@ services:
      - 11929:11929/udp
    expose:
      - 9650
      - 9650
    entrypoint: [/avalanchego/build/avalanchego]
    command:
      - --chain-config-dir=/config/pruned
@@ -70,26 +71,35 @@ services:
    logging: *logging-defaults
    labels:
      - prometheus-scrape.enabled=false

  avalanche-mainnet:
    image: nginx
    expose:
      - '80'
    environment:
      PROXY_HOST: avalanche-mainnet-client
      RPC_PATH: /ext/bc/C/rpc
      RPC_PORT: 9650
      WS_PATH: /ext/bc/C/ws
      WS_PORT: 9650
    restart: unless-stopped
    depends_on:
      - avalanche-mainnet-client
    networks:
      - chains
    volumes:
      - ./nginx-proxy:/etc/nginx/templates
    logging: *logging-defaults
    labels:
      - prometheus-scrape.enabled=false
      - traefik.enable=true
      - traefik.http.middlewares.avalanche-mainnet-go-pruned-pebbledb-set-path.replacepath.path=/ext/bc/C/rpc
      - traefik.http.middlewares.avalanche-mainnet-go-pruned-pebbledb-stripprefix.stripprefix.prefixes=/avalanche-mainnet
      - traefik.http.services.avalanche-mainnet-go-pruned-pebbledb.loadbalancer.server.port=9650
      - traefik.http.services.avalanche-mainnet-go-pruned-pebbledb.loadbalancer.server.port=80
      - ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb.entrypoints=websecure}
      - ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb.tls.certresolver=myresolver}
      - ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb.rule=Host(`$DOMAIN`) && (Path(`/avalanche-mainnet`) || Path(`/avalanche-mainnet/`))}
      - ${NO_SSL:+traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb.rule=Path(`/avalanche-mainnet`) || Path(`/avalanche-mainnet/`)}
      - traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb.middlewares=avalanche-mainnet-go-pruned-pebbledb-stripprefix, avalanche-mainnet-go-pruned-pebbledb-set-path, ipallowlist
      - traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb.priority=50 # gets any request that is not GET with UPGRADE header
      - traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb-ws.priority=100 # answers GET requests first
      - traefik.http.middlewares.avalanche-mainnet-go-pruned-pebbledb-set-ws-path.replacepath.path=/ext/bc/C/ws
      - traefik.http.services.avalanche-mainnet-go-pruned-pebbledb-ws.loadbalancer.server.port=9650
      - traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb-ws.service=avalanche-mainnet-go-pruned-pebbledb-ws
      - traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb.service=avalanche-mainnet-go-pruned-pebbledb
      - ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb-ws.entrypoints=websecure}
      - ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb-ws.tls.certresolver=myresolver}
      - ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb-ws.rule=Host(`$DOMAIN`) && (Path(`/avalanche-mainnet`) || Path(`/avalanche-mainnet/`)) && Headers(`Upgrade`, `websocket`)}
      - ${NO_SSL:+traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb-ws.rule=(Path(`/avalanche-mainnet`) || Path(`/avalanche-mainnet/`)) && Headers(`Upgrade`, `websocket`)}
      - traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb-ws.middlewares=avalanche-mainnet-go-pruned-pebbledb-stripprefix, avalanche-mainnet-go-pruned-pebbledb-set-ws-path, ipallowlist
      - traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb.middlewares=avalanche-mainnet-go-pruned-pebbledb-stripprefix, ipallowlist

volumes:
  avalanche-mainnet-go-pruned-pebbledb:

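The paired routers above split traffic by protocol: the `-ws` router at priority 100 matches only requests carrying an `Upgrade: websocket` header and rewrites the path to `/ext/bc/C/ws`, while the priority-50 router takes everything else and rewrites to `/ext/bc/C/rpc`. A quick way to exercise both paths from outside, assuming `$DOMAIN` is set and `websocat` is installed:

```bash
# A plain JSON-RPC POST lands on the priority-50 HTTP router
curl -X POST "https://$DOMAIN/avalanche-mainnet" \
  -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

# The same path with a WebSocket upgrade lands on the priority-100 router instead
echo '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' \
  | websocat "wss://$DOMAIN/avalanche-mainnet"
```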
@@ -1,21 +0,0 @@
{
  "snowman-api-enabled": false,
  "coreth-admin-api-enabled": false,
  "net-api-enabled": true,
  "rpc-gas-cap": 2500000000,
  "rpc-tx-fee-cap": 100,
  "eth-rpc-gas-limit": 2500000000,
  "eth-api-enabled": true,
  "personal-api-enabled": false,
  "tx-pool-api-enabled": false,
  "debug-api-enabled": false,
  "web3-api-enabled": true,
  "local-txs-enabled": false,
  "pruning-enabled": true,
  "api-max-duration": 0,
  "api-max-blocks-per-request": 0,
  "allow-unfinalized-queries": false,
  "log-level": "info",
  "state-sync-enabled": false,
  "state-sync-skip-resume": true
}
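The deleted JSON above is a C-Chain (coreth) configuration. With `--chain-config-dir=/config/pruned` on the avalanchego command line, the node expects per-chain configs in subdirectories of that path; a sketch of the layout this file used to occupy (the mount point is an assumption based on the flag above):

```bash
# avalanchego resolves the C-Chain config as <chain-config-dir>/C/config.json
mkdir -p /config/pruned/C
cat > /config/pruned/C/config.json <<'EOF'
{
  "pruning-enabled": true,
  "eth-api-enabled": true
}
EOF
```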
@@ -1,112 +0,0 @@
---
x-logging-defaults: &logging-defaults
  driver: json-file
  options:
    max-size: "10m"
    max-file: "3"
# Aztec full node. See https://docs.aztec.network/network/setup/running_a_node
# Admin port (8880) is not exposed; use docker exec for admin API.

# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:aztec/aztec/aztec-devnet-aztec-pruned.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/aztec-devnet \
#   -H "Content-Type: application/json" \
#   --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
  aztec-devnet:
    image: ${AZTEC_AZTEC_IMAGE:-aztecprotocol/aztec}:${AZTEC_DEVNET_AZTEC_VERSION:-3.0.0-devnet.6-patch.1}
    sysctls:
      # TCP Performance
      net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
      net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
      net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
      net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
      net.core.somaxconn: 32768 # Higher connection queue
      # Memory/Connection Management
      # net.core.netdev_max_backlog: 50000 # Increase network buffer
      net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
      net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
    ulimits:
      nofile: 1048576 # Max open files (for RPC/WS connections)
    user: root
    ports:
      - 12024:12024
      - 12024:12024/udp
    expose:
      - 8080
    environment:
      AZTEC_ADMIN_PORT: '8880'
      AZTEC_PORT: '8080'
      DATA_DIRECTORY: /var/lib/data
      ETHEREUM_HOSTS: ${ETHEREUM_SEPOLIA_EXECUTION_RPC}
      L1_CONSENSUS_HOST_URLS: ${ETHEREUM_SEPOLIA_BEACON_REST}
      LOG_LEVEL: ${AZTEC_LOG_LEVEL:-info}
      P2P_IP: ${IP}
      P2P_PORT: '12024'
    entrypoint: [node, --no-warnings, /usr/src/yarn-project/aztec/dest/bin/index.js, start]
    command:
      - --archiver
      - --network=devnet
      - --node
    restart: unless-stopped
    stop_grace_period: 5m
    networks:
      - chains
    volumes:
      - ${AZTEC_DEVNET_AZTEC_PRUNED_DATA:-aztec-devnet-aztec-pruned}:/var/lib/data
      - /slowdisk:/slowdisk
    logging: *logging-defaults
    labels:
      - prometheus-scrape.enabled=false
      - traefik.enable=true
      - traefik.http.middlewares.aztec-devnet-aztec-pruned-stripprefix.stripprefix.prefixes=/aztec-devnet
      - traefik.http.services.aztec-devnet-aztec-pruned.loadbalancer.server.port=8080
      - ${NO_SSL:-traefik.http.routers.aztec-devnet-aztec-pruned.entrypoints=websecure}
      - ${NO_SSL:-traefik.http.routers.aztec-devnet-aztec-pruned.tls.certresolver=myresolver}
      - ${NO_SSL:-traefik.http.routers.aztec-devnet-aztec-pruned.rule=Host(`$DOMAIN`) && (Path(`/aztec-devnet`) || Path(`/aztec-devnet/`))}
      - ${NO_SSL:+traefik.http.routers.aztec-devnet-aztec-pruned.rule=Path(`/aztec-devnet`) || Path(`/aztec-devnet/`)}
      - traefik.http.routers.aztec-devnet-aztec-pruned.middlewares=aztec-devnet-aztec-pruned-stripprefix, ipallowlist

volumes:
  aztec-devnet-aztec-pruned:

x-upstreams:
  - id: $${ID}
    labels:
      provider: $${PROVIDER}
    connection:
      generic:
        rpc:
          url: $${RPC_URL}
        ws:
          frameSize: 20Mb
          msgSize: 50Mb
          url: $${WS_URL}
    chain: aztec-devnet
    method-groups:
      enabled:
        - debug
        - filter
    methods:
      disabled:
      enabled:
        - name: txpool_content # TODO: should be disabled for rollup nodes
...
@@ -1,112 +0,0 @@
---
x-logging-defaults: &logging-defaults
  driver: json-file
  options:
    max-size: "10m"
    max-file: "3"
# Aztec full node. See https://docs.aztec.network/network/setup/running_a_node
# Admin port (8880) is not exposed; use docker exec for admin API.

# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:aztec/aztec/aztec-testnet-aztec-pruned.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/aztec-testnet \
#   -H "Content-Type: application/json" \
#   --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

services:
  aztec-testnet:
    image: ${AZTEC_AZTEC_IMAGE:-aztecprotocol/aztec}:${AZTEC_TESTNET_AZTEC_VERSION:-3.0.2}
    sysctls:
      # TCP Performance
      net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
      net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
      net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
      net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
      net.core.somaxconn: 32768 # Higher connection queue
      # Memory/Connection Management
      # net.core.netdev_max_backlog: 50000 # Increase network buffer
      net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
      net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
    ulimits:
      nofile: 1048576 # Max open files (for RPC/WS connections)
    user: root
    ports:
      - 13009:13009
      - 13009:13009/udp
    expose:
      - 8080
    environment:
      AZTEC_ADMIN_PORT: '8880'
      AZTEC_PORT: '8080'
      DATA_DIRECTORY: /var/lib/data
      ETHEREUM_HOSTS: ${ETHEREUM_SEPOLIA_EXECUTION_RPC}
      L1_CONSENSUS_HOST_URLS: ${ETHEREUM_SEPOLIA_BEACON_REST}
      LOG_LEVEL: ${AZTEC_LOG_LEVEL:-info}
      P2P_IP: ${IP}
      P2P_PORT: '13009'
    entrypoint: [node, --no-warnings, /usr/src/yarn-project/aztec/dest/bin/index.js, start]
    command:
      - --archiver
      - --network=testnet
      - --node
    restart: unless-stopped
    stop_grace_period: 5m
    networks:
      - chains
    volumes:
      - ${AZTEC_TESTNET_AZTEC_PRUNED_DATA:-aztec-testnet-aztec-pruned}:/var/lib/data
      - /slowdisk:/slowdisk
    logging: *logging-defaults
    labels:
      - prometheus-scrape.enabled=false
      - traefik.enable=true
      - traefik.http.middlewares.aztec-testnet-aztec-pruned-stripprefix.stripprefix.prefixes=/aztec-testnet
      - traefik.http.services.aztec-testnet-aztec-pruned.loadbalancer.server.port=8080
      - ${NO_SSL:-traefik.http.routers.aztec-testnet-aztec-pruned.entrypoints=websecure}
      - ${NO_SSL:-traefik.http.routers.aztec-testnet-aztec-pruned.tls.certresolver=myresolver}
      - ${NO_SSL:-traefik.http.routers.aztec-testnet-aztec-pruned.rule=Host(`$DOMAIN`) && (Path(`/aztec-testnet`) || Path(`/aztec-testnet/`))}
      - ${NO_SSL:+traefik.http.routers.aztec-testnet-aztec-pruned.rule=Path(`/aztec-testnet`) || Path(`/aztec-testnet/`)}
      - traefik.http.routers.aztec-testnet-aztec-pruned.middlewares=aztec-testnet-aztec-pruned-stripprefix, ipallowlist

volumes:
  aztec-testnet-aztec-pruned:

x-upstreams:
  - id: $${ID}
    labels:
      provider: $${PROVIDER}
    connection:
      generic:
        rpc:
          url: $${RPC_URL}
        ws:
          frameSize: 20Mb
          msgSize: 50Mb
          url: $${WS_URL}
    chain: aztec-testnet
    method-groups:
      enabled:
        - debug
        - filter
    methods:
      disabled:
      enabled:
        - name: txpool_content # TODO: should be disabled for rollup nodes
...
@@ -1,6 +1,5 @@
#!/bin/bash

BASEPATH="$(dirname "$0")"
backup_dir="/backup"

if [[ -n $2 ]]; then
@@ -12,36 +11,6 @@ else
  fi
fi

# Function to generate metadata for a single volume
generate_volume_metadata() {
    local volume_key=$1
    local source_folder=$2
    local metadata_file=$3

    prefix="/var/lib/docker/volumes/rpc_$volume_key"
    static_file_list="$BASEPATH/static-file-path-list.txt"

    # Initialize metadata file
    echo "Static file paths and sizes for volume: rpc_$volume_key" > "$metadata_file"
    echo "Generated: $(date)" >> "$metadata_file"
    echo "" >> "$metadata_file"

    # Check each static file path
    if [[ -f "$static_file_list" ]]; then
        while IFS= read -r path; do
            # Check if the path exists
            if [[ -e "$prefix/_data/$path" ]]; then
                # Get the size
                size=$(du -sL "$prefix/_data/$path" 2>/dev/null | awk '{print $1}')
                # Format size in human-readable format
                size_formatted=$(echo "$(( size * 1024 ))" | numfmt --to=iec --suffix=B --format="%.2f")
                # Write to metadata file
                echo "$size_formatted $path" >> "$metadata_file"
            fi
        done < "$static_file_list"
    fi
}

# Read the JSON input and extract the list of keys
keys=$(cat /root/rpc/$1.yml | yaml2json - | jq '.volumes' | jq -r 'keys[]')

@@ -68,37 +37,15 @@ for key in $keys; do

    folder_size_gb=$(printf "%.0f" "$folder_size")

    timestamp=$(date +'%Y-%m-%d-%H-%M-%S')
    target_file="rpc_$key-${timestamp}-${folder_size_gb}G.tar.zst"
    metadata_file_name="rpc_$key-${timestamp}-${folder_size_gb}G.txt"
    target_file="rpc_$key-$(date +'%Y-%m-%d-%H-%M-%S')-${folder_size_gb}G.tar.zst"

    #echo "$target_file"

    if [[ -n $2 ]]; then
        # Upload volume archive
        tar -cf - --dereference "$source_folder" | pv -pterb -s $(du -sb "$source_folder" | awk '{print $1}') | zstd | curl -X PUT --upload-file - "$2/null/uploading-$target_file"
        curl -X MOVE -H "Destination: /null/$target_file" "$2/null/uploading-$target_file"

        # Generate and upload metadata file
        echo "Generating metadata for volume: rpc_$key"
        temp_metadata="/tmp/$metadata_file_name"
        generate_volume_metadata "$key" "$source_folder" "$temp_metadata"
        curl -X PUT --upload-file "$temp_metadata" "$2/null/$metadata_file_name"
        rm -f "$temp_metadata"
    else
        # Create volume archive
        tar -cf - --dereference "$source_folder" | pv -pterb -s $(du -sb "$source_folder" | awk '{print $1}') | zstd -o "/backup/uploading-$target_file"
        mv "/backup/uploading-$target_file" "/backup/$target_file"

        # Generate metadata file
        echo "Generating metadata for volume: rpc_$key"
        generate_volume_metadata "$key" "$source_folder" "/backup/$metadata_file_name"
    fi
done

# Run show-size.sh to display overall summary
echo ""
echo "=== Overall Size Summary ==="
if [[ -f "$BASEPATH/show-size.sh" ]]; then
    "$BASEPATH/show-size.sh" "$1" 2>&1
fi

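The upload branch streams `tar | pv | zstd` straight into an HTTP `PUT` under an `uploading-` prefix and only `MOVE`s it to its final name once complete, so a half-finished upload can never be mistaken for a good backup. Restoring is the reverse pipe; a sketch, with the server URL and archive name as placeholders that follow the naming scheme above:

```bash
# Stream a snapshot back into place: download, decompress, unpack.
# $BACKUP_URL and the archive name are illustrative.
curl -s "$BACKUP_URL/null/rpc_myvolume-2025-01-01-00-00-00-123G.tar.zst" \
  | zstd -d \
  | tar -xf - -C /
```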
377
backup-peers.sh
@@ -1,377 +0,0 @@
#!/bin/bash

# Script to backup peers from all running nodes
# Can be run as a cronjob to periodically backup peer lists
# Usage: ./backup-peers.sh [backup-directory] [--verbose]

BASEPATH="$(dirname "$0")"
source $BASEPATH/.env

# Parse arguments
VERBOSE=false
BACKUP_DIR=""

for arg in "$@"; do
    case "$arg" in
        --verbose|-v)
            VERBOSE=true
            ;;
        --help|-h)
            echo "Usage: $0 [backup-directory] [--verbose|-v]"
            echo ""
            echo "  backup-directory: Optional. Directory to store backups (default: ./peer-backups)"
            echo "  --verbose, -v: Enable verbose output"
            exit 0
            ;;
        *)
            if [ -z "$BACKUP_DIR" ] && [[ ! "$arg" =~ ^- ]]; then
                BACKUP_DIR="$arg"
            fi
            ;;
    esac
done

# Default backup directory if not provided
if [ -z "$BACKUP_DIR" ]; then
    BACKUP_DIR="$BASEPATH/peer-backups"
fi

# Create backup directory if it doesn't exist
mkdir -p "$BACKUP_DIR"

# Timestamp for this backup run
TIMESTAMP=$(date +%Y%m%d_%H%M%S)

# Blacklist for compose files (same as show-status.sh)
blacklist=(
    "drpc.yml" "drpc-free.yml" "drpc-home.yml" # dshackles
    "arbitrum-one-mainnet-arbnode-archive-trace.yml" # always behind and no reference rpc
    "ethereum-beacon-mainnet-lighthouse-pruned-blobs" # can't handle beacon rest api yet
    "rpc.yml" "monitoring.yml" "ftp.yml" "backup-http.yml" "base.yml" # no rpcs
)

# Path blacklist (read from file if it exists)
path_blacklist=()
if [ -f "$BASEPATH/path-blacklist.txt" ]; then
    while IFS= read -r line; do
        if [ -n "$line" ]; then
            path_blacklist+=("$line")
        fi
    done < "$BASEPATH/path-blacklist.txt"
fi

# Protocol and domain settings
if [ -n "$NO_SSL" ]; then
    PROTO="http"
    DOMAIN="${DOMAIN:-0.0.0.0}"
else
    PROTO="https"
    # For HTTPS, DOMAIN should be set
    if [ -z "$DOMAIN" ]; then
        echo "Error: DOMAIN variable not found in $BASEPATH/.env" >&2
        echo "Please set DOMAIN in your .env file" >&2
        exit 1
    fi
fi

# Function to extract RPC paths from a compose file
extract_rpc_paths() {
    local compose_file="$1"
    local full_path="$BASEPATH/${compose_file}"

    if [ ! -f "$full_path" ]; then
        return 1
    fi

    # Extract paths using grep (same method as peer-count.sh)
    # Try Perl regex first, fallback to extended regex if -P is not supported
    pathlist=$(cat "$full_path" | grep -oP "stripprefix\.prefixes.*?/\K[^\"]+" 2>/dev/null)
    if [ $? -ne 0 ] || [ -z "$pathlist" ]; then
        # Fallback for systems without Perl regex support
        pathlist=$(cat "$full_path" | grep -oE "stripprefix\.prefixes[^:]*:.*?/([^\"]+)" 2>/dev/null | sed -E 's/.*\/([^"]+)/\1/' | grep -v '^$')
    fi

    if [ -z "$pathlist" ]; then
        return 1
    fi

    echo "$pathlist"
}

# Function to check if a path should be included
should_include_path() {
    local path="$1"

    # Always exclude paths ending with /node (consensus client endpoints)
    if [[ "$path" =~ /node$ ]]; then
        if [ "$VERBOSE" = true ]; then
            echo " Path $path excluded: ends with /node"
        fi
        return 1
    fi

    for word in "${path_blacklist[@]}"; do
        # Unescape the pattern (handle \-node -> -node)
        pattern=$(echo "$word" | sed 's/\\-/-/g')
        # Use -- to prevent grep from interpreting pattern as options
        if echo "$path" | grep -qE -- "$pattern"; then
            if [ "$VERBOSE" = true ]; then
                echo " Path $path matches blacklist pattern: $word"
            fi
            return 1
        fi
    done

    return 0
}

# Function to backup peers from a single RPC endpoint
backup_peers_from_path() {
    local compose_file="$1"
    local path="$2"
    local compose_name="${compose_file%.yml}"

    # Sanitize compose name and path for filename
    local safe_compose_name=$(echo "$compose_name" | sed 's/[^a-zA-Z0-9_-]/_/g')
    local safe_path=$(echo "$path" | sed 's|[^a-zA-Z0-9_-]|_|g')

    # Ensure path starts with /
    if [[ ! "$path" =~ ^/ ]]; then
        path="/$path"
    fi

    local RPC_URL="${PROTO}://${DOMAIN}${path}"

    # Try admin_peers first (returns detailed peer info)
    response=$(curl --ipv4 -L -s -X POST "$RPC_URL" \
        -H "Content-Type: application/json" \
        --data '{"jsonrpc":"2.0","method":"admin_peers","params":[],"id":1}' \
        --max-time 10 2>/dev/null)

    # Check for curl errors
    if [ $? -ne 0 ]; then
        echo "✗ Failed to connect to $compose_file ($path): curl error"
        return 1
    fi

    # Check if we got a valid response
    if echo "$response" | jq -e '.result' > /dev/null 2>&1; then
        peer_count=$(echo "$response" | jq -r '.result | length')

        if [ "$peer_count" -gt 0 ]; then
            # Extract enodes
            enodes=$(echo "$response" | jq -r '.result[].enode' 2>/dev/null | grep -v '^$' | grep -v '^null$')

            if [ -n "$enodes" ]; then
                # Create backup file
                local backup_file="$BACKUP_DIR/${safe_compose_name}__${safe_path}__${TIMESTAMP}.json"

                # Create JSON structure with metadata
                {
                    echo "{"
                    echo " \"compose_file\": \"$compose_file\","
                    echo " \"rpc_path\": \"$path\","
                    echo " \"rpc_url\": \"$RPC_URL\","
                    echo " \"timestamp\": \"$TIMESTAMP\","
                    echo " \"peer_count\": $peer_count,"
                    echo " \"peers\": ["

                    # Write enodes as JSON array
                    first=true
                    while IFS= read -r enode; do
                        if [ -z "$enode" ] || [ "$enode" = "null" ]; then
                            continue
                        fi

                        if [ "$first" = true ]; then
                            first=false
                        else
                            echo ","
                        fi

                        # Escape the enode string for JSON
                        escaped_enode=$(echo "$enode" | sed 's/\\/\\\\/g' | sed 's/"/\\"/g')
                        echo -n " \"$escaped_enode\""
                    done <<< "$enodes"

                    echo ""
                    echo " ]"
                    echo "}"
                } > "$backup_file"

                # Also create a simple text file with just enodes (one per line) for easy playback
                local backup_txt_file="$BACKUP_DIR/${safe_compose_name}__${safe_path}__${TIMESTAMP}.txt"
                echo "$enodes" > "$backup_txt_file"

                # Extract just the filename for display
                backup_filename=$(basename "$backup_file" 2>/dev/null || echo "${backup_file##*/}")
                echo "✓ Backed up $peer_count peer(s) from $compose_file ($path) to $backup_filename"
                return 0
            fi
        else
            if [ "$VERBOSE" = true ]; then
                echo "⚠ No peers found for $compose_file ($path)"
            fi
            return 2 # Return 2 for "no peers" (not a failure, just nothing to backup)
        fi
    else
        # Check if this is a method not found error (consensus client or admin API disabled)
        error_code=$(echo "$response" | jq -r '.error.code // empty' 2>/dev/null)
        error_message=$(echo "$response" | jq -r '.error.message // empty' 2>/dev/null)

        if [ -n "$error_code" ] && [ "$error_code" != "null" ]; then
            # Check if it's a method not found error (likely consensus client)
            if [ "$error_code" = "-32601" ] || [ "$error_code" = "32601" ]; then
                # Method not found - likely consensus client, skip silently
                return 1
            else
                # Other error
                echo "✗ $compose_file ($path): RPC error $error_code - ${error_message:-unknown error}"
                return 1
            fi
        fi

        # Try net_peerCount as fallback (but we can't get enodes from this)
        response=$(curl --ipv4 -L -s -X POST "$RPC_URL" \
            -H "Content-Type: application/json" \
            --data '{"jsonrpc":"2.0","method":"net_peerCount","params":[],"id":1}' \
            --max-time 10 2>/dev/null)

        if echo "$response" | jq -e '.result' > /dev/null 2>&1; then
            hex_value=$(echo "$response" | jq -r '.result')
            # Convert hex to decimal (net_peerCount returns hex like "0x10")
            peer_count=$((hex_value))
            if [ "$peer_count" -gt 0 ]; then
                echo "⚠ $compose_file ($path) has $peer_count peer(s) but admin_peers not available (cannot backup enodes)"
            else
                echo "⚠ $compose_file ($path): no peers connected"
            fi
        else
            # Couldn't get peer count either
            if [ -z "$response" ]; then
                echo "✗ $compose_file ($path): no response from RPC endpoint"
            else
                echo "✗ $compose_file ($path): RPC endpoint not accessible or invalid"
            fi
        fi

        return 1
    fi
}

# Main execution
if [ -z "$COMPOSE_FILE" ]; then
    echo "Error: COMPOSE_FILE not found in $BASEPATH/.env" >&2
    exit 1
fi

# Split COMPOSE_FILE by colon
IFS=':' read -ra parts <<< "$COMPOSE_FILE"

total_backed_up=0
total_failed=0
total_skipped=0
total_no_peers=0

echo "Starting peer backup at $(date)"
echo "Backup directory: $BACKUP_DIR"
echo "COMPOSE_FILE contains: ${#parts[@]} compose file(s)"
echo ""

# Process each compose file
for part in "${parts[@]}"; do
    # Handle compose file name - part might already have .yml or might not
    if [[ "$part" == *.yml ]]; then
        compose_file="$part"
    else
        compose_file="${part}.yml"
    fi

    # Check if file exists
    if [ ! -f "$BASEPATH/$compose_file" ]; then
        echo "⚠ Skipping $compose_file: file not found"
        total_skipped=$((total_skipped + 1))
        continue
    fi

    # Check blacklist
    include=true
    for word in "${blacklist[@]}"; do
        # Use -- to prevent grep from interpreting pattern as options
        if echo "$compose_file" | grep -qE -- "$word"; then
            include=false
            break
        fi
    done

    if [ "$include" = false ]; then
        total_skipped=$((total_skipped + 1))
        continue
    fi

    # Extract RPC paths from compose file
    paths=$(extract_rpc_paths "$compose_file")

    if [ -z "$paths" ]; then
        echo "⚠ Skipping $compose_file: no RPC paths found"
        total_skipped=$((total_skipped + 1))
        continue
    fi

    # Process each path
    path_found=false
    # Use while loop with read to safely handle paths with spaces or special characters
    while IFS= read -r path || [ -n "$path" ]; do
        # Skip empty paths
        if [ -z "$path" ]; then
            continue
        fi
        # Check path blacklist
        if should_include_path "$path"; then
            path_found=true
            backup_peers_from_path "$compose_file" "$path"
            exit_code=$?
            if [ $exit_code -eq 0 ]; then
                total_backed_up=$((total_backed_up + 1))
            elif [ $exit_code -eq 2 ]; then
                # No peers (not a failure)
                total_no_peers=$((total_no_peers + 1))
            else
                total_failed=$((total_failed + 1))
            fi
        else
            if [ "$VERBOSE" = true ]; then
                echo "⚠ Skipping path $path from $compose_file: blacklisted"
            fi
        fi
    done <<< "$paths"

    if [ "$path_found" = false ]; then
        total_skipped=$((total_skipped + 1))
    fi
done

echo ""
echo "=========================================="
echo "Backup Summary"
echo "=========================================="
echo "Total nodes backed up: $total_backed_up"
if [ $total_no_peers -gt 0 ]; then
    echo "Total nodes with no peers: $total_no_peers"
fi
echo "Total nodes failed: $total_failed"
echo "Total nodes skipped: $total_skipped"
echo "Backup directory: $BACKUP_DIR"
echo "Completed at $(date)"
echo ""

# Optional: Clean up old backups (keep last 30 days)
if [ -n "$CLEANUP_OLD_BACKUPS" ] && [ "$CLEANUP_OLD_BACKUPS" = "true" ]; then
    echo "Cleaning up backups older than 30 days..."
    find "$BACKUP_DIR" -name "*.json" -type f -mtime +30 -delete
    find "$BACKUP_DIR" -name "*.txt" -type f -mtime +30 -delete
    echo "Cleanup complete"
fi

exit 0

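The plain-text sidecar written next to each JSON backup exists for playback: one enode per line can be fed back through `admin_addPeer` on any execution client that exposes the admin namespace. A minimal replay sketch (`$RPC_URL` and the backup filename are placeholders):

```bash
# Re-add every backed-up enode to a node exposing admin_addPeer.
while IFS= read -r enode; do
  curl -s -X POST "$RPC_URL" \
    -H "Content-Type: application/json" \
    --data "{\"jsonrpc\":\"2.0\",\"method\":\"admin_addPeer\",\"params\":[\"$enode\"],\"id\":1}"
done < peer-backups/mychain__path__20250101_000000.txt
```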
1
base-mainnet-op-erigon-archive-trace.yml
Symbolic link
@@ -0,0 +1 @@
op/erigon/base-mainnet-op-erigon-archive-trace.yml
1
base-mainnet-op-reth-archive-trace.yml
Symbolic link
@@ -0,0 +1 @@
op/reth/base-mainnet-op-reth-archive-trace.yml
1
base-mainnet-op-reth-pruned.yml
Symbolic link
@@ -0,0 +1 @@
op/reth/base-mainnet-op-reth-pruned-trace.yml
1
base-mainnet.yml
Symbolic link
@@ -0,0 +1 @@
op/geth/base-mainnet-op-geth-pruned-pebble-path.yml
1
base-sepolia-op-reth-pruned.yml
Symbolic link
@@ -0,0 +1 @@
op/reth/base-sepolia-op-reth-pruned-trace.yml
1
base-sepolia.yml
Symbolic link
@@ -0,0 +1 @@
op/geth/base-sepolia-op-geth-pruned-pebble-path.yml
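These top-level symlinks give each node flavor a short, stable name, so a `COMPOSE_FILE` entry can say `base-mainnet.yml` while the actual definition lives under `op/geth/`. Usage then follows the same pattern the compose-file headers in this repo already document:

```bash
# The short symlink name stands in for the full per-client path.
export COMPOSE_FILE=base.yml:rpc.yml:base-mainnet.yml
docker compose up -d
```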
27
benchmark-proxy/Dockerfile
Normal file
@@ -0,0 +1,27 @@
# Build stage
FROM golang:1.21-alpine AS builder

WORKDIR /app

# Initialize Go modules if not already done
RUN go mod init benchmark-proxy

# Add the dependency before building
RUN go get github.com/gorilla/websocket

# Copy source code
COPY . .

# Build the application with CGO disabled for a static binary
RUN CGO_ENABLED=0 go build -ldflags="-s -w" -o benchmark-proxy main.go

# Runtime stage
FROM alpine:latest

WORKDIR /app

# Copy the binary from the build stage
COPY --from=builder /app/benchmark-proxy .

# Run the application
ENTRYPOINT ["./benchmark-proxy"]
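Building and running the proxy image is a standard two-step; the published port here is an assumption, since main.go (whose diff is suppressed below) defines the actual flags and environment the binary reads:

```bash
docker build -t benchmark-proxy ./benchmark-proxy
# Port mapping is illustrative; see main.go for the real interface.
docker run --rm -p 8080:8080 benchmark-proxy
```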
7
benchmark-proxy/go.mod
Normal file
@@ -0,0 +1,7 @@
module benchmark-proxy

go 1.21

require github.com/gorilla/websocket v1.5.1

require golang.org/x/net v0.17.0 // indirect
4
benchmark-proxy/go.sum
Normal file
@@ -0,0 +1,4 @@
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
3235
benchmark-proxy/main.go
Normal file
File diff suppressed because it is too large. Load Diff
22
benchmark_proxy_rust/Cargo.toml
Normal file
@@ -0,0 +1,22 @@
[package]
name = "benchmark_proxy_rust"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

[dependencies]
tokio = { version = "1", features = ["full"] }
hyper = { version = "0.14", features = ["full"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio-tungstenite = { version = "0.21", features = ["native-tls"] }
log = "0.4"
env_logger = "0.10"
dashmap = "5.5"
reqwest = { version = "0.11", features = ["json", "rustls-tls"], default-features = false }
thiserror = "1.0"
futures-util = "0.3"
http = "0.2"
url = "2.5"
lazy_static = "1.4.0"
343
benchmark_proxy_rust/src/block_height_tracker.rs
Normal file
@@ -0,0 +1,343 @@
use crate::{config::AppConfig, structures::Backend};
use dashmap::DashMap;
use futures_util::{stream::SplitSink, SinkExt, StreamExt};
use log::{debug, error, info, warn};
use serde_json::json;
use std::{
    collections::HashMap,
    sync::{Arc, Mutex},
    time::{Duration, SystemTime},
};
use tokio::{
    net::TcpStream,
    sync::watch,
    task::JoinHandle,
    time::sleep,
};
use tokio_tungstenite::{
    connect_async,
    tungstenite::{protocol::Message as TungsteniteMessage, Error as TungsteniteError},
    MaybeTlsStream, WebSocketStream,
};
use url::Url;

const RECONNECT_DELAY: Duration = Duration::from_secs(10);

#[derive(serde::Deserialize, Debug)]
struct SubscriptionMessage {
    #[allow(dead_code)] // May not be used if only checking method
    jsonrpc: Option<String>,
    method: Option<String>,
    params: Option<SubscriptionParams>,
    result: Option<serde_json::Value>, // For subscription ID confirmation
    id: Option<serde_json::Value>, // For request echo
}

#[derive(serde::Deserialize, Debug)]
struct SubscriptionParams {
    subscription: String,
    result: HeaderData,
}

#[derive(serde::Deserialize, Debug)]
struct HeaderData {
    number: String, // Hex string like "0x123"
    // Add other fields like "hash" if ever needed for more advanced logic
}

pub struct BlockHeightTracker {
    config: Arc<AppConfig>,
    backends: Vec<Backend>,
    block_heights: Arc<DashMap<String, u64>>,
    last_update_times: Arc<DashMap<String, SystemTime>>,
    shutdown_tx: watch::Sender<bool>,
    tasks: Arc<Mutex<Vec<JoinHandle<()>>>>,
    enable_detailed_logs: bool,
}

impl BlockHeightTracker {
    pub fn new(
        config: Arc<AppConfig>,
        all_backends: &[Backend],
    ) -> Option<Arc<Self>> {
        if !config.enable_block_height_tracking {
            info!("BlockHeightTracker disabled by configuration.");
            return None;
        }

        info!("Initializing BlockHeightTracker for {} backends.", all_backends.len());
        let (shutdown_tx, _shutdown_rx) = watch::channel(false); // _shutdown_rx cloned by tasks

        Some(Arc::new(Self {
            config: config.clone(),
            backends: all_backends.to_vec(), // Clones the slice into a Vec
            block_heights: Arc::new(DashMap::new()),
            last_update_times: Arc::new(DashMap::new()),
            shutdown_tx,
            tasks: Arc::new(Mutex::new(Vec::new())),
            enable_detailed_logs: config.enable_detailed_logs,
        }))
    }

    pub fn start_monitoring(self: Arc<Self>) {
        if self.backends.is_empty() {
            info!("BHT: No backends configured for monitoring.");
            return;
        }
        info!("BHT: Starting block height monitoring for {} backends.", self.backends.len());
        let mut tasks_guard = self.tasks.lock().unwrap();
        for backend in self.backends.clone() {
            // Only monitor if backend has a URL, primarily for non-primary roles or specific needs
            // For this implementation, we assume all backends in the list are candidates.
            let task_self = self.clone();
            let task_backend = backend.clone(); // Clone backend for the task
            let task_shutdown_rx = self.shutdown_tx.subscribe();

            let task = tokio::spawn(async move {
                task_self
                    .monitor_backend_connection(task_backend, task_shutdown_rx)
                    .await;
            });
            tasks_guard.push(task);
        }
    }

    async fn monitor_backend_connection(
        self: Arc<Self>,
        backend: Backend,
        mut shutdown_rx: watch::Receiver<bool>,
    ) {
        info!("BHT: Starting monitoring for backend: {}", backend.name);
        loop { // Outer reconnect loop
            tokio::select! {
                biased;
                _ = shutdown_rx.changed() => {
                    if *shutdown_rx.borrow() {
                        info!("BHT: Shutdown signal received for {}, terminating monitoring.", backend.name);
                        break; // Break outer reconnect loop
                    }
                }
                _ = tokio::time::sleep(Duration::from_millis(10)) => { // Give a chance for shutdown signal before attempting connection
                    // Proceed to connection attempt
                }
            }
            if *shutdown_rx.borrow() { break; }

            let mut ws_url = backend.url.clone();
            let scheme = if backend.url.scheme() == "https" { "wss" } else { "ws" };
            if let Err(_e) = ws_url.set_scheme(scheme) {
                error!("BHT: Failed to set scheme to {} for backend {}: {}", scheme, backend.name, backend.url);
                sleep(RECONNECT_DELAY).await;
                continue;
            }

            if self.enable_detailed_logs {
                debug!("BHT: Attempting to connect to {} for backend {}", ws_url, backend.name);
            }

            match connect_async(ws_url.clone()).await {
                Ok((ws_stream, _response)) => {
                    if self.enable_detailed_logs {
                        info!("BHT: Successfully connected to WebSocket for backend: {}", backend.name);
                    }
                    let (mut write, mut read) = ws_stream.split();

                    let subscribe_payload = json!({
                        "jsonrpc": "2.0",
                        "method": "eth_subscribe",
                        "params": ["newHeads"],
                        "id": 1 // Static ID for this subscription
                    });

                    if let Err(e) = write.send(TungsteniteMessage::Text(subscribe_payload.to_string())).await {
                        error!("BHT: Failed to send eth_subscribe to {}: {}. Retrying connection.", backend.name, e);
                        // Connection will be retried by the outer loop after delay
                        sleep(RECONNECT_DELAY).await;
                        continue;
                    }
                    if self.enable_detailed_logs {
                        debug!("BHT: Sent eth_subscribe payload to {}", backend.name);
                    }

                    // Inner message reading loop
                    loop {
                        tokio::select! {
                            biased;
                            _ = shutdown_rx.changed() => {
                                if *shutdown_rx.borrow() {
                                    info!("BHT: Shutdown signal for {}, closing WebSocket and stopping.", backend.name);
                                    // Attempt to close the WebSocket gracefully
                                    let _ = write.send(TungsteniteMessage::Close(None)).await;
                                    break; // Break inner message_read_loop
                                }
                            }
                            maybe_message = read.next() => {
                                match maybe_message {
                                    Some(Ok(message)) => {
                                        match message {
                                            TungsteniteMessage::Text(text_msg) => {
                                                if self.enable_detailed_logs {
                                                    debug!("BHT: Received text from {}: {}", backend.name, text_msg);
                                                }
                                                match serde_json::from_str::<SubscriptionMessage>(&text_msg) {
                                                    Ok(parsed_msg) => {
                                                        if parsed_msg.method.as_deref() == Some("eth_subscription") {
                                                            if let Some(params) = parsed_msg.params {
                                                                let block_num_str = params.result.number;
                                                                match u64::from_str_radix(block_num_str.trim_start_matches("0x"), 16) {
                                                                    Ok(block_num) => {
                                                                        self.block_heights.insert(backend.name.clone(), block_num);
                                                                        self.last_update_times.insert(backend.name.clone(), SystemTime::now());
                                                                        if self.enable_detailed_logs {
                                                                            debug!("BHT: Updated block height for {}: {} (raw: {})", backend.name, block_num, block_num_str);
                                                                        }
                                                                    }
                                                                    Err(e) => error!("BHT: Failed to parse block number hex '{}' for {}: {}", block_num_str, backend.name, e),
                                                                }
                                                            }
                                                        } else if parsed_msg.id == Some(json!(1)) && parsed_msg.result.is_some() {
                                                            if self.enable_detailed_logs {
                                                                info!("BHT: Received subscription confirmation from {}: {:?}", backend.name, parsed_msg.result);
                                                            }
                                                        } else {
                                                            if self.enable_detailed_logs {
                                                                debug!("BHT: Received other JSON message from {}: {:?}", backend.name, parsed_msg);
                                                            }
                                                        }
                                                    }
                                                    Err(e) => {
                                                        if self.enable_detailed_logs {
                                                            warn!("BHT: Failed to parse JSON from {}: {}. Message: {}", backend.name, e, text_msg);
                                                        }
                                                    }
                                                }
                                            }
                                            TungsteniteMessage::Binary(bin_msg) => {
                                                if self.enable_detailed_logs {
                                                    debug!("BHT: Received binary message from {} ({} bytes), ignoring.", backend.name, bin_msg.len());
                                                }
                                            }
                                            TungsteniteMessage::Ping(ping_data) => {
                                                if self.enable_detailed_logs { debug!("BHT: Received Ping from {}, sending Pong.", backend.name); }
                                                // tokio-tungstenite handles Pongs automatically by default if feature "rustls-pong" or "native-tls-pong" is enabled.
                                                // If not, manual send:
                                                // if let Err(e) = write.send(TungsteniteMessage::Pong(ping_data)).await {
                                                //     error!("BHT: Failed to send Pong to {}: {}", backend.name, e);
                                                //     break; // Break inner loop, connection might be unstable
                                                // }
                                            }
                                            TungsteniteMessage::Pong(_) => { /* Usually no action needed */ }
                                            TungsteniteMessage::Close(_) => {
                                                if self.enable_detailed_logs { info!("BHT: WebSocket closed by server for {}.", backend.name); }
                                                break; // Break inner loop
                                            }
                                            TungsteniteMessage::Frame(_) => { /* Raw frame, usually not handled directly */ }
                                        }
                                    }
                                    Some(Err(e)) => {
                                        match e {
                                            TungsteniteError::ConnectionClosed | TungsteniteError::AlreadyClosed => {
                                                if self.enable_detailed_logs { info!("BHT: WebSocket connection closed for {}.", backend.name); }
                                            }
                                            _ => {
                                                error!("BHT: Error reading from WebSocket for {}: {:?}. Attempting reconnect.", backend.name, e);
                                            }
                                        }
                                        break; // Break inner loop, will trigger reconnect
                                    }
                                    None => {
                                        if self.enable_detailed_logs { info!("BHT: WebSocket stream ended for {}. Attempting reconnect.", backend.name); }
                                        break; // Break inner loop, will trigger reconnect
                                    }
                                }
                            }
                        } // End of inner select
                        if *shutdown_rx.borrow() { break; } // Ensure inner loop breaks if shutdown occurred
                    } // End of inner message reading loop
                }
                Err(e) => {
                    warn!("BHT: Failed to connect to WebSocket for backend {}: {:?}. Retrying after delay.", backend.name, e);
                }
            }
            // If we are here, it means the connection was dropped or failed. Wait before retrying.
            if !*shutdown_rx.borrow() { // Don't sleep if shutting down
                sleep(RECONNECT_DELAY).await;
            }
        } // End of outer reconnect loop
        info!("BHT: Stopped monitoring backend {}.", backend.name);
    }

    pub fn is_secondary_behind(&self, secondary_name: &str) -> bool {
        if !self.config.enable_block_height_tracking { return false; } // If tracking is off, assume not behind

        let primary_info = self.backends.iter().find(|b| b.role == "primary");
        let primary_name = match primary_info {
            Some(b) => b.name.clone(),
            None => {
                if self.enable_detailed_logs {
                    warn!("BHT: No primary backend configured for is_secondary_behind check.");
                }
                return false;
            }
        };

        let primary_height_opt = self.block_heights.get(&primary_name).map(|h_ref| *h_ref.value());

        let primary_height = match primary_height_opt {
            Some(h) => h,
            None => {
                if self.enable_detailed_logs {
                    debug!("BHT: Primary '{}' height unknown for is_secondary_behind check with {}.", primary_name, secondary_name);
                }
                return false; // Primary height unknown, can't reliably determine if secondary is behind
            }
        };

        let secondary_height_opt = self.block_heights.get(secondary_name).map(|h_ref| *h_ref.value());

        match secondary_height_opt {
            Some(secondary_height_val) => {
                if primary_height > secondary_height_val {
                    let diff = primary_height - secondary_height_val;
                    let is_behind = diff > self.config.max_blocks_behind;
                    if self.enable_detailed_logs && is_behind {
                        debug!("BHT: Secondary '{}' (height {}) is behind primary '{}' (height {}). Diff: {}, Max allowed: {}",
                            secondary_name, secondary_height_val, primary_name, primary_height, diff, self.config.max_blocks_behind);
                    }
                    return is_behind;
                }
                false // Secondary is not behind or is ahead
            }
            None => {
                if self.enable_detailed_logs {
                    debug!("BHT: Secondary '{}' height unknown, considering it behind primary '{}' (height {}).", secondary_name, primary_name, primary_height);
                }
                true // Secondary height unknown, assume it's behind if primary height is known
            }
        }
    }

    pub fn get_block_height_status(&self) -> HashMap<String, u64> {
        self.block_heights
            .iter()
            .map(|entry| (entry.key().clone(), *entry.value()))
            .collect()
    }

    pub async fn stop(&self) {
        info!("BHT: Sending shutdown signal to all monitoring tasks...");
        if self.shutdown_tx.send(true).is_err() {
            error!("BHT: Failed to send shutdown signal. Tasks might not terminate gracefully.");
        }

        let mut tasks_guard = self.tasks.lock().unwrap();
        info!("BHT: Awaiting termination of {} monitoring tasks...", tasks_guard.len());
        for task in tasks_guard.drain(..) {
            if let Err(e) = task.await {
                error!("BHT: Error awaiting task termination: {:?}", e);
            }
        }
        info!("BHT: All monitoring tasks terminated.");
    }
}
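The tracker's only input is the `newHeads` subscription, and `is_secondary_behind` then compares the latest parsed heights against `max_blocks_behind`. The same feed can be inspected from the shell to sanity-check a backend, assuming `websocat` and `jq` are installed and `$WS_URL` points at one backend:

```bash
# Subscribe to newHeads and print the hex block numbers the tracker parses.
echo '{"jsonrpc":"2.0","method":"eth_subscribe","params":["newHeads"],"id":1}' \
  | websocat -n "$WS_URL" \
  | jq -r '.params.result.number // empty'
```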
169
benchmark_proxy_rust/src/config.rs
Normal file
@@ -0,0 +1,169 @@
use std::env;
use std::str::FromStr;
use std::time::Duration;
use url::Url;
use thiserror::Error;
use log::{warn, info};

#[derive(Debug, Error)]
pub enum ConfigError {
    #[error("Failed to parse environment variable '{var_name}': {source}")]
    ParseError {
        var_name: String,
        source: Box<dyn std::error::Error + Send + Sync>,
    },
    #[error("Missing required environment variable: {var_name}")]
    MissingVariable { var_name: String },
    #[error("Invalid URL format for '{var_name}': {url_str} - {source}")]
    UrlParseError {
        var_name: String,
        url_str: String,
        source: url::ParseError,
    },
}

#[derive(Debug, Clone)]
pub struct AppConfig {
    pub listen_addr: String,
    pub primary_backend_url: Url,
    pub secondary_backend_urls: Vec<Url>,
    pub summary_interval_secs: u64,
    pub enable_detailed_logs: bool,
    pub enable_secondary_probing: bool,
    pub probe_interval_secs: u64,
    pub min_delay_buffer_ms: u64,
    pub probe_methods: Vec<String>,
    pub enable_block_height_tracking: bool,
    pub max_blocks_behind: u64,
    pub enable_expensive_method_routing: bool,
    pub max_body_size_bytes: usize,
    pub http_client_timeout_secs: u64,
    pub request_context_timeout_secs: u64,
}

// Helper function to get and parse environment variables
// (T needs Debug so the fallback value can be logged with {:?})
fn get_env_var<T: FromStr + std::fmt::Debug>(key: &str, default_value: T) -> T
where
    <T as FromStr>::Err: std::fmt::Display,
{
    match env::var(key) {
        Ok(val_str) => match val_str.parse::<T>() {
            Ok(val) => val,
            Err(e) => {
                warn!(
                    "Failed to parse environment variable '{}' with value '{}': {}. Using default: {:?}",
                    key, val_str, e, default_value
                );
                default_value
            }
        },
        Err(_) => default_value,
    }
}

// Helper function for boolean environment variables
fn get_env_var_bool(key: &str, default_value: bool) -> bool {
    match env::var(key) {
        Ok(val_str) => val_str.to_lowercase() == "true",
        Err(_) => default_value,
    }
}

// Helper function for Vec<String> from comma-separated string
fn get_env_var_vec_string(key: &str, default_value: Vec<String>) -> Vec<String> {
    match env::var(key) {
        Ok(val_str) => {
            if val_str.is_empty() {
                default_value
            } else {
                val_str.split(',').map(|s| s.trim().to_string()).collect()
            }
        }
        Err(_) => default_value,
    }
}

// Helper function for Vec<Url> from comma-separated string
fn get_env_var_vec_url(key: &str, default_value: Vec<Url>) -> Result<Vec<Url>, ConfigError> {
    match env::var(key) {
        Ok(val_str) => {
            if val_str.is_empty() {
                return Ok(default_value);
            }
            val_str
                .split(',')
                .map(|s| s.trim())
                .filter(|s| !s.is_empty())
                .map(|url_str| {
                    Url::parse(url_str).map_err(|e| ConfigError::UrlParseError {
                        var_name: key.to_string(),
                        url_str: url_str.to_string(),
                        source: e,
                    })
                })
                .collect()
        }
        Err(_) => Ok(default_value),
    }
}

pub fn load_from_env() -> Result<AppConfig, ConfigError> {
    info!("Loading configuration from environment variables...");

    let primary_backend_url_str = env::var("PRIMARY_BACKEND_URL").map_err(|_| {
        ConfigError::MissingVariable {
            var_name: "PRIMARY_BACKEND_URL".to_string(),
        }
    })?;
    let primary_backend_url =
        Url::parse(&primary_backend_url_str).map_err(|e| ConfigError::UrlParseError {
            var_name: "PRIMARY_BACKEND_URL".to_string(),
            url_str: primary_backend_url_str,
            source: e,
        })?;

    let secondary_backend_urls = get_env_var_vec_url("SECONDARY_BACKEND_URLS", Vec::new())?;

    let config = AppConfig {
        listen_addr: get_env_var("LISTEN_ADDR", "127.0.0.1:8080".to_string()),
        primary_backend_url,
        secondary_backend_urls,
        summary_interval_secs: get_env_var("SUMMARY_INTERVAL_SECS", 60),
        enable_detailed_logs: get_env_var_bool("ENABLE_DETAILED_LOGS", false),
        enable_secondary_probing: get_env_var_bool("ENABLE_SECONDARY_PROBING", true),
        probe_interval_secs: get_env_var("PROBE_INTERVAL_SECS", 10),
        min_delay_buffer_ms: get_env_var("MIN_DELAY_BUFFER_MS", 500),
        probe_methods: get_env_var_vec_string(
            "PROBE_METHODS",
            vec!["eth_blockNumber".to_string(), "net_version".to_string()],
        ),
        enable_block_height_tracking: get_env_var_bool("ENABLE_BLOCK_HEIGHT_TRACKING", true),
        max_blocks_behind: get_env_var("MAX_BLOCKS_BEHIND", 5),
        enable_expensive_method_routing: get_env_var_bool("ENABLE_EXPENSIVE_METHOD_ROUTING", false),
        max_body_size_bytes: get_env_var("MAX_BODY_SIZE_BYTES", 10 * 1024 * 1024), // 10MB
        http_client_timeout_secs: get_env_var("HTTP_CLIENT_TIMEOUT_SECS", 30),
        request_context_timeout_secs: get_env_var("REQUEST_CONTEXT_TIMEOUT_SECS", 35),
    };

    info!("Configuration loaded successfully: {:?}", config);
    Ok(config)
}

impl AppConfig {
    pub fn http_client_timeout(&self) -> Duration {
        Duration::from_secs(self.http_client_timeout_secs)
    }

    pub fn request_context_timeout(&self) -> Duration {
        Duration::from_secs(self.request_context_timeout_secs)
    }

    pub fn summary_interval(&self) -> Duration {
        Duration::from_secs(self.summary_interval_secs)
    }

    pub fn probe_interval(&self) -> Duration {
        Duration::from_secs(self.probe_interval_secs)
    }
}
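Everything in `AppConfig` comes from the environment, and only `PRIMARY_BACKEND_URL` is mandatory; the rest fall back to the defaults in `load_from_env`. A minimal launch sketch using the variable names defined above (values are placeholders; `RUST_LOG` drives the `env_logger` dependency from Cargo.toml):

```bash
export PRIMARY_BACKEND_URL=http://primary:8545
export SECONDARY_BACKEND_URLS=http://backup1:8545,http://backup2:8545
export LISTEN_ADDR=0.0.0.0:8080
export MAX_BLOCKS_BEHIND=5
export RUST_LOG=info
cargo run --release
```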
12
benchmark_proxy_rust/src/main.rs
Normal file
@@ -0,0 +1,12 @@
pub mod structures;
pub mod config;
pub mod stats_collector;
pub mod secondary_probe;
pub mod block_height_tracker;
pub mod rpc_utils;
pub mod request_handler;
pub mod websocket_handler;

fn main() {
    println!("Hello, world!");
}
272
benchmark_proxy_rust/src/request_handler.rs
Normal file
@@ -0,0 +1,272 @@
use bytes::Bytes;
use hyper::{Body, Request, Response, StatusCode};
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc;
use log;

use crate::config::AppConfig;
use crate::stats_collector::StatsCollector;
use crate::secondary_probe::SecondaryProbe;
use crate::block_height_tracker::BlockHeightTracker;
use crate::structures::{Backend, BatchInfo};
use crate::rpc_utils;

#[derive(Debug)]
pub enum BackendResult {
    Success {
        backend_name: String,
        response: reqwest::Response, // Send the whole reqwest::Response
        duration: std::time::Duration,
    },
    Error {
        backend_name: String,
        error: reqwest::Error, // Send the reqwest::Error
        duration: std::time::Duration,
    },
}

fn calculate_secondary_delay(
    batch_info: &crate::structures::BatchInfo,
    probe: &Option<Arc<crate::secondary_probe::SecondaryProbe>>,
    stats: &Arc<crate::stats_collector::StatsCollector>,
    _config: &Arc<crate::config::AppConfig>, // _config might be used later for more complex logic
) -> std::time::Duration {
    let mut max_delay = std::time::Duration::from_millis(0);
    let default_delay = std::time::Duration::from_millis(25); // Default from Go

    if batch_info.methods.is_empty() {
        return default_delay;
    }

    for method_name in &batch_info.methods {
        let current_method_delay = if let Some(p) = probe {
            p.get_delay_for_method(method_name)
        } else {
            // This will use the stubbed method from StatsCollector which currently returns 25ms
            stats.get_primary_p75_for_method(method_name)
        };
        if current_method_delay > max_delay {
            max_delay = current_method_delay;
        }
    }

    if max_delay == std::time::Duration::from_millis(0) { // if all methods were unknown or had 0 delay
        if let Some(p) = probe {
            // Go code uses: probe.minResponseTime + probe.minDelayBuffer
            // probe.get_delay_for_method("") would approximate this if it falls back to min_response_time + buffer
            return p.get_delay_for_method(""); // Assuming empty method falls back to base delay
        }
        return default_delay;
    }
    max_delay
}

pub async fn handle_http_request(
    req: Request<Body>,
    config: Arc<AppConfig>,
    stats_collector: Arc<StatsCollector>,
    http_client: Arc<reqwest::Client>,
    secondary_probe: Option<Arc<SecondaryProbe>>,
    block_height_tracker: Option<Arc<BlockHeightTracker>>,
    all_backends: Arc<Vec<Backend>>,
) -> Result<Response<Body>, Box<dyn std::error::Error + Send + Sync>> {
    let _overall_start_time = std::time::Instant::now(); // To be used later with request_context_timeout

    // 1. Read and limit request body
    let limited_body = hyper::body::Limited::new(req.into_body(), config.max_body_size_bytes);
    let body_bytes = match hyper::body::to_bytes(limited_body).await {
        Ok(bytes) => bytes,
        Err(e) => {
            log::error!("Failed to read request body or limit exceeded: {}", e);
            let mut err_resp = Response::new(Body::from(format!("Request body error: {}", e)));
            *err_resp.status_mut() = if e.is::<hyper::Error>() && e.downcast_ref::<hyper::Error>().map_or(false, |he| he.is_body_write_aborted() || format!("{}", he).contains("Too Large")) { // A bit heuristic for "Too Large"
                StatusCode::PAYLOAD_TOO_LARGE
            } else {
                StatusCode::BAD_REQUEST
            };
            return Ok(err_resp);
        }
    };

    // 2. Parse Batch Info
    let batch_info = match rpc_utils::parse_batch_info(&body_bytes) {
        Ok(info) => info,
        Err(e) => {
            log::error!("Invalid JSON-RPC request: {}", e);
            let mut err_resp = Response::new(Body::from(format!("Invalid JSON-RPC: {}", e)));
            *err_resp.status_mut() = StatusCode::BAD_REQUEST;
            return Ok(err_resp);
        }
    };

    let display_method = if batch_info.is_batch {
        format!("batch[{}]", batch_info.request_count)
    } else {
        batch_info.methods.get(0).cloned().unwrap_or_else(|| "unknown".to_string())
    };
    log::info!("Received request: Method: {}, IsBatch: {}, NumMethods: {}", display_method, batch_info.is_batch, batch_info.methods.len());

    // 3. Calculate Secondary Delay
    let secondary_delay = calculate_secondary_delay(&batch_info, &secondary_probe, &stats_collector, &config);
    if config.enable_detailed_logs {
        log::debug!("Method: {}, Calculated secondary delay: {:?}", display_method, secondary_delay);
    }

    // 4. Backend Filtering & Expensive Method Routing
    let mut target_backends: Vec<Backend> = (*all_backends).clone();

    if batch_info.has_stateful {
        log::debug!("Stateful method detected in request '{}', targeting primary only.", display_method);
        target_backends.retain(|b| b.role == "primary");
    } else {
        // Filter by block height
        if let Some(bht) = &block_height_tracker {
            if config.enable_block_height_tracking { // Check if feature is enabled
                target_backends.retain(|b| {
                    if b.role != "primary" && bht.is_secondary_behind(&b.name) {
                        if config.enable_detailed_logs { log::info!("Skipping secondary {}: behind in block height for request {}", b.name, display_method); }
                        // TODO: Add stat for skipped due to block height
                        false
                    } else { true }
                });
            }
        }
        // Filter by probe availability
        if let Some(sp) = &secondary_probe {
            if config.enable_secondary_probing { // Check if feature is enabled
                target_backends.retain(|b| {
                    if b.role != "primary" && !sp.is_backend_available(&b.name) {
                        if config.enable_detailed_logs { log::info!("Skipping secondary {}: not available via probe for request {}", b.name, display_method); }
                        // TODO: Add stat for skipped due to probe unavailable
                        false
                    } else { true }
                });
            }
        }
    }

    let is_req_expensive = batch_info.methods.iter().any(|m| rpc_utils::is_expensive_method(m)) ||
        batch_info.methods.iter().any(|m| stats_collector.is_expensive_method_by_stats(m)); // Stubbed

    if config.enable_expensive_method_routing && is_req_expensive && !batch_info.has_stateful {
        log::debug!("Expensive method detected in request {}. Attempting to route to a secondary.", display_method);
        // TODO: Complex expensive method routing logic.
        // For now, this placeholder doesn't change target_backends.
        // A real implementation would try to find the best secondary or stick to primary if none are suitable.
    }

    // 5. Concurrent Request Dispatch
    let (response_tx, mut response_rx) = mpsc::channel::<BackendResult>(target_backends.len().max(1));
    let mut dispatched_count = 0;

    for backend in target_backends { // target_backends is now filtered
        dispatched_count += 1;
        let task_body_bytes = body_bytes.clone();
        let task_http_client = http_client.clone();
        let task_response_tx = response_tx.clone();
        // task_backend_name, task_backend_url, task_backend_role are cloned from 'backend'
        let task_backend_name = backend.name.clone();
        let task_backend_url = backend.url.clone();
        let task_backend_role = backend.role.clone();
        let task_secondary_delay = secondary_delay;
        let task_config_detailed_logs = config.enable_detailed_logs;
        let task_http_timeout = config.http_client_timeout(); // Get Duration from config

        tokio::spawn(async move {
            let backend_req_start_time = std::time::Instant::now();

            if task_backend_role != "primary" {
                if task_config_detailed_logs {
                    log::debug!("Secondary backend {} for request {} delaying for {:?}", task_backend_name, display_method, task_secondary_delay);
                }
                tokio::time::sleep(task_secondary_delay).await;
            }

            let result = task_http_client
                .post(task_backend_url)
                .header("Content-Type", "application/json")
                // TODO: Copy relevant headers from original request 'req.headers()'
                .body(task_body_bytes)
                .timeout(task_http_timeout)
                .send()
                .await;

            let duration = backend_req_start_time.elapsed();

            match result {
                Ok(resp) => {
                    if task_config_detailed_logs {
                        log::debug!("Backend {} for request {} responded with status {}", task_backend_name, display_method, resp.status());
                    }
                    if task_response_tx.send(BackendResult::Success {
                        backend_name: task_backend_name,
                        response: resp,
                        duration,
                    }).await.is_err() {
                        log::error!("Failed to send success to channel for request {}: receiver dropped", display_method);
                    }
                }
                Err(err) => {
                    if task_config_detailed_logs {
|
||||
log::error!("Backend {} for request {} request failed: {}", task_backend_name, display_method, err);
|
||||
}
|
||||
if task_response_tx.send(BackendResult::Error {
|
||||
backend_name: task_backend_name,
|
||||
error: err,
|
||||
duration,
|
||||
}).await.is_err() {
|
||||
log::error!("Failed to send error to channel for request {}: receiver dropped", display_method);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
drop(response_tx);
|
||||
|
||||
if dispatched_count == 0 {
|
||||
log::warn!("No backends available to dispatch request for method {}", display_method);
|
||||
// TODO: Add stat for no backend available
|
||||
let mut err_resp = Response::new(Body::from("No available backends for this request type."));
|
||||
*err_resp.status_mut() = StatusCode::SERVICE_UNAVAILABLE;
|
||||
return Ok(err_resp);
|
||||
}
|
||||
|
||||
// Placeholder: return the first received response
|
||||
if let Some(first_result) = response_rx.recv().await {
|
||||
if config.enable_detailed_logs {
|
||||
log::info!("First backend response for request {}: {:?}", display_method, first_result);
|
||||
}
|
||||
|
||||
match first_result {
|
||||
BackendResult::Success { backend_name: _, response: reqwest_resp, duration: _ } => {
|
||||
let mut hyper_resp_builder = Response::builder().status(reqwest_resp.status());
|
||||
for (name, value) in reqwest_resp.headers().iter() {
|
||||
hyper_resp_builder = hyper_resp_builder.header(name.clone(), value.clone());
|
||||
}
|
||||
let hyper_resp = hyper_resp_builder
|
||||
.body(Body::wrap_stream(reqwest_resp.bytes_stream()))
|
||||
.unwrap_or_else(|e| {
|
||||
log::error!("Error building response from backend for request {}: {}", display_method, e);
|
||||
let mut err_resp = Response::new(Body::from("Error processing backend response"));
|
||||
*err_resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
|
||||
err_resp
|
||||
});
|
||||
return Ok(hyper_resp);
|
||||
}
|
||||
BackendResult::Error { backend_name, error, duration: _ } => {
|
||||
log::error!("First response for request {} was an error from {}: {}", display_method, backend_name, error);
|
||||
let mut err_resp = Response::new(Body::from(format!("Error from backend {}: {}", backend_name, error)));
|
||||
*err_resp.status_mut() = StatusCode::BAD_GATEWAY;
|
||||
return Ok(err_resp);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log::error!("No responses received from any dispatched backend for method {}", display_method);
|
||||
// TODO: Add stat for no response received
|
||||
let mut err_resp = Response::new(Body::from("No response from any backend."));
|
||||
*err_resp.status_mut() = StatusCode::GATEWAY_TIMEOUT;
|
||||
return Ok(err_resp);
|
||||
}
|
||||
// Note: Overall request context timeout and full response aggregation logic are still TODOs.
|
||||
}
|
||||
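A minimal wiring sketch (an illustration, not part of this diff): roughly how a handler with this signature can be mounted on hyper 0.14's service model. `AppState` and `route` are hypothetical names; the crate's real entry point is not shown in this diff.

use std::{convert::Infallible, net::SocketAddr};
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Request, Response, Server};

// Hypothetical bundle of the Arcs handle_http_request expects
// (config, stats collector, reqwest client, probe, tracker, backends).
#[derive(Clone)]
struct AppState;

async fn route(_req: Request<Body>, _state: AppState) -> Result<Response<Body>, Infallible> {
    // A real router would dispatch to handle_http_request or, for upgrade
    // requests, to handle_websocket_request.
    Ok(Response::new(Body::from("ok")))
}

#[tokio::main]
async fn main() {
    let state = AppState;
    let make_svc = make_service_fn(move |_conn| {
        let state = state.clone();
        async move { Ok::<_, Infallible>(service_fn(move |req| route(req, state.clone()))) }
    });
    let addr = SocketAddr::from(([0, 0, 0, 0], 8080));
    if let Err(e) = Server::bind(&addr).serve(make_svc).await {
        eprintln!("server error: {}", e);
    }
}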
92
benchmark_proxy_rust/src/rpc_utils.rs
Normal file
@@ -0,0 +1,92 @@
use crate::structures::{BatchInfo, JsonRpcRequest};
use std::collections::HashSet;

fn get_stateful_methods() -> HashSet<&'static str> {
    [
        "eth_newFilter", "eth_newBlockFilter", "eth_newPendingTransactionFilter",
        "eth_getFilterChanges", "eth_getFilterLogs", "eth_uninstallFilter",
        "eth_subscribe", "eth_unsubscribe",
        // "eth_subscription" is a notification, not a method a client calls,
        // but if it appears in a batch for some reason it is state-related.
        "eth_subscription",
    ]
    .iter()
    .cloned()
    .collect()
}

fn get_expensive_methods() -> HashSet<&'static str> {
    [
        // Ethereum debug API (typically Geth-specific)
        "debug_traceBlockByHash", "debug_traceBlockByNumber", "debug_traceCall", "debug_traceTransaction",
        "debug_storageRangeAt", "debug_getModifiedAccountsByHash", "debug_getModifiedAccountsByNumber",
        // Erigon/OpenEthereum trace module (more standard)
        "trace_block", "trace_call", "trace_callMany", "trace_filter", "trace_get", "trace_rawTransaction",
        "trace_replayBlockTransactions", "trace_replayTransaction", "trace_transaction",
        // Specific combinations that might be considered extra expensive
        "trace_replayBlockTransactions#vmTrace", // Example; depends on whether '#' is treated as part of the method
        "trace_replayTransaction#vmTrace",
    ]
    .iter()
    .cloned()
    .collect()
}

lazy_static::lazy_static! {
    static ref STATEFUL_METHODS: HashSet<&'static str> = get_stateful_methods();
    static ref EXPENSIVE_METHODS: HashSet<&'static str> = get_expensive_methods();
}

pub fn is_stateful_method(method: &str) -> bool {
    STATEFUL_METHODS.contains(method)
}

pub fn is_expensive_method(method: &str) -> bool {
    EXPENSIVE_METHODS.contains(method)
}

pub fn parse_batch_info(body_bytes: &[u8]) -> Result<BatchInfo, String> {
    if body_bytes.is_empty() {
        return Err("Empty request body".to_string());
    }

    // Try parsing as a batch (array) first.
    match serde_json::from_slice::<Vec<JsonRpcRequest>>(body_bytes) {
        Ok(batch_reqs) => {
            if batch_reqs.is_empty() {
                return Err("Empty batch request".to_string());
            }
            let mut methods = Vec::new();
            let mut has_stateful = false;
            for req in &batch_reqs {
                methods.push(req.method.clone());
                if is_stateful_method(&req.method) {
                    has_stateful = true;
                }
            }
            Ok(BatchInfo {
                is_batch: true,
                methods,
                request_count: batch_reqs.len(),
                has_stateful,
            })
        }
        Err(e_batch) => {
            // If it is not a batch, try parsing as a single request.
            match serde_json::from_slice::<JsonRpcRequest>(body_bytes) {
                Ok(single_req) => Ok(BatchInfo {
                    is_batch: false,
                    methods: vec![single_req.method.clone()],
                    request_count: 1,
                    has_stateful: is_stateful_method(&single_req.method),
                }),
                Err(e_single) => {
                    // Log the actual errors for debugging, but return a generic one.
                    log::debug!("Failed to parse as batch: {}", e_batch);
                    log::debug!("Failed to parse as single: {}", e_single);
                    Err("Invalid JSON-RPC request format. Not a valid single request or batch.".to_string())
                }
            }
        }
    }
}
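A behavioral sketch of parse_batch_info as a hypothetical test (not present in the diff); it assumes only the types defined in this file and in structures.rs.

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn single_request_is_not_a_batch() {
        let body = br#"{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}"#;
        let info = parse_batch_info(body).unwrap();
        assert!(!info.is_batch);
        assert_eq!(info.methods, vec!["eth_blockNumber".to_string()]);
        assert!(!info.has_stateful);
    }

    #[test]
    fn batch_with_subscription_is_flagged_stateful() {
        let body = br#"[{"jsonrpc":"2.0","method":"eth_call","id":1},{"jsonrpc":"2.0","method":"eth_subscribe","id":2}]"#;
        let info = parse_batch_info(body).unwrap();
        assert!(info.is_batch);
        assert_eq!(info.request_count, 2);
        assert!(info.has_stateful);
    }
}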
383
benchmark_proxy_rust/src/secondary_probe.rs
Normal file
@@ -0,0 +1,383 @@
use crate::{
    config::AppConfig,
    structures::{Backend, JsonRpcRequest},
};
use chrono::Utc;
use dashmap::DashMap;
use log::{debug, error, info, warn};
use reqwest::Client;
use serde_json::json;
use std::{
    cmp::min,
    sync::{
        atomic::{AtomicU32, Ordering},
        Arc, Mutex, RwLock,
    },
    time::{Duration, SystemTime},
};
use tokio::sync::watch;

const PROBE_REQUEST_COUNT: usize = 10;
const DEFAULT_MIN_RESPONSE_TIME_MS: u64 = 15;
const PROBE_CYCLE_DELAY_MS: u64 = 10;

pub struct SecondaryProbe {
    config: Arc<AppConfig>,
    backends: Vec<Backend>, // Only secondary backends
    client: Client,
    min_response_time: Arc<RwLock<Duration>>,
    method_timings: Arc<DashMap<String, Duration>>,  // method_name -> min duration
    backend_timings: Arc<DashMap<String, Duration>>, // backend_name -> min duration

    // Health state per backend
    backend_available: Arc<DashMap<String, bool>>,
    backend_error_count: Arc<DashMap<String, AtomicU32>>,
    backend_consecutive_success_count: Arc<DashMap<String, AtomicU32>>, // For recovery
    backend_last_success: Arc<DashMap<String, Mutex<SystemTime>>>,

    last_probe_time: Arc<Mutex<SystemTime>>,
    failure_count: Arc<AtomicU32>, // Consecutive overall probe-cycle failures
    last_success_time: Arc<Mutex<SystemTime>>, // Last time any probe in an overall cycle succeeded

    shutdown_tx: watch::Sender<bool>,
    shutdown_rx: watch::Receiver<bool>,
    enable_detailed_logs: bool,
}

impl SecondaryProbe {
    pub fn new(
        config: Arc<AppConfig>,
        all_backends: &[Backend],
        client: Client,
    ) -> Option<Arc<Self>> {
        let secondary_backends: Vec<Backend> = all_backends
            .iter()
            .filter(|b| b.role.to_lowercase() == "secondary")
            .cloned()
            .collect();

        if secondary_backends.is_empty() {
            info!("No secondary backends configured. SecondaryProbe will not be initialized.");
            return None;
        }

        info!(
            "Initializing SecondaryProbe for {} secondary backends.",
            secondary_backends.len()
        );

        let backend_available = Arc::new(DashMap::new());
        let backend_error_count = Arc::new(DashMap::new());
        let backend_consecutive_success_count = Arc::new(DashMap::new());
        let backend_last_success = Arc::new(DashMap::new());

        for backend in &secondary_backends {
            backend_available.insert(backend.name.clone(), true);
            backend_error_count.insert(backend.name.clone(), AtomicU32::new(0));
            backend_consecutive_success_count.insert(backend.name.clone(), AtomicU32::new(0));
            backend_last_success.insert(backend.name.clone(), Mutex::new(SystemTime::now()));
            info!("  - Backend '{}' ({}) initialized as available.", backend.name, backend.url);
        }

        let (shutdown_tx, shutdown_rx) = watch::channel(false);

        Some(Arc::new(Self {
            config: config.clone(),
            backends: secondary_backends,
            client,
            min_response_time: Arc::new(RwLock::new(Duration::from_millis(
                DEFAULT_MIN_RESPONSE_TIME_MS, // Or load from config if needed
            ))),
            method_timings: Arc::new(DashMap::new()),
            backend_timings: Arc::new(DashMap::new()),
            backend_available,
            backend_error_count,
            backend_consecutive_success_count,
            backend_last_success,
            last_probe_time: Arc::new(Mutex::new(SystemTime::now())),
            failure_count: Arc::new(AtomicU32::new(0)),
            last_success_time: Arc::new(Mutex::new(SystemTime::now())),
            shutdown_tx,
            shutdown_rx, // The receiver is cloneable
            enable_detailed_logs: config.enable_detailed_logs,
        }))
    }

    pub fn start_periodic_probing(self: Arc<Self>) {
        if self.backends.is_empty() {
            info!("No secondary backends to probe. Periodic probing will not start.");
            return;
        }

        info!(
            "Starting periodic probing for {} secondary backends. Probe interval: {}s. Probe methods: {:?}. Max errors: {}, Recovery threshold: {}.",
            self.backends.len(),
            self.config.probe_interval_secs,
            self.config.probe_methods,
            self.config.max_error_threshold,
            self.config.recovery_threshold
        );

        // Run an initial probe immediately.
        let initial_probe_self = self.clone();
        tokio::spawn(async move {
            if initial_probe_self.enable_detailed_logs {
                debug!("Running initial probe...");
            }
            initial_probe_self.run_probe().await;
            if initial_probe_self.enable_detailed_logs {
                debug!("Initial probe finished.");
            }
        });

        // Start the periodic probing task.
        let mut interval = tokio::time::interval(self.config.probe_interval());
        let mut shutdown_rx_clone = self.shutdown_rx.clone();

        tokio::spawn(async move {
            loop {
                tokio::select! {
                    _ = interval.tick() => {
                        if self.enable_detailed_logs {
                            debug!("Running periodic probe cycle...");
                        }
                        self.run_probe().await;
                        if self.enable_detailed_logs {
                            debug!("Periodic probe cycle finished.");
                        }
                    }
                    res = shutdown_rx_clone.changed() => {
                        if res.is_err() || *shutdown_rx_clone.borrow() {
                            info!("SecondaryProbe: Shutdown signal received or channel closed, stopping periodic probing.");
                            break;
                        }
                    }
                }
            }
            info!("SecondaryProbe: Periodic probing task has stopped.");
        });
    }

    async fn run_probe(&self) {
        let mut successful_probes_in_overall_cycle = 0;
        let temp_method_timings: DashMap<String, Duration> = DashMap::new();  // method_name -> min duration this cycle
        let temp_backend_timings: DashMap<String, Duration> = DashMap::new(); // backend_name -> min duration this cycle
        let mut temp_overall_min_response_time = Duration::MAX;

        for backend in &self.backends {
            let mut backend_cycle_successful_probes = 0;
            let mut backend_cycle_min_duration = Duration::MAX;

            for method_name in &self.config.probe_methods {
                let mut method_min_duration_for_backend_this_cycle = Duration::MAX;

                for i in 0..PROBE_REQUEST_COUNT {
                    let probe_id = format!(
                        "probe-{}-{}-{}-{}",
                        backend.name,
                        method_name,
                        Utc::now().timestamp_nanos_opt().unwrap_or_else(|| SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap_or_default().as_nanos() as i64),
                        i
                    );
                    let request_body = JsonRpcRequest {
                        method: method_name.clone(),
                        params: Some(json!([])),
                        id: Some(json!(probe_id)),
                        jsonrpc: Some("2.0".to_string()),
                    };

                    let start_time = SystemTime::now();
                    match self.client.post(backend.url.clone()).json(&request_body).timeout(self.config.http_client_timeout()).send().await {
                        Ok(response) => {
                            let duration = start_time.elapsed().unwrap_or_default();
                            if response.status().is_success() {
                                // TODO: Optionally parse the JSON-RPC response for an error field.
                                backend_cycle_successful_probes += 1;
                                successful_probes_in_overall_cycle += 1;

                                method_min_duration_for_backend_this_cycle = min(method_min_duration_for_backend_this_cycle, duration);
                                backend_cycle_min_duration = min(backend_cycle_min_duration, duration);
                                temp_overall_min_response_time = min(temp_overall_min_response_time, duration);

                                if self.enable_detailed_logs {
                                    debug!("Probe success: {} method {} ID {} took {:?}.", backend.name, method_name, probe_id, duration);
                                }
                            } else if self.enable_detailed_logs {
                                warn!("Probe failed (HTTP status {}): {} method {} ID {}. Body: {:?}", response.status(), backend.name, method_name, probe_id, response.text().await.unwrap_or_default());
                            }
                        }
                        Err(e) => {
                            if self.enable_detailed_logs {
                                warn!("Probe error (request failed): {} method {} ID {}: {:?}", backend.name, method_name, probe_id, e);
                            }
                        }
                    }
                    tokio::time::sleep(Duration::from_millis(PROBE_CYCLE_DELAY_MS)).await;
                } // End of PROBE_REQUEST_COUNT loop

                if method_min_duration_for_backend_this_cycle != Duration::MAX {
                    temp_method_timings
                        .entry(method_name.clone())
                        .and_modify(|current_min| *current_min = min(*current_min, method_min_duration_for_backend_this_cycle))
                        .or_insert(method_min_duration_for_backend_this_cycle);
                }
            } // End of probe_methods loop

            if backend_cycle_min_duration != Duration::MAX {
                temp_backend_timings.insert(backend.name.clone(), backend_cycle_min_duration);
            }
            self.update_backend_health(&backend.name, backend_cycle_successful_probes > 0);
            if self.enable_detailed_logs {
                debug!(
                    "Probe sub-cycle for backend {}: {} successful probes. Min duration for this backend this cycle: {:?}. Current health: available={}",
                    backend.name,
                    backend_cycle_successful_probes,
                    if backend_cycle_min_duration == Duration::MAX { None } else { Some(backend_cycle_min_duration) },
                    self.is_backend_available(&backend.name)
                );
            }
        } // End of backends loop

        // Update the global timings if any probe in the cycle succeeded.
        if successful_probes_in_overall_cycle > 0 {
            if temp_overall_min_response_time != Duration::MAX {
                let mut min_resp_time_guard = self.min_response_time.write().unwrap();
                *min_resp_time_guard = min(*min_resp_time_guard, temp_overall_min_response_time);
                if self.enable_detailed_logs {
                    debug!("Global min_response_time updated to: {:?}", *min_resp_time_guard);
                }
            }

            for entry in temp_method_timings.iter() {
                self.method_timings
                    .entry(entry.key().clone())
                    .and_modify(|current_min| *current_min = min(*current_min, *entry.value()))
                    .or_insert(*entry.value());
                if self.enable_detailed_logs {
                    debug!("Global method_timing for {} updated/set to: {:?}", entry.key(), *entry.value());
                }
            }

            for entry in temp_backend_timings.iter() {
                self.backend_timings
                    .entry(entry.key().clone())
                    .and_modify(|current_min| *current_min = min(*current_min, *entry.value()))
                    .or_insert(*entry.value());
                if self.enable_detailed_logs {
                    debug!("Global backend_timing for {} updated/set to: {:?}", entry.key(), *entry.value());
                }
            }

            self.failure_count.store(0, Ordering::Relaxed);
            *self.last_success_time.lock().unwrap() = SystemTime::now();
            if self.enable_detailed_logs {
                info!("Overall probe cycle completed with {} successes. Overall failure count reset.", successful_probes_in_overall_cycle);
            }
        } else {
            let prev_failures = self.failure_count.fetch_add(1, Ordering::Relaxed);
            warn!(
                "Overall probe cycle completed with NO successful probes. Overall failure count incremented to {}.",
                prev_failures + 1
            );
        }

        *self.last_probe_time.lock().unwrap() = SystemTime::now();
    }

    fn update_backend_health(&self, backend_name: &str, is_cycle_success: bool) {
        let current_availability = self.is_backend_available(backend_name);
        let error_count_entry = self.backend_error_count.entry(backend_name.to_string()).or_insert_with(|| AtomicU32::new(0));
        let consecutive_success_entry = self.backend_consecutive_success_count.entry(backend_name.to_string()).or_insert_with(|| AtomicU32::new(0));

        if is_cycle_success {
            error_count_entry.store(0, Ordering::Relaxed);
            consecutive_success_entry.fetch_add(1, Ordering::Relaxed);
            if let Some(last_success_guard) = self.backend_last_success.get_mut(backend_name) {
                *last_success_guard.lock().unwrap() = SystemTime::now();
            }

            if !current_availability {
                let successes = consecutive_success_entry.load(Ordering::Relaxed);
                if successes >= self.config.recovery_threshold {
                    self.backend_available.insert(backend_name.to_string(), true);
                    info!("Backend {} recovered and is now AVAILABLE ({} consecutive successes met threshold {}).", backend_name, successes, self.config.recovery_threshold);
                    consecutive_success_entry.store(0, Ordering::Relaxed); // Reset after recovery
                } else if self.enable_detailed_logs {
                    debug!("Backend {} had a successful probe cycle. Consecutive successes: {}. Needs {} for recovery.", backend_name, successes, self.config.recovery_threshold);
                }
            } else if self.enable_detailed_logs {
                debug!("Backend {} remains available, successful probe cycle.", backend_name);
            }
        } else { // The probe cycle failed for this backend
            consecutive_success_entry.store(0, Ordering::Relaxed); // Reset consecutive successes on any failure
            let current_errors = error_count_entry.fetch_add(1, Ordering::Relaxed) + 1; // +1 because fetch_add returns the previous value

            if current_availability && current_errors >= self.config.max_error_threshold {
                self.backend_available.insert(backend_name.to_string(), false);
                warn!(
                    "Backend {} has become UNAVAILABLE due to {} errors (threshold {}).",
                    backend_name, current_errors, self.config.max_error_threshold
                );
            } else if self.enable_detailed_logs {
                if current_availability {
                    debug!("Backend {} is still available but its error count increased to {}. Max errors before unavailable: {}", backend_name, current_errors, self.config.max_error_threshold);
                } else {
                    debug!("Backend {} remains UNAVAILABLE, error count now {}.", backend_name, current_errors);
                }
            }
        }
    }

    pub fn get_delay_for_method(&self, method_name: &str) -> Duration {
        let base_delay = self
            .method_timings
            .get(method_name)
            .map(|timing_ref| *timing_ref.value())
            .unwrap_or_else(|| *self.min_response_time.read().unwrap()); // Read lock

        let buffer = Duration::from_millis(self.config.min_delay_buffer_ms);
        let calculated_delay = base_delay.saturating_add(buffer);

        let overall_failures = self.failure_count.load(Ordering::Relaxed);
        // Consider last_success_time to see whether failures are recent and persistent.
        let time_since_last_overall_success = SystemTime::now()
            .duration_since(*self.last_success_time.lock().unwrap())
            .unwrap_or_default();

        // Fallback: many consecutive failures AND the last success was long ago.
        if overall_failures >= 3 && time_since_last_overall_success > self.config.probe_interval().saturating_mul(3) {
            warn!(
                "Probes failing ({} consecutive, last overall success {:?} ago). Using a conservative fixed delay for method {}.",
                overall_failures, time_since_last_overall_success, method_name
            );
            return Duration::from_millis(self.config.min_delay_buffer_ms.saturating_mul(3));
        }

        if self.enable_detailed_logs {
            debug!("Delay for method '{}': base {:?}, buffer {:?}, final {:?}", method_name, base_delay, buffer, calculated_delay);
        }
        calculated_delay
    }

    pub fn is_backend_available(&self, backend_name: &str) -> bool {
        self.backend_available
            .get(backend_name)
            .map_or(false, |entry| *entry.value())
    }

    pub fn stop(&self) {
        info!("SecondaryProbe: Sending shutdown signal...");
        if self.shutdown_tx.send(true).is_err() {
            error!("Failed to send shutdown signal to the SecondaryProbe task. It may have already stopped or had no receiver.");
        }
    }
}
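To make the delay policy concrete, here is a standalone distillation of get_delay_for_method's arithmetic (the `delay` helper and the numbers are hypothetical, for illustration only):

use std::time::Duration;

// Mirrors the logic above: fastest probed time plus a buffer, with a fixed
// conservative fallback once probes have been failing persistently.
fn delay(base: Duration, buffer_ms: u64, consecutive_failures: u32, stale: bool) -> Duration {
    if consecutive_failures >= 3 && stale {
        return Duration::from_millis(buffer_ms.saturating_mul(3));
    }
    base.saturating_add(Duration::from_millis(buffer_ms))
}

fn main() {
    // Fastest probed eth_call took 12 ms, buffer is 5 ms -> 17 ms delay.
    assert_eq!(delay(Duration::from_millis(12), 5, 0, false), Duration::from_millis(17));
    // Probes persistently failing -> fixed 3 * buffer = 15 ms fallback.
    assert_eq!(delay(Duration::from_millis(12), 5, 3, true), Duration::from_millis(15));
}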
290
benchmark_proxy_rust/src/stats_collector.rs
Normal file
@@ -0,0 +1,290 @@
use crate::structures::{ResponseStats, WebSocketStats, CuDataPoint, Backend};
use crate::block_height_tracker::BlockHeightTracker;
use crate::secondary_probe::SecondaryProbe;
use std::time::{Duration, SystemTime};
use std::sync::{Arc, Mutex, atomic::{AtomicU64, Ordering}};
use dashmap::DashMap;
use log::{debug, warn};

pub struct StatsCollector {
    pub request_stats: Arc<Mutex<Vec<ResponseStats>>>,
    pub method_stats: Arc<DashMap<String, Mutex<Vec<Duration>>>>, // method_name -> durations on the primary
    pub backend_method_stats: Arc<DashMap<String, DashMap<String, Mutex<Vec<Duration>>>>>, // backend_name -> method_name -> durations
    pub backend_wins: Arc<DashMap<String, AtomicU64>>, // backend_name -> count
    pub method_backend_wins: Arc<DashMap<String, DashMap<String, AtomicU64>>>, // method_name -> backend_name -> count
    pub first_response_durations: Arc<Mutex<Vec<Duration>>>,
    pub actual_first_response_durations: Arc<Mutex<Vec<Duration>>>,
    pub method_first_response_durations: Arc<DashMap<String, Mutex<Vec<Duration>>>>,
    pub method_actual_first_response_durations: Arc<DashMap<String, Mutex<Vec<Duration>>>>,
    pub total_requests: Arc<AtomicU64>,
    pub error_count: Arc<AtomicU64>,
    pub skipped_secondary_requests: Arc<AtomicU64>,
    pub ws_stats: Arc<Mutex<Vec<WebSocketStats>>>,
    pub total_ws_connections: Arc<AtomicU64>,
    pub app_start_time: SystemTime,
    pub interval_start_time: Arc<Mutex<SystemTime>>,
    pub summary_interval: Duration,
    pub method_cu_prices: Arc<DashMap<String, u64>>,
    pub total_cu: Arc<AtomicU64>,
    pub method_cu: Arc<DashMap<String, AtomicU64>>, // method_name -> total CU for this method in the interval
    pub historical_cu: Arc<Mutex<Vec<CuDataPoint>>>,
    pub has_secondary_backends: bool,
    // Placeholders for the probe and tracker; to be wired up later:
    // pub secondary_probe: Option<Arc<SecondaryProbe>>,
    // pub block_height_tracker: Option<Arc<BlockHeightTracker>>,
}

impl StatsCollector {
    pub fn new(summary_interval: Duration, has_secondary_backends: bool) -> Self {
        let method_cu_prices = Arc::new(DashMap::new());
        Self::init_cu_prices(&method_cu_prices);

        StatsCollector {
            request_stats: Arc::new(Mutex::new(Vec::new())),
            method_stats: Arc::new(DashMap::new()),
            backend_method_stats: Arc::new(DashMap::new()),
            backend_wins: Arc::new(DashMap::new()),
            method_backend_wins: Arc::new(DashMap::new()),
            first_response_durations: Arc::new(Mutex::new(Vec::new())),
            actual_first_response_durations: Arc::new(Mutex::new(Vec::new())),
            method_first_response_durations: Arc::new(DashMap::new()),
            method_actual_first_response_durations: Arc::new(DashMap::new()),
            total_requests: Arc::new(AtomicU64::new(0)),
            error_count: Arc::new(AtomicU64::new(0)),
            skipped_secondary_requests: Arc::new(AtomicU64::new(0)),
            ws_stats: Arc::new(Mutex::new(Vec::new())),
            total_ws_connections: Arc::new(AtomicU64::new(0)),
            app_start_time: SystemTime::now(),
            interval_start_time: Arc::new(Mutex::new(SystemTime::now())),
            summary_interval,
            method_cu_prices,
            total_cu: Arc::new(AtomicU64::new(0)),
            method_cu: Arc::new(DashMap::new()),
            historical_cu: Arc::new(Mutex::new(Vec::new())),
            has_secondary_backends,
        }
    }

    fn init_cu_prices(prices_map: &DashMap<String, u64>) {
        // Base CU prices
        prices_map.insert("eth_call".to_string(), 100);
        prices_map.insert("eth_estimateGas".to_string(), 150);
        prices_map.insert("eth_getLogs".to_string(), 200);
        prices_map.insert("eth_sendRawTransaction".to_string(), 250);
        prices_map.insert("trace_call".to_string(), 300);
        prices_map.insert("trace_replayBlockTransactions".to_string(), 500);
        // Default for unknown methods
        prices_map.insert("default".to_string(), 50);
    }

    pub fn add_stats(&self, stats_vec: Vec<ResponseStats>) {
        if stats_vec.is_empty() {
            warn!("add_stats called with an empty stats_vec");
            return;
        }

        self.total_requests.fetch_add(1, Ordering::Relaxed);

        let mut primary_stats: Option<&ResponseStats> = None;
        let mut winning_backend_name: Option<String> = None;
        let mut actual_first_response_duration: Option<Duration> = None;
        let mut first_response_duration_from_primary_or_fastest_secondary: Option<Duration> = None;

        // Find the 'actual-first-response' entry, if present, and the primary response.
        for stat in &stats_vec {
            if stat.backend_name == "actual-first-response" {
                actual_first_response_duration = Some(stat.duration);
            } else if stat.backend_name.contains("-primary") { // Assumes primary names contain "-primary"
                primary_stats = Some(stat);
            }
        }

        let method_name = primary_stats.map_or_else(
            || stats_vec.first().map_or_else(|| "unknown".to_string(), |s| s.method.clone()),
            |ps| ps.method.clone(),
        );

        // Determine the winning backend and the first-response duration.
        if self.has_secondary_backends {
            let mut fastest_duration = Duration::MAX;
            for stat in stats_vec.iter().filter(|s| s.backend_name != "actual-first-response" && s.error.is_none()) {
                if stat.duration < fastest_duration {
                    fastest_duration = stat.duration;
                    winning_backend_name = Some(stat.backend_name.clone());
                }
            }
            if fastest_duration != Duration::MAX {
                first_response_duration_from_primary_or_fastest_secondary = Some(fastest_duration);
            }
        } else {
            // With no secondary backends, the primary wins if it did not error.
            if let Some(ps) = primary_stats {
                if ps.error.is_none() {
                    winning_backend_name = Some(ps.backend_name.clone());
                    first_response_duration_from_primary_or_fastest_secondary = Some(ps.duration);
                }
            }
        }

        // If no winner was determined (e.g. everything errored, or there is no
        // secondary and the primary errored), fall back to the primary for
        // error-tracking purposes.
        if winning_backend_name.is_none() && primary_stats.is_some() {
            winning_backend_name = Some(primary_stats.unwrap().backend_name.clone());
        }

        // Update backend_wins and method_backend_wins.
        if let Some(ref winner_name) = winning_backend_name {
            self.backend_wins.entry(winner_name.clone()).or_insert_with(|| AtomicU64::new(0)).fetch_add(1, Ordering::Relaxed);
            self.method_backend_wins.entry(method_name.clone()).or_default().entry(winner_name.clone()).or_insert_with(|| AtomicU64::new(0)).fetch_add(1, Ordering::Relaxed);
        }

        // Update first_response_durations and actual_first_response_durations.
        if let Some(duration) = first_response_duration_from_primary_or_fastest_secondary {
            self.first_response_durations.lock().unwrap().push(duration);
            self.method_first_response_durations.entry(method_name.clone()).or_insert_with(|| Mutex::new(Vec::new())).lock().unwrap().push(duration);
        }

        if let Some(duration) = actual_first_response_duration {
            self.actual_first_response_durations.lock().unwrap().push(duration);
            self.method_actual_first_response_durations.entry(method_name.clone()).or_insert_with(|| Mutex::new(Vec::new())).lock().unwrap().push(duration);
        }

        let mut request_stats_guard = self.request_stats.lock().unwrap();
        for stat in stats_vec {
            if stat.backend_name == "actual-first-response" { // Already handled
                continue;
            }

            request_stats_guard.push(stat.clone());

            if stat.error.is_some() {
                if stat.error.as_deref() == Some("skipped by primary due to min_delay_buffer") {
                    self.skipped_secondary_requests.fetch_add(1, Ordering::Relaxed);
                } else {
                    self.error_count.fetch_add(1, Ordering::Relaxed);
                }
            }

            // Update backend_method_stats for every backend.
            self.backend_method_stats
                .entry(stat.backend_name.clone())
                .or_default()
                .entry(stat.method.clone())
                .or_insert_with(|| Mutex::new(Vec::new()))
                .lock()
                .unwrap()
                .push(stat.duration);

            // If the winning backend is the primary and this is not a batch
            // (batches are handled separately), update method_stats and the CU
            // totals; primary_stats is assumed to carry the method name used
            // for CU calculation.
            if let Some(ref winner_name_val) = winning_backend_name {
                if &stat.backend_name == winner_name_val && stat.backend_name.contains("-primary") && stat.error.is_none() {
                    // Update method_stats (for the primary).
                    self.method_stats
                        .entry(stat.method.clone())
                        .or_insert_with(|| Mutex::new(Vec::new()))
                        .lock()
                        .unwrap()
                        .push(stat.duration);

                    // Update the CU totals.
                    let cu_price = self.method_cu_prices.get(&stat.method).map_or_else(
                        || self.method_cu_prices.get("default").map_or(0, |p| *p.value()),
                        |p| *p.value(),
                    );
                    if cu_price > 0 {
                        self.total_cu.fetch_add(cu_price, Ordering::Relaxed);
                        self.method_cu.entry(stat.method.clone()).or_insert_with(|| AtomicU64::new(0)).fetch_add(cu_price, Ordering::Relaxed);
                    }
                }
            }
        }
    }

    pub fn add_batch_stats(&self, methods: &[String], duration: Duration, backend_name: &str) {
        if !backend_name.contains("-primary") { // Only the primary processes batches directly for now
            warn!("add_batch_stats called for non-primary backend: {}", backend_name);
            return;
        }

        let mut batch_cu: u64 = 0;
        for method_name in methods {
            let cu_price = self.method_cu_prices.get(method_name).map_or_else(
                || self.method_cu_prices.get("default").map_or(0, |p| *p.value()),
                |p| *p.value(),
            );
            batch_cu += cu_price;

            if cu_price > 0 {
                self.method_cu.entry(method_name.clone()).or_insert_with(|| AtomicU64::new(0)).fetch_add(cu_price, Ordering::Relaxed);
            }

            // Update method_stats for each method in the batch on the primary.
            self.method_stats
                .entry(method_name.clone())
                .or_insert_with(|| Mutex::new(Vec::new()))
                .lock()
                .unwrap()
                .push(duration); // The batch duration is reused for every method as an approximation

            // Update backend_method_stats.
            self.backend_method_stats
                .entry(backend_name.to_string())
                .or_default()
                .entry(method_name.clone())
                .or_insert_with(|| Mutex::new(Vec::new()))
                .lock()
                .unwrap()
                .push(duration);
        }

        if batch_cu > 0 {
            self.total_cu.fetch_add(batch_cu, Ordering::Relaxed);
        }
        // Note: total_requests is incremented by add_stats, which should be called for the overall batch request.
    }

    pub fn add_websocket_stats(&self, ws_stat: WebSocketStats) {
        if ws_stat.error.is_some() {
            self.error_count.fetch_add(1, Ordering::Relaxed);
        }
        self.ws_stats.lock().unwrap().push(ws_stat);
        self.total_ws_connections.fetch_add(1, Ordering::Relaxed);
    }

    // STUBBED METHODS - to be implemented later
    pub fn get_primary_p75_for_method(&self, _method: &str) -> Duration {
        // Placeholder: return a fixed default.
        debug!("StatsCollector::get_primary_p75_for_method called (stub)");
        Duration::from_millis(25) // Default from the Go calculateBatchDelay fallback
    }

    pub fn get_primary_p50_for_method(&self, _method: &str) -> Duration {
        // Placeholder: return a fixed default.
        debug!("StatsCollector::get_primary_p50_for_method called (stub)");
        Duration::from_millis(15)
    }

    pub fn is_expensive_method_by_stats(&self, _method: &str) -> bool {
        // Placeholder: always return false.
        debug!("StatsCollector::is_expensive_method_by_stats called (stub)");
        false
    }

    pub fn select_best_secondary_for_expensive_method(
        &self,
        _method: &str,
        _backends: &[Backend],
        _block_height_tracker: &Option<Arc<BlockHeightTracker>>,
        _secondary_probe: &Option<Arc<SecondaryProbe>>,
    ) -> Option<Backend> {
        // Placeholder: always return None.
        debug!("StatsCollector::select_best_secondary_for_expensive_method called (stub)");
        None
    }
}
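The percentile getters above are stubs. One plausible shape for them (an assumption, not the author's implementation) is a nearest-rank percentile over the recorded durations:

use std::time::Duration;

// Nearest-rank percentile over a duration sample; get_primary_p75_for_method
// could apply this to the method_stats entry for the given method.
fn percentile(samples: &[Duration], pct: f64) -> Option<Duration> {
    if samples.is_empty() {
        return None;
    }
    let mut sorted = samples.to_vec();
    sorted.sort();
    let rank = ((pct / 100.0) * sorted.len() as f64).ceil() as usize;
    Some(sorted[rank.saturating_sub(1).min(sorted.len() - 1)])
}

fn main() {
    let samples: Vec<Duration> = (1u64..=100).map(Duration::from_millis).collect();
    assert_eq!(percentile(&samples, 75.0), Some(Duration::from_millis(75)));
    assert_eq!(percentile(&samples, 50.0), Some(Duration::from_millis(50)));
}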
107
benchmark_proxy_rust/src/structures.rs
Normal file
@@ -0,0 +1,107 @@
use serde::{Serialize, Deserialize};
use url::Url;
use http::StatusCode;
use std::time::{Duration, SystemTime};

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct JsonRpcRequest {
    pub method: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<serde_json::Value>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub jsonrpc: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub params: Option<serde_json::Value>,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BatchInfo {
    pub is_batch: bool,
    pub methods: Vec<String>,
    pub request_count: usize,
    pub has_stateful: bool,
}

#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct Backend {
    #[serde(with = "url_serde")]
    pub url: Url,
    pub name: String,
    pub role: String, // Consider an enum BackendRole { Primary, Secondary } later
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ResponseStats {
    pub backend_name: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(with = "http_serde_status_code_option", default)]
    pub status_code: Option<StatusCode>,
    #[serde(with = "humantime_serde")]
    pub duration: Duration,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
    pub method: String,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct WebSocketStats {
    pub backend_name: String,
    pub error: Option<String>, // Serde's default Option<String> handling is fine
    pub connect_time: Duration, // Default Duration serde (secs/nanos struct)
    pub is_active: bool,
    pub client_to_backend_messages: u64,
    pub backend_to_client_messages: u64,
}

#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct CuDataPoint {
    pub timestamp: SystemTime,
    pub cu: u64,
}

// Helper module for serializing/deserializing Option<http::StatusCode>
mod http_serde_status_code_option {
    use http::StatusCode;
    use serde::{Deserialize, Deserializer, Serializer};

    pub fn serialize<S>(status_code: &Option<StatusCode>, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match status_code {
            Some(sc) => serializer.serialize_some(&sc.as_u16()),
            None => serializer.serialize_none(),
        }
    }

    pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<StatusCode>, D::Error>
    where
        D: Deserializer<'de>,
    {
        Option::<u16>::deserialize(deserializer)?
            .map(|code| StatusCode::from_u16(code).map_err(serde::de::Error::custom))
            .transpose()
    }
}

// Helper module for serializing/deserializing url::Url
mod url_serde {
    use url::Url;
    use serde::{Deserialize, Deserializer, Serializer};

    pub fn serialize<S>(url: &Url, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_str(url.as_str())
    }

    pub fn deserialize<'de, D>(deserializer: D) -> Result<Url, D::Error>
    where
        D: Deserializer<'de>,
    {
        String::deserialize(deserializer)?
            .parse()
            .map_err(serde::de::Error::custom)
    }
}
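A round-trip sketch for the serde helpers above, written as a hypothetical test (not part of the diff): Backend's URL serializes as a plain string via the url_serde module.

#[cfg(test)]
mod serde_round_trip_tests {
    use super::*;

    #[test]
    fn backend_url_round_trips_as_string() {
        let raw = r#"{"url":"https://rpc.example.com/","name":"example-primary","role":"primary"}"#;
        let backend: Backend = serde_json::from_str(raw).expect("valid backend JSON");
        assert_eq!(backend.url.as_str(), "https://rpc.example.com/");
        let value = serde_json::to_value(&backend).expect("serializable");
        assert_eq!(value["url"], "https://rpc.example.com/");
    }
}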
228
benchmark_proxy_rust/src/websocket_handler.rs
Normal file
@@ -0,0 +1,228 @@
use std::sync::Arc;
use std::time::Instant;

use futures_util::{sink::SinkExt, stream::StreamExt};
use hyper::{Body, Request, Response, StatusCode};

use crate::config::AppConfig;
use crate::stats_collector::StatsCollector;
use crate::structures::{Backend, WebSocketStats};

pub async fn handle_websocket_request(
    mut req: Request<Body>,
    app_config: Arc<AppConfig>,
    stats_collector: Arc<StatsCollector>,
    all_backends: Arc<Vec<Backend>>,
) -> Result<Response<Body>, Box<dyn std::error::Error + Send + Sync + 'static>> {
    let upgrade_start_time = Instant::now();

    // Check that this is an upgrade request.
    if !hyper_tungstenite::is_upgrade_request(&req) {
        log::warn!("Not a WebSocket upgrade request");
        let mut resp = Response::new(Body::from("Not a WebSocket upgrade request"));
        *resp.status_mut() = StatusCode::BAD_REQUEST;
        return Ok(resp);
    }

    // Attempt to upgrade the connection.
    let (response, websocket) = match hyper_tungstenite::upgrade(&mut req, None) {
        Ok((resp, ws)) => (resp, ws),
        Err(e) => {
            log::error!("WebSocket upgrade failed: {}", e);
            let mut resp = Response::new(Body::from(format!("WebSocket upgrade failed: {}", e)));
            *resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; // Or BAD_REQUEST
            return Ok(resp);
        }
    };

    // Handle the WebSocket connection in a task after the 101 is sent.
    tokio::spawn(async move {
        match websocket.await {
            Ok(client_ws_stream) => {
                if app_config.enable_detailed_logs {
                    log::info!("Client WebSocket connection established.");
                }
                // The client connection is upgraded; now connect to the primary backend.
                proxy_websocket_to_primary(client_ws_stream, app_config, stats_collector, all_backends).await;
            }
            Err(e) => {
                log::error!("Error awaiting client WebSocket upgrade: {}", e);
                // There is no client WS connection to record stats against other
                // than the failed upgrade attempt itself.
                stats_collector.add_websocket_stats(WebSocketStats {
                    backend_name: "client_upgrade_failed".to_string(),
                    error: Some(format!("Client WS upgrade await error: {}", e)),
                    connect_time: upgrade_start_time.elapsed(),
                    is_active: false,
                    client_to_backend_messages: 0,
                    backend_to_client_messages: 0,
                });
            }
        }
    });

    // Return the 101 Switching Protocols response to the client.
    Ok(response)
}

async fn proxy_websocket_to_primary(
    // Awaiting a HyperWebsocket yields a tungstenite stream over the upgraded
    // connection (assumes hyper_tungstenite and tokio_tungstenite share the
    // same tungstenite version, so the stream and Message types line up).
    mut client_ws_stream: tokio_tungstenite::WebSocketStream<hyper::upgrade::Upgraded>,
    app_config: Arc<AppConfig>,
    stats_collector: Arc<StatsCollector>,
    all_backends: Arc<Vec<Backend>>,
) {
    let connect_to_primary_start_time = Instant::now();
    let mut backend_name_for_stats = "primary_unknown".to_string();

    // 1. Find the primary backend.
    let primary_backend = match all_backends.iter().find(|b| b.role == "primary") {
        Some(pb) => {
            backend_name_for_stats = pb.name.clone();
            pb
        }
        None => {
            log::error!("No primary backend configured for WebSocket proxy.");
            // Close the client connection gracefully if possible.
            let _ = client_ws_stream.close(None).await;
            stats_collector.add_websocket_stats(WebSocketStats {
                backend_name: backend_name_for_stats,
                error: Some("No primary backend configured".to_string()),
                connect_time: connect_to_primary_start_time.elapsed(),
                is_active: false,
                client_to_backend_messages: 0,
                backend_to_client_messages: 0,
            });
            return;
        }
    };

    // 2. Connect to the primary backend's WebSocket.
    let mut ws_url = primary_backend.url.clone();
    let scheme = if ws_url.scheme() == "https" { "wss" } else { "ws" };
    if ws_url.set_scheme(scheme).is_err() {
        log::error!("Failed to set WebSocket scheme for backend URL: {}", primary_backend.url);
        let _ = client_ws_stream.close(None).await;
        stats_collector.add_websocket_stats(WebSocketStats {
            backend_name: backend_name_for_stats,
            error: Some(format!("Invalid backend URL scheme for {}", primary_backend.url)),
            connect_time: connect_to_primary_start_time.elapsed(),
            is_active: false,
            client_to_backend_messages: 0,
            backend_to_client_messages: 0,
        });
        return;
    }

    let backend_connect_attempt_time = Instant::now();
    let backend_ws_result = tokio_tungstenite::connect_async(ws_url.as_str()).await;
    let connect_duration = backend_connect_attempt_time.elapsed(); // Backend connection time

    let backend_ws_stream_conn = match backend_ws_result {
        Ok((stream, _response)) => {
            if app_config.enable_detailed_logs {
                log::info!("Successfully connected to primary backend WebSocket: {}", primary_backend.name);
            }
            stream
        }
        Err(e) => {
            log::error!("Failed to connect to primary backend {} WebSocket: {}", primary_backend.name, e);
            let _ = client_ws_stream.close(None).await; // Close the client connection
            stats_collector.add_websocket_stats(WebSocketStats {
                backend_name: backend_name_for_stats,
                error: Some(format!("Primary backend connect error: {}", e)),
                connect_time: connect_duration,
                is_active: false,
                client_to_backend_messages: 0,
                backend_to_client_messages: 0,
            });
            return;
        }
    };

    // 3. Proxying. Each direction runs as its own future and returns its
    // message count plus the first error it saw, so no state is mutably
    // shared between the two loops.
    let (mut client_ws_tx, mut client_ws_rx) = client_ws_stream.split();
    let (mut backend_ws_tx, mut backend_ws_rx) = backend_ws_stream_conn.split();
    let detailed_logs = app_config.enable_detailed_logs;

    let client_to_backend_task = async move {
        let mut count: u64 = 0;
        let mut error: Option<String> = None;
        while let Some(msg_result) = client_ws_rx.next().await {
            match msg_result {
                Ok(msg) => {
                    if detailed_logs { log::trace!("C->B: {:?}", msg); }
                    if backend_ws_tx.send(msg).await.is_err() {
                        if detailed_logs { log::debug!("Error sending to backend, C->B loop breaking."); }
                        break;
                    }
                    count += 1;
                }
                Err(e) => {
                    log::warn!("Error reading from client WebSocket: {}", e);
                    error.get_or_insert_with(|| format!("Client read error: {}", e));
                    break;
                }
            }
        }
        // Try to close the backend sink gracefully once the client read loop ends.
        if detailed_logs { log::debug!("C->B proxy loop finished. Closing backend_ws_tx."); }
        let _ = backend_ws_tx.close().await;
        (count, error)
    };

    let backend_to_client_task = async move {
        let mut count: u64 = 0;
        let mut error: Option<String> = None;
        while let Some(msg_result) = backend_ws_rx.next().await {
            match msg_result {
                Ok(msg) => {
                    if detailed_logs { log::trace!("B->C: {:?}", msg); }
                    if client_ws_tx.send(msg).await.is_err() {
                        if detailed_logs { log::debug!("Error sending to client, B->C loop breaking."); }
                        break;
                    }
                    count += 1;
                }
                Err(e) => {
                    log::warn!("Error reading from backend WebSocket: {}", e);
                    error.get_or_insert_with(|| format!("Backend read error: {}", e));
                    break;
                }
            }
        }
        // Try to close the client sink gracefully once the backend read loop ends.
        if detailed_logs { log::debug!("B->C proxy loop finished. Closing client_ws_tx."); }
        let _ = client_ws_tx.close().await;
        (count, error)
    };

    // Run both proxy directions concurrently.
    let ((client_to_backend_messages, c2b_error), (backend_to_client_messages, b2c_error)) =
        tokio::join!(client_to_backend_task, backend_to_client_task);
    let ws_stats_error = c2b_error.or(b2c_error);

    if app_config.enable_detailed_logs {
        log::info!("WebSocket proxying ended for {}. Client->Backend: {}, Backend->Client: {}. Error: {:?}",
            backend_name_for_stats, client_to_backend_messages, backend_to_client_messages, ws_stats_error);
    }

    let final_session_duration = connect_to_primary_start_time.elapsed();

    let final_stats = WebSocketStats {
        backend_name: backend_name_for_stats,
        error: ws_stats_error,
        connect_time: final_session_duration,
        is_active: false, // The session is over
        client_to_backend_messages,
        backend_to_client_messages,
    };
    stats_collector.add_websocket_stats(final_stats);
}
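A small check of the https-to-wss scheme mapping used above, as a hypothetical test (not part of the diff; url's set_scheme permits the change because http, https, ws, and wss are all "special" schemes):

#[cfg(test)]
mod scheme_tests {
    use url::Url;

    #[test]
    fn https_maps_to_wss_and_http_to_ws() {
        for (input, expected) in [("https://rpc.example.com/", "wss"), ("http://rpc.example.com/", "ws")] {
            let mut u = Url::parse(input).unwrap();
            let scheme = if u.scheme() == "https" { "wss" } else { "ws" };
            assert!(u.set_scheme(scheme).is_ok());
            assert_eq!(u.scheme(), expected);
        }
    }
}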
1
berachain-bartio.yml
Symbolic link
@@ -0,0 +1 @@
berachain/reth/berachain-bartio-reth-archive-trace.yml
1
berachain-bepolia-archive.yml
Symbolic link
@@ -0,0 +1 @@
berachain/reth/berachain-bepolia-reth-archive-trace.yml
1
berachain-mainnet-archive.yml
Symbolic link
@@ -0,0 +1 @@
berachain/reth/berachain-mainnet-reth-archive-trace.yml
Some files were not shown because too many files have changed in this diff.