initial plume

This commit is contained in:
goldsquid
2025-10-03 20:32:14 +07:00
parent 8549223fae
commit 11a67d32c7
3 changed files with 381 additions and 0 deletions

View File

@@ -0,0 +1,188 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/plume-mainnet-nitro-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/plume-mainnet-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
plume-mainnet-archive:
image: ${PLUME_NITRO_IMAGE:-offchainlabs/nitro-node}:${PLUME_MAINNET_NITRO_VERSION:-v3.7.4-9244576}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.caching.database-cache=${PLUME_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${PLUME_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_SNAPSHOT_CACHE:-400}
- --execution.caching.trie-clean-cache=${PLUME_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${PLUME_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_TRIE_DIRTY_CACHE:-1024}
- --execution.forwarding-target=https://rpc-plume-mainnet-1.t.conduit.xyz
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.log-history=0
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das-plume-mainnet-1.t.conduit.xyz
- --node.data-availability.sequencer-inbox-address=0x85eC1b9138a8b9659A51e2b51bb0861901040b59
- --node.feed.input.url=wss://feed-plume-mainnet-1.t.conduit.xyz
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/plume-mainnet-archive
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${PLUME_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-plume-mainnet-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./arb/plume/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.plume-mainnet-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/plume-mainnet-archive
- traefik.http.services.plume-mainnet-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.plume-mainnet-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.plume-mainnet-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.plume-mainnet-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/plume-mainnet-archive`) || Path(`/plume-mainnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.plume-mainnet-nitro-archive-leveldb-hash.rule=Path(`/plume-mainnet-archive`) || Path(`/plume-mainnet-archive/`)}
- traefik.http.routers.plume-mainnet-nitro-archive-leveldb-hash.middlewares=plume-mainnet-nitro-archive-leveldb-hash-stripprefix, ipallowlist
plume-mainnet-archive-relay:
image: ${PLUME_CELESTIA_IMAGE:-}:${PLUME_MAINNET_CELESTIA_VERSION:-}
ports:
- 21272:21272
- 21272:21272/udp
entrypoint: [/bin/celestia-server]
command:
- --celestia.namespace-id="00000d048007a33abfeb"
- --celestia.rpc="${CELESTIA_MAINNET_RPC}"
- --das.enable
- --das.rpc.url="http://plume-mainnet-archive-provider:9880"
- --fallback-enabled
- --log-level="INFO"
- --rpc-addr="0.0.0.0"
- --rpc-port="26657"
restart: unless-stopped
networks:
- chains
volumes:
- ./arb/plume/mainnet:/config
- .jwtsecret:/jwtsecret:ro
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
plume-mainnet-archive-provider:
# same image as the client
image: ${PLUME_NITRO_IMAGE:-offchainlabs/nitro-node}:${PLUME_MAINNET_NITRO_VERSION:-v3.7.4-9244576}
expose:
- 9880
entrypoint: /usr/local/bin/daprovider
command:
- --das-server.addr="0.0.0.0"
- --das-server.data-availability.enable
- --das-server.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --das-server.data-availability.rest-aggregator.enable
- --das-server.data-availability.rest-aggregator.urls=https://das-plume-mainnet-1.t.conduit.xyz
- --das-server.data-availability.sequencer-inbox-address=0x85eC1b9138a8b9659A51e2b51bb0861901040b59
- --das-server.port="9880"
logging: *logging-defaults
volumes:
plume-mainnet-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: plume
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
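
With the archive stack from the file above running, a quick smoke test can be run against the Traefik route from the usage comment. This is a minimal sketch rather than part of the committed files; it assumes the same ${IP}.traefik.me domain from the usage block and uses only standard JSON-RPC methods and docker compose commands:

# follow the node while it downloads the snapshot and connects to the sequencer feed
docker compose logs -f plume-mainnet-archive

# eth_syncing should eventually report false; until then it returns sync progress
curl -s -X POST https://${IP}.traefik.me/plume-mainnet-archive \
  -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}'

# compare the local head with the public forwarding target configured in the file
curl -s -X POST https://${IP}.traefik.me/plume-mainnet-archive \
  -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
curl -s -X POST https://rpc-plume-mainnet-1.t.conduit.xyz \
  -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

The two block numbers only need to be close; since --execution.forwarding-target is set, submitted transactions are relayed to the Conduit endpoint either way.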

View File

@@ -0,0 +1,192 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/plume-mainnet-nitro-pruned-pebble-path.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/plume-mainnet \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
plume-mainnet:
image: ${PLUME_NITRO_IMAGE:-offchainlabs/nitro-node}:${PLUME_MAINNET_NITRO_VERSION:-v3.7.4-9244576}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=${PLUME_MAINNET_ARCHIVE_DB:-false}
- --execution.caching.database-cache=${PLUME_MAINNET_NITRO_PRUNED_PEBBLE_PATH_DATABASE_CACHE:-2048}
- --execution.caching.snapshot-cache=${PLUME_MAINNET_NITRO_PRUNED_PEBBLE_PATH_SNAPSHOT_CACHE:-400}
- --execution.caching.state-scheme=path
- --execution.caching.trie-clean-cache=${PLUME_MAINNET_NITRO_PRUNED_PEBBLE_PATH_TRIE_CLEAN_CACHE:-600}
- --execution.caching.trie-dirty-cache=${PLUME_MAINNET_NITRO_PRUNED_PEBBLE_PATH_TRIE_DIRTY_CACHE:-1024}
- --execution.forwarding-target=https://rpc-plume-mainnet-1.t.conduit.xyz
- --execution.rpc.gas-cap=5500000000
- --execution.rpc.state-scheme=path
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das-plume-mainnet-1.t.conduit.xyz
- --node.data-availability.sequencer-inbox-address=0x85eC1b9138a8b9659A51e2b51bb0861901040b59
- --node.feed.input.url=wss://feed-plume-mainnet-1.t.conduit.xyz
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/plume-mainnet
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${PLUME_MAINNET_NITRO_PRUNED_PEBBLE_PATH_DATA:-plume-mainnet-nitro-pruned-pebble-path}:/root/.arbitrum
- ./arb/plume/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.plume-mainnet-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/plume-mainnet
- traefik.http.services.plume-mainnet-nitro-pruned-pebble-path.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.plume-mainnet-nitro-pruned-pebble-path.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.plume-mainnet-nitro-pruned-pebble-path.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.plume-mainnet-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && (Path(`/plume-mainnet`) || Path(`/plume-mainnet/`))}
- ${NO_SSL:+traefik.http.routers.plume-mainnet-nitro-pruned-pebble-path.rule=Path(`/plume-mainnet`) || Path(`/plume-mainnet/`)}
- traefik.http.routers.plume-mainnet-nitro-pruned-pebble-path.middlewares=plume-mainnet-nitro-pruned-pebble-path-stripprefix, ipallowlist
plume-mainnet-relay:
image: ${PLUME_CELESTIA_IMAGE:-}:${PLUME_MAINNET_CELESTIA_VERSION:-}
ports:
- 20604:20604
- 20604:20604/udp
entrypoint: [/bin/celestia-server]
command:
- --celestia.namespace-id="00000d048007a33abfeb"
- --celestia.rpc="${CELESTIA_MAINNET_RPC}"
- --das.enable
- --das.rpc.url="http://plume-mainnet-provider:9880"
- --fallback-enabled
- --log-level="INFO"
- --rpc-addr="0.0.0.0"
- --rpc-port="26657"
restart: unless-stopped
networks:
- chains
volumes:
- ./arb/plume/mainnet:/config
- .jwtsecret:/jwtsecret:ro
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
plume-mainnet-provider:
# same image as the client
image: ${PLUME_NITRO_IMAGE:-offchainlabs/nitro-node}:${PLUME_MAINNET_NITRO_VERSION:-v3.7.4-9244576}
expose:
- 9880
entrypoint: /usr/local/bin/daprovider
command:
- --das-server.addr="0.0.0.0"
- --das-server.data-availability.enable
- --das-server.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --das-server.data-availability.rest-aggregator.enable
- --das-server.data-availability.rest-aggregator.urls=https://das-plume-mainnet-1.t.conduit.xyz
- --das-server.data-availability.sequencer-inbox-address=0x85eC1b9138a8b9659A51e2b51bb0861901040b59
- --das-server.port="9880"
logging: *logging-defaults
volumes:
plume-mainnet-nitro-pruned-pebble-path:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: plume
method-groups:
enabled:
- debug
- filter
methods:
disabled:
# not compatible with path state scheme
- name: debug_traceBlockByHash
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
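
Beyond IP, DOMAIN and COMPOSE_FILE shown in the usage comments, both files reference environment variables that have no default in this commit. The fragment below is an illustrative .env sketch, not a working configuration: the variable names are taken verbatim from the ${...} references above, and every value is a placeholder to be replaced with real endpoints and image tags.

# parent chain (Ethereum L1) endpoints used by the nitro node and the daprovider
ETHEREUM_MAINNET_EXECUTION_RPC=http://my-ethereum-node:8545   # placeholder
ETHEREUM_MAINNET_BEACON_REST=http://my-ethereum-beacon:5052   # placeholder

# Celestia RPC consumed by the celestia-server relay
CELESTIA_MAINNET_RPC=http://my-celestia-node:26658            # placeholder

# celestia-server image and tag for the relay service (no default is set in the files)
PLUME_CELESTIA_IMAGE=<celestia-server-image>                  # placeholder
PLUME_MAINNET_CELESTIA_VERSION=<tag>                          # placeholder

# leave NO_SSL unset to keep the websecure/TLS router labels;
# setting it to any non-empty value switches the routers to the plain-HTTP rule
#NO_SSL=true

# optional overrides that already have defaults in the files:
# image tag, cache sizes, archive toggle and the data volume/path
#PLUME_MAINNET_NITRO_VERSION=v3.7.4-9244576
#PLUME_MAINNET_ARCHIVE_DB=false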