228 Commits

Author SHA1 Message Date
cventastic
96c90dee4b only touch setupdone for pokt bootstrap if tar command succeeds 2022-04-14 18:40:18 +02:00
cventastic
121c44446c add old snapshot 2022-04-14 18:34:43 +02:00
cventastic
6785eec7c6 use different ports 2022-04-14 18:20:24 +02:00
cventastic
a5aacfd4d9 add bootstrap... 2022-04-14 18:18:20 +02:00
cventastic
3859c921ac add mainnet volume 2022-04-14 18:10:46 +02:00
cventastic
7b0dae1272 moved volumes from env to volumes :/ 2022-04-14 18:07:39 +02:00
cventastic
34d27c0bfb pokt-test preparation for beta-update 2022-04-14 18:06:13 +02:00
cventastic
5f3f9cd471 pokt-test preparation for beta-update 2022-04-14 17:57:22 +02:00
cventastic
82fee8d3d6 wrong handling of variables 2022-04-07 13:47:41 +02:00
cventastic
f610486fcb missing space 2022-04-07 13:42:11 +02:00
cventastic
367fe7e355 deleted example config, added polygon healthcheck 2022-04-07 13:37:54 +02:00
cventastic
1f6e128144 added bsc
removed merge notation in prometheus-lt-1.yml
2022-03-31 13:09:45 +02:00
cventastic
bfe6db1eda put timeout from 30 to 60s 2022-03-25 11:41:38 +01:00
cventastic
ee90c39c29 changed erigon for geth on another backend 2022-03-25 11:35:41 +01:00
cventastic
f960e2a87c added polygon 2022-03-24 17:01:12 +01:00
czarly
d6264c93bd merge 2022-02-13 21:50:23 +00:00
czarly
2c8788a373 combined haproxy with the lt config 2022-02-13 21:44:05 +00:00
Your Name
1c81b55c1b missing haproxy config 2022-02-13 21:32:07 +01:00
Your Name
b202565599 missing haproxy config 2022-02-13 21:32:07 +01:00
Your Name
68d4f07d67 refactor the compose files in modules and start to generate prometheus scrape targets. added haproxy. 2022-02-13 20:49:01 +01:00
Your Name
51c432ae78 refactor the compose files in modules and start to generate prometheus scrape targets. added haproxy. 2022-02-13 20:49:01 +01:00
cventastic
34ad7988b4 added healthcheck to mon-client 2022-02-11 11:34:30 +01:00
cventastic
3eda933c49 added healthcheck wireguard 2022-02-11 11:30:31 +01:00
cventastic
91f49a9fef add datasource rpc-de-02 2022-02-11 11:02:38 +01:00
cventastic
6bd308e0f9 removed blank line ropsten-healthcheck.sh
removed Authentication, not available anymore. using whitelist now...
2022-02-10 09:49:53 +01:00
cventastic
4d129c0e46 removed space in filename.... 2022-02-10 09:40:17 +01:00
cventastic
c1b3cc99ad re-add haproxy 2022-02-10 09:33:05 +01:00
cventastic
79ab34fc17 cant put network in 2022-02-03 17:36:26 +01:00
cventastic
b3dd63d00a added networks. dont check this out on rpc-de-1 xD 2022-02-03 17:34:10 +01:00
cventastic
d9369f4f9a added datasource for pokt 2022-01-30 10:19:33 +01:00
cventastic
42f9a262c9 🤦 2022-01-30 10:11:31 +01:00
cventastic
f83ed0170f changed names added. pokt-lt-1 metrics 2022-01-30 10:05:30 +01:00
cventastic
a27a6be96f changed names. added datasources 2022-01-30 09:33:44 +01:00
cventastic
6296e14282 i already established thats not working... forgot it for rpc. 2022-01-30 09:07:27 +01:00
cventastic
14483ea8a2 geth-mainnet doesnt exist anymore. forgot to delete volume 2022-01-30 08:54:59 +01:00
cventastic
d8d5c6e436 no bsc here 2022-01-30 08:27:14 +01:00
cventastic
b359aec426 changed harmony volume name for style reasons 2022-01-30 08:16:38 +01:00
cventastic
05f4e2acb7 wrong env 2022-01-30 05:11:48 +01:00
cventastic
e21e48b8eb use bigger and better nodeexporter dashboard. 2022-01-27 20:42:22 +01:00
cventastic
a734ac1171 doesnt seem to work 2022-01-27 20:07:38 +01:00
cventastic
4f422e48a1 changed port... double usage... 2022-01-27 19:57:28 +01:00
cventastic
1331b184d5 wrong volume name 2022-01-27 19:50:07 +01:00
cventastic
ff3e1b47c8 added 4 lithuanian pokt nodes.
we will have to use --scale if that goes on in this speed.
because docker-files don't scale.
added docker-compose.pokt-lt-01.yml
2022-01-27 18:59:13 +01:00
cventastic
2cf4bac577 dont need pushgateway atm maybe later.. 2022-01-27 18:09:15 +01:00
cventastic
d29348e5fc dont need pushgateway atm maybe later.. 2022-01-27 15:55:48 +01:00
cventastic
b9c9379ba1 added harmony-rpc bootstrap 2022-01-27 03:05:40 +01:00
cventastic
893ec05e73 add metrics port for prometheus 2022-01-25 17:13:43 +01:00
cventastic
67b0026430 add tcp ports for harmony to outside world 2022-01-25 17:08:29 +01:00
cventastic
4608233b1e added hmy cli tool 2022-01-25 16:06:44 +01:00
cventastic
05f67d7e59 we dont need geth-mainnet anymore 2022-01-25 13:55:09 +01:00
cventastic
c81945a6eb update README_POKT.md 2022-01-25 13:10:22 +01:00
cventastic
127fcd1d6f no bash just shell 2022-01-24 12:04:45 +01:00
cventastic
888251469f new dashboards 2022-01-24 11:03:31 +01:00
cventastic
d3b529bc58 new dashboards 2022-01-24 11:03:08 +01:00
cventastic
dac6a5c0eb Docs say: --prometheus enable HTTP / Prometheus requests (default true) 2022-01-24 10:36:27 +01:00
cventastic
05554dedbf fix underscore 2022-01-24 10:29:43 +01:00
cventastic
b71bbcd45d add prometheus_harmony.yml 2022-01-24 10:25:36 +01:00
cventastic
83ef8d47ad fix harmony.sh RUN command
update README_HARMONY.md
enable prometheus for harmony.sh
2022-01-24 10:25:02 +01:00
cventastic
32e474347f 🤦 2022-01-24 09:45:46 +01:00
cventastic
47667f1aeb bootstrap debugging 2022-01-24 08:47:48 +01:00
cventastic
297dd6dddb bootstrap debugging 2022-01-24 08:38:31 +01:00
cventastic
126daf767f create folders 2022-01-24 08:34:51 +01:00
cventastic
6093e279ea uncomment download in harmony.sh after manual move 2022-01-24 08:30:11 +01:00
cventastic
7df451b58c uncomment download in harmony.sh after manual move 2022-01-24 08:24:15 +01:00
cventastic
25cf170ed6 fix bootstrap skript harmony.sh. fix volumes of harmony docker file 2022-01-24 08:20:32 +01:00
cventastic
7eb466e383 fix README_POKT.md 2022-01-23 22:44:18 +01:00
cventastic
59cdc5eba2 added pokt/config.json to prepare.sh 2022-01-23 21:15:18 +01:00
cventastic
7f25960c5a added pokt config with metrics enabled... 2022-01-23 21:11:11 +01:00
cventastic
e6098398c6 added tendermint metrics 2022-01-23 19:46:34 +01:00
cventastic
d27fc8d269 update README.md 2022-01-23 16:58:18 +01:00
cventastic
7ea656b6c2 cat/EOF doesnt like to be inside if statement 2022-01-23 16:53:34 +01:00
cventastic
1d272cdc75 forgot to add script 2022-01-23 16:47:36 +01:00
cventastic
1713f8b759 added bootstrap to harmony 2022-01-23 16:47:08 +01:00
cventastic
3c8c624c10 shard 1 and no persisten bootnodes, let harmony choose 2022-01-23 15:13:45 +01:00
cventastic
6b18040473 remove comment 2022-01-23 14:07:38 +01:00
cventastic
4bff8010e6 added harmony validator compose-file 2022-01-23 13:34:01 +01:00
cventastic
c99af9a7ca hide query path. good i found out before someone else did :O 2022-01-22 09:41:33 +01:00
cventastic
ed8188db80 why everyone does their own regexp 2022-01-22 09:29:02 +01:00
cventastic
f311c54be4 syn 2022-01-22 08:26:08 +01:00
cventastic
4cd11a6c79 fix ) 2022-01-22 08:03:22 +01:00
cventastic
c5a77a8e3a hide query path 2022-01-22 07:50:29 +01:00
cventastic
2f4178daba sort chains, update readme, rename mon-compose files 2022-01-19 00:26:36 +01:00
cventastic
69770762f3 fix prometheus.yml 2022-01-15 03:11:32 +01:00
cventastic
b9ab440ef4 traefik metrics 2022-01-13 23:48:12 +01:00
cventastic
5639cf70b3 added metrics for traefik 2022-01-13 23:38:40 +01:00
cventastic
749e9ccc22 fix variable name 2022-01-13 23:12:33 +01:00
cventastic
9718b6645e Merge branch 'main' of github.com:cventastic/POKT_DOKT 2022-01-13 23:07:53 +01:00
cventastic
cc926af991 added traefik metrics and more chains 2022-01-13 23:07:39 +01:00
Sebastian
db7e7a51b5 open avalanche ports and revert version pinning 2022-01-13 19:53:02 +01:00
Sebastian
3f3e19a675 open avalanche ports and revert version pinning 2022-01-13 19:51:18 +01:00
Sebastian
a4faf06b74 open avalanche ports and revert version pinning 2022-01-13 19:50:33 +01:00
Sebastian
1b14565870 Merge branch 'main' of github.com:cventastic/POKT_DOKT into main 2022-01-13 19:32:15 +01:00
Sebastian
2b24b182c8 pin avax version 2022-01-13 19:32:08 +01:00
cventastic
019e9db840 fix env 2022-01-13 18:38:59 +01:00
cventastic
91cec100f9 added scond pokt node 2022-01-13 18:27:01 +01:00
cventastic
06a09c9dfb second node on iokula 2022-01-13 18:21:09 +01:00
cventastic
fbf7c7406d added pokt monitoring for earnings. work in progress 2022-01-13 17:33:27 +01:00
cventastic
3bccfa038e not working 2022-01-12 19:19:42 +01:00
cventastic
e07f2359dd fix script 2022-01-12 19:06:08 +01:00
cventastic
54558d6e5a add erigon, simple stupid alarming 2022-01-12 19:00:12 +01:00
cventastic
b1dfc66c5f new xdai.json 2022-01-12 17:09:20 +01:00
Sebastian
d61180a5cd fix 2022-01-12 14:53:24 +01:00
Sebastian
23ee7b3fe4 added fuse 2022-01-12 14:38:45 +01:00
Sebastian
be9a775de5 added bsc to the compose file 2022-01-12 14:01:46 +01:00
Sebastian
e5fd725e3e fix avalanche volume mount point 2022-01-12 08:26:11 +01:00
cventastic
258e67da0b updated readme 2022-01-11 15:04:19 +01:00
cventastic
93c4f95ac8 fix harmony test 2022-01-11 15:00:52 +01:00
cventastic
099f6347c7 added harmony 2022-01-11 14:57:37 +01:00
cventastic
478b9bba91 doesn't work 2022-01-11 11:02:43 +01:00
cventastic
4af8f8fb51 added filebased logging 2022-01-11 10:59:15 +01:00
cventastic
5cb05a7a2c wg0.conf file needs some other kind of deployment, maybe ansible? 2022-01-11 10:42:34 +01:00
cventastic
d807c66150 added harmony to relay 2022-01-11 10:37:43 +01:00
cventastic
b3b08b22b3 add wg0.conf otherwise a folder gets created?! 2022-01-10 16:02:31 +01:00
cventastic
ccedf8d7e7 added iokula pokt node 2022-01-10 15:53:11 +01:00
cventastic
d56b59e47a removed fuse 2022-01-10 12:07:04 +01:00
cventastic
e896bc69be add harmony example rpc request 2022-01-10 12:00:16 +01:00
cventastic
8ec9a3156b added correct datadir to hrmny 2022-01-05 11:57:59 +01:00
cventastic
ded282998c update readme, update testnet-relay domain variable 2022-01-04 17:02:10 +01:00
cventastic
86b739b61b grafana new version for alerting 2022-01-03 16:45:19 +01:00
cventastic
3c51faa5d4 added variable, deleted redundant stuff 2022-01-03 14:10:18 +01:00
cventastic
2076f71dd5 added to dashboards 2022-01-03 13:48:09 +01:00
cventastic
af666cbdd7 datasource needed. variable not working as expected :O 2022-01-03 13:24:22 +01:00
cventastic
340e443f0e Merge branch 'main' of github.com:cventastic/POKT_DOKT 2022-01-03 13:17:08 +01:00
cventastic
30b0fa26be leave datasource empty for variable 2022-01-03 13:17:01 +01:00
Sebastian
5073933208 Merge branch 'main' of github.com:cventastic/POKT_DOKT into main 2022-01-03 13:08:55 +01:00
Sebastian
7c0b95e47f add fuse network 2022-01-03 13:08:49 +01:00
cventastic
6206373f04 edited datasource 2022-01-03 12:59:39 +01:00
cventastic
da9569ba97 test old files 2022-01-03 12:54:11 +01:00
cventastic
d2fd3e8a2e newline for yml 2022-01-03 12:47:57 +01:00
cventastic
6e8be0bee5 Merge branch 'main' of github.com:cventastic/POKT_DOKT 2022-01-03 12:33:07 +01:00
cventastic
1ec1148786 deleted redundant grafana dashboard 2022-01-03 12:33:03 +01:00
Sebastian
5e63e8589b Merge branch 'main' of github.com:cventastic/POKT_DOKT into main 2022-01-03 12:32:07 +01:00
Sebastian
4904b09f8c make xdai p2p sync port available 2022-01-03 12:32:00 +01:00
cventastic
9202948739 specific path not working 2022-01-03 12:29:51 +01:00
cventastic
c57a8396c9 try out specific path 2022-01-03 12:27:45 +01:00
Sebastian
8dcae76a80 Merge branch 'main' of github.com:cventastic/POKT_DOKT into main 2022-01-03 12:26:28 +01:00
Sebastian
33c9df931c add xdai 2022-01-03 12:26:21 +01:00
cventastic
39ed18953e updated readme 2022-01-03 12:07:44 +01:00
cventastic
43c8ca8872 Merge branch 'main' of github.com:cventastic/POKT_DOKT 2022-01-03 12:03:35 +01:00
cventastic
df007ec024 added pokt-de-1 to monitoring 2022-01-03 12:03:20 +01:00
Sebastian
70bd91277b remove the kernel options that do not work 2022-01-03 11:59:51 +01:00
Sebastian
c5cbde2d42 Merge branch 'main' of github.com:cventastic/POKT_DOKT into main 2022-01-03 11:53:37 +01:00
Sebastian
8cb58bc0cf limits to the harmony container 2022-01-03 11:53:33 +01:00
cventastic
591f305077 changed datasource for pokt-test 2022-01-03 11:52:50 +01:00
cventastic
d4dd7c1049 added test-pokt to grafana 2022-01-03 11:41:10 +01:00
cventastic
324f92294f test provider name change 2022-01-03 11:30:40 +01:00
Sebastian
472b208ad3 but now it's done 2022-01-03 11:29:33 +01:00
Sebastian
a711631f84 but now 2022-01-03 11:17:07 +01:00
Sebastian
f5a7bf912e this is it 2022-01-03 11:15:04 +01:00
Sebastian
d202032ee1 Merge branch 'main' of github.com:cventastic/POKT_DOKT into main 2022-01-03 11:07:07 +01:00
Sebastian
f20640c36e yo fix 2022-01-03 11:06:59 +01:00
cventastic
496136edeb Merge branch 'main' of github.com:cventastic/POKT_DOKT 2022-01-03 10:52:53 +01:00
cventastic
351246021b added traefik to pokt-test 2022-01-03 10:52:39 +01:00
Sebastian
9120c4b029 added harmony to the compose file 2022-01-03 10:46:17 +01:00
cventastic
b1e7d3b6b5 remove basicauth 2022-01-03 10:10:35 +01:00
cventastic
901694f4a6 fix typo 2021-12-31 01:13:08 +01:00
cventastic
62edb08bd6 added basic auth 2021-12-31 00:58:14 +01:00
cventastic
eacf6535b2 added traefik 2021-12-30 23:19:47 +01:00
cventastic
6491889c1b ident 2021-12-30 03:32:24 +01:00
cventastic
aeb6e72139 ident 2021-12-30 03:30:57 +01:00
cventastic
a521e0c258 ident 2021-12-30 03:27:23 +01:00
cventastic
e12f2267f5 ident 2021-12-30 03:24:27 +01:00
cventastic
44f277b77d mon 2021-12-30 03:22:09 +01:00
cventastic
510f7db452 mon 2021-12-30 03:19:15 +01:00
cventastic
012c6b6c9e mon 2021-12-30 03:10:26 +01:00
cventastic
a28bacd70d mon 2021-12-30 03:08:26 +01:00
cventastic
7200666d17 mon 2021-12-30 02:27:18 +01:00
cventastic
2d51371e40 mon 2021-12-30 02:24:25 +01:00
cventastic
baa59c67b9 mon 2021-12-30 02:08:35 +01:00
cventastic
0d0e15ee22 mon 2021-12-30 02:04:48 +01:00
cventastic
79599d9ae6 mon 2021-12-30 01:56:02 +01:00
cventastic
6e41db128a mon 2021-12-30 01:37:59 +01:00
cventastic
59addb79f4 mon 2021-12-30 01:11:31 +01:00
cventastic
bf367928ec mon 2021-12-30 01:07:05 +01:00
cventastic
7638132e52 mon 2021-12-30 01:00:10 +01:00
cventastic
2b6bc0909f mon 2021-12-30 00:59:20 +01:00
cventastic
e632c866b0 mon 2021-12-30 00:56:30 +01:00
cventastic
82093d60e8 mon 2021-12-30 00:45:00 +01:00
cventastic
d05a5e51ed mon 2021-12-30 00:37:40 +01:00
cventastic
5e10b5b69f mon 2021-12-30 00:26:38 +01:00
cventastic
cedc18d11d mon 2021-12-30 00:06:31 +01:00
cventastic
81b375a398 added metrics to chains 2021-12-29 23:55:08 +01:00
cventastic
e55eb4b439 mon 2021-12-29 23:08:35 +01:00
cventastic
a2a0d2ba8b mon 2021-12-29 23:02:30 +01:00
cventastic
763ce51530 mon 2021-12-29 22:59:30 +01:00
cventastic
47d2863f1e mon 2021-12-29 22:57:47 +01:00
cventastic
c69a6bf08b mon 2021-12-29 22:38:13 +01:00
cventastic
84236b8de7 sed back file 2021-12-29 21:58:58 +01:00
cventastic
c36e81aaeb mon 2021-12-29 21:58:16 +01:00
cventastic
2b3cb73b48 fix path issue 2021-12-29 20:49:41 +01:00
cventastic
4c05514207 monitoring 2021-12-29 20:37:01 +01:00
cventastic
1210ec6cf8 invisible skeleton dirs :O 2021-12-29 19:09:19 +01:00
cventastic
7050872fc8 whitespaces :O 2021-12-29 19:01:16 +01:00
cventastic
c0ea42376d added pokt query mainnet 2021-12-29 16:51:44 +01:00
cventastic
3788c55941 added pokt query 2021-12-29 16:47:07 +01:00
cventastic
e8d7fe3c0d homogen ports on stake servers 2021-12-29 16:39:02 +01:00
cventastic
0c6a55b773 relay.sh 2021-12-29 16:34:22 +01:00
cventastic
751f3db3cb no traefic for staking node for now 2021-12-29 14:55:04 +01:00
cventastic
ba1b091257 refactor 2021-12-29 14:49:35 +01:00
cventastic
619b8245cd added right shebang 2021-12-29 02:27:32 +01:00
cventastic
0747c10839 put whitelist to services 2021-12-29 01:39:48 +01:00
cventastic
cf195e161d workaround 2021-12-28 17:02:03 +01:00
cventastic
27392854d4 permissions for multiple runs 2021-12-28 16:48:10 +01:00
cventastic
a0f6ed6dc5 added traefik to testnetstaker 2021-12-28 15:55:58 +01:00
cventastic
8865df89e5 update README.md 2021-12-28 15:33:03 +01:00
cventastic
f43985edb9 update README.md 2021-12-28 14:56:02 +01:00
cventastic
e901b88030 no infile substitution 2021-12-28 14:45:41 +01:00
cventastic
a407f96e11 rename 2021-12-28 14:40:24 +01:00
cventastic
2c03e48d48 rename 2021-12-28 14:39:25 +01:00
cventastic
a932bd8e3b changed env to bash 2021-12-28 14:15:15 +01:00
cventastic
3b25d87098 make script executable 2021-12-28 14:14:43 +01:00
cventastic
a732bac611 changed path 2021-12-28 14:12:36 +01:00
cventastic
2607864a10 addded preparation skript 2021-12-28 14:04:11 +01:00
cventastic
9d5f4298c3 added relaychains for mainnet 2021-12-28 13:15:15 +01:00
cventastic
d32a161681 setting relaymode for front nodes 2021-12-28 12:21:52 +01:00
cventastic
53753a8c8c rpc node no relay 2021-12-28 12:14:34 +01:00
cventastic
723979e176 open p2p 2021-12-28 11:45:37 +01:00
cventastic
565a765c71 added new chains for testnet 2021-12-27 12:56:30 +01:00
cventastic
050d82021c whitelist fixed 2021-12-27 12:14:32 +01:00
cventastic
605321e12c ??? 2021-12-27 12:02:48 +01:00
cventastic
1afae2e6e6 reorg middleware 2021-12-27 11:57:45 +01:00
cventastic
59a7c17436 add ip whitelist 2021-12-27 11:45:47 +01:00
cventastic
32478b158f change hostip vs domain 2021-12-27 11:39:23 +01:00
cventastic
8b00b96450 add ip whitelist 2021-12-27 11:36:06 +01:00
cventastic
ea10406e71 add ip whitelist 2021-12-27 11:21:31 +01:00
cventastic
a418d0e1e2 remove more networks 2021-12-27 11:13:17 +01:00
cventastic
b7e2f76e58 Merge pull request #2 from cventastic/split
Split
2021-12-27 10:10:24 +00:00
cventastic
7c1e67b70d Merge pull request #1 from cventastic/split
Split
2021-12-27 09:52:17 +00:00
782 changed files with 3151 additions and 563394 deletions

1
.gitignore

@@ -1 +0,0 @@
.env

355
README.md

@@ -1,353 +1,4 @@
# Blockchain Node Configurations
### Docs
This directory contains Docker Compose configurations for various blockchain networks and node implementations.
## Directory Structure
- Root level YAML files (e.g. `ethereum-mainnet.yml`, `arbitrum-one.yml`) - Main Docker Compose configurations for specific networks
- Network-specific subdirectories - Contain additional configurations, genesis files, and client-specific implementations
- Utility scripts (e.g. `show-networks.sh`, `logs.sh`) - Helper scripts for managing and monitoring nodes
## Node Types
This repository supports multiple node types for various blockchain networks:
- **Ethereum networks**: Mainnet, Sepolia, Holesky
- **Layer 2 networks**: Arbitrum, Optimism, Base, Scroll, ZKSync Era, etc.
- **Alternative L1 networks**: Avalanche, BSC, Fantom, Polygon, etc.
Most networks have both archive and pruned node configurations available, with support for different client implementations (Geth, Erigon, Reth, etc.).
## Quick Start
1. Create a `.env` file in this directory (see example below)
2. Select which node configurations you want to run by adding them to the `COMPOSE_FILE` variable
3. Run `docker compose up -d`
4. Access your RPC endpoints at `https://yourdomain.tld/path` or `http://localhost:port`
### Example .env File
```bash
# Domain settings
DOMAIN=203-0-113-42.traefik.me # Use your PUBLIC IP with dots replaced by hyphens
MAIL=your-email@example.com # Required for Let's Encrypt SSL
WHITELIST=0.0.0.0/0 # IP whitelist for access (0.0.0.0/0 allows all)
# Public IP (required for many chains)
IP=203.0.113.42 # Your PUBLIC IP (get it with: curl ipinfo.io/ip)
# Network settings
CHAINS_SUBNET=192.168.0.0/26
# RPC provider endpoints (fallback/bootstrap nodes)
ETHEREUM_MAINNET_EXECUTION_RPC=https://ethereum-rpc.publicnode.com
ETHEREUM_MAINNET_EXECUTION_WS=wss://ethereum-rpc.publicnode.com
ETHEREUM_MAINNET_BEACON_REST=https://ethereum-beacon-api.publicnode.com
ETHEREUM_SEPOLIA_EXECUTION_RPC=https://ethereum-sepolia-rpc.publicnode.com
ETHEREUM_SEPOLIA_EXECUTION_WS=wss://ethereum-sepolia-rpc.publicnode.com
ETHEREUM_SEPOLIA_BEACON_REST=https://ethereum-sepolia-beacon-api.publicnode.com
ARBITRUM_SEPOLIA_EXECUTION_RPC=https://arbitrum-sepolia-rpc.publicnode.com
ARBITRUM_SEPOLIA_EXECUTION_WS=wss://arbitrum-sepolia-rpc.publicnode.com
# SSL settings (set NO_SSL=true to disable SSL)
# NO_SSL=true
# Docker Compose configuration
# Always include base.yml and rpc.yml, then add the networks you want
COMPOSE_FILE=base.yml:rpc.yml:ethereum-mainnet.yml
```
## Usage
To start nodes defined in your `.env` file:
```bash
docker compose up -d
```
### Ports
The default ports are defined in the templates. They are randomised to avoid conflicts. Some configurations can require 7 ports to be opened for P2P discovery. Docker will override any UFW firewall rule that you define on the host. You should prevent the containers from trying to reach other nodes on local IP ranges.
You can use the following service definition as a starting point. Replace {{ chains_subnet }} with the subnet of your network; the default is 192.168.0.0/26.
```
[Unit]
Description=iptables firewall docker fix
After=docker.service
[Service]
ExecStart=/usr/local/bin/iptables-firewall.sh start
RemainAfterExit=true
StandardOutput=journal
[Install]
WantedBy=multi-user.target
```
```bash
#!/bin/bash
PATH="/sbin:/usr/sbin:/bin:/usr/bin"
# Flush existing rules in the DOCKER-USER chain
# this is potentially dangerous if other scripts write in that chain too but for now this should be the only one
iptables -F DOCKER-USER
# block heise.de to test it's working. ./ping.sh heise.de will ping from a container in the subnet.
iptables -I DOCKER-USER -s {{ chains_subnet }} -d 193.99.144.80/32 -j REJECT
# block local networks
iptables -I DOCKER-USER -s {{ chains_subnet }} -d 192.168.0.0/16 -j REJECT
iptables -I DOCKER-USER -s {{ chains_subnet }} -d 172.16.0.0/12 -j REJECT
iptables -I DOCKER-USER -s {{ chains_subnet }} -d 10.0.0.0/8 -j REJECT
# accept the subnet so containers can reach each other.
iptables -I DOCKER-USER -s {{ chains_subnet }} -d {{ chains_subnet }} -j ACCEPT
# I don't know why that is
iptables -I DOCKER-USER -s {{ chains_subnet }} -d 10.13.13.0/24 -j ACCEPT
```
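To install these, one approach is the following sketch (the unit file name `iptables-firewall.service` is an assumption; the script path matches the `ExecStart` line above):
```bash
# install the firewall script and the systemd unit, then enable the service at boot
sudo install -m 755 iptables-firewall.sh /usr/local/bin/iptables-firewall.sh
sudo cp iptables-firewall.service /etc/systemd/system/
sudo systemctl daemon-reload
sudo systemctl enable --now iptables-firewall.service
```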
### Node Structure
In general, nodes can have one or more of the following components:
- a client (execution layer)
- a node (for consensus)
- a relay (for data availability access)
- a database (external to the client, mostly zk rollups; can have multiple databases)
- a proxy (to map http access and websockets to the same endpoint)
The simplest examples have only a client. The compose files define one entrypoint to query the node: usually it's the client, otherwise it's the proxy. Some clients have multiple entrypoints because they allow querying both the consensus layer and the execution layer.
In the root folder of this repository you can find convenience yml files which are symlinks to specific compose files. The naming of the symlinks follows the pattern {network_name}-{chain_name}.yml, which leaves the client and db type unspecified, so they are defaults.
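For example, a root-level symlink resolves to a concrete compose file (a sketch; the symlink name `abstract-mainnet.yml` is assumed from the convention above, the target path is one of the files in this repository):
```bash
# resolve a convenience symlink to its concrete compose file
readlink abstract-mainnet.yml
# abstract/external-node/abstract-mainnet-external-node-pruned.yml
```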
### Syncing
The configurations aim to work standalone, restoring state as much as possible from public sources. Using snapshots can help syncing faster. For some configurations it's not reasonably possible to maintain a version that can be bootstrapped from scratch using only the compose file.
### Naming conventions
- default client is the default client for the network. Usually it's geth or op-geth.
- default sync mode is pruned. If available, clients are snap-synced.
- default node is op-node or prysm or whatever is the default for the network (e.g. beacon-kit for berachain, goat for goat, etc.)
- default sync mode for nodes is pruned
- default client for archive nodes is (op-)erigon or (op-)reth
- default sync mode for (op-)reth and (op-)erigon is archive-trace.
- default sync mode for erigon3 is pruned-trace.
- default db is postgres
- default proxy is nginx
#### Node features
The idea is to assume a default node configuration that is able to drive the execution client. If the beacon node database has special features, the file name includes them after a double hyphen, e.g. `ethereum-mainnet-geth-pruned-pebble-hash--lighthouse-pruned-blobs.yml` would be a node that has a pruned execution client and a pruned beacon node database with a complete blob history.
#### Container names
The docker containers are generally named using the base name and the component suffix. The base name is generally the network name and the chain name, plus the sync mode archive in case of archive nodes. The rationale is that it doesn't make sense to run 2 pruned nodes for the same chain on the same machine, nor 2 archive nodes for the same chain. The volumes that are created in /var/lib/docker/volumes use the full name of the node including the sync mode and database features. This allows switching out the implementation of parts of the configuration without causing conflicts, e.g. exchanging prysm for nimbus as the node implementation while keeping the same execution client. Environment variables also use the full name of the component that they are defined for.
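As an illustration, the service and volume names from the abstract-mainnet compose file later in this repository follow the scheme (a sketch, not literal output; actual container names may carry a compose project prefix):
```bash
docker ps --format '{{.Names}}' | grep abstract-mainnet
# abstract-mainnet          (nginx proxy, the entrypoint)
# abstract-mainnet-client   (external-node execution client)
# abstract-mainnet-db       (postgres)

docker volume ls --format '{{.Name}}' | grep abstract-mainnet
# abstract-mainnet-external-node-pruned
# abstract-mainnet-external-node-pruned_db
```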
## Utility Scripts
This directory includes several useful scripts to help you manage and monitor your nodes:
### Status and Monitoring
- `show-status.sh [config-name]` - Check sync status of all configured nodes (or specific config if provided)
- `show-db-size.sh` - Display disk usage of all Docker volumes, sorted by size
- `show-networks.sh` - List all available network configurations
- `show-running.sh` - List currently running containers
- `sync-status.sh <config-name>` - Check synchronization status of a specific configuration
- `logs.sh <config-name>` - View logs of all containers for a specific configuration
- `latest.sh <config-name>` - Get the latest block number and hash of a local node
- `ping.sh <container-name>` - Test connectivity to a container from inside the Docker network
### Node Management
- `stop.sh <config-name>` - Stop all containers for a specific configuration
- `force-recreate.sh <config-name>` - Force recreate all containers for a specific configuration
- `backup-node.sh <config-name> [webdav_url]` - Backup Docker volumes for a configuration (locally or to WebDAV)
- `restore-volumes.sh <config-name> [http_url]` - Restore Docker volumes from backup (local or HTTP source)
- `cleanup-backups.sh` - Clean up old backup files
- `list-backups.sh` - List available backup files
- `op-wheel.sh` - Tool for Optimism rollup maintenance, including rewinding to a specific block
Note: `<config-name>` refers to the compose file name without the .yml extension (e.g., `ethereum-mainnet` for ethereum-mainnet.yml)
#### Nuclear option to recreate a node
```bash
./stop.sh <config-name> && ./rm.sh <config-name> && ./delete-volumes.sh <config-name> && ./force-recreate.sh <config-name> && ./logs.sh <config-name>
```
#### Debugging tips
To get the configuration name for one of the commands, use `./show-status.sh`, which lists all the configurations and their status to copy-paste for further inspection with e.g. `./catchup.sh <config-name>` or repeated use of `./latest.sh <config-name>`, which will give you an idea whether the sync is actually progressing and whether it is on the canonical chain.
Note: some configurations use staged sync, which means that there is no measurable progress on the RPC in between batches of processed blocks. In any case `./logs.sh <config-name>` will give you insights into problems, potentially filtered by an LLM to spot common errors. It could be that clients are syncing slower than the chain progresses.
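For example, to watch whether a node's head actually advances (a sketch; the 10-second interval is arbitrary):
```bash
# re-run latest.sh every 10 seconds for the ethereum-mainnet configuration
watch -n 10 ./latest.sh ethereum-mainnet
```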
#### Further automation
You can chain `./success-if-almost-synced.sh <config-name> <age-of-last-block-in-seconds-to-be-considered-almost-synced>` with other scripts to create more complex automation, e.g. notifying you once a node has synced up to chainhead, adding the node to the dshackle configuration, or taking a backup to clone the node to a different server.
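A minimal sketch (the 300-second threshold and the WebDAV URL are placeholders):
```bash
# once ethereum-mainnet is within 5 minutes of chainhead,
# push a backup to another server via WebDAV
./success-if-almost-synced.sh ethereum-mainnet 300 && \
  ./backup-node.sh ethereum-mainnet https://serverA.domain.tld/dav
```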
#### OP Wheel Usage Example
Be aware that this is dangerous because it skips every consistency check for your rollup's op-geth execution client database.
```bash
# Rewind an Optimism rollup to a specific block
./op-wheel.sh engine set-forkchoice --unsafe=0x111AC7F --safe=0x111AC7F --finalized=0x111AC7F \
--engine=http://op-lisk-sepolia:8551/ --engine.open=http://op-lisk-sepolia:8545 \
--engine.jwt-secret-path=/jwtsecret
```
Nuclear option:
```bash
# Finalize the latest locally available block of an Optimism rollup
./op-wheel-finalize-latest-block.sh <client_service_name> (<node_service_name>)
```
Where `<client_service_name>` is the name of the client service in the compose file and `<node_service_name>` is the name of the node service in the compose file, which defaults to `<client_service_name>-node`.
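For example, reusing the service names from the forkchoice example above (the node service name is spelled out here, though it is the assumed default):
```bash
# finalize the latest locally available block of the op-lisk-sepolia rollup
./op-wheel-finalize-latest-block.sh op-lisk-sepolia op-lisk-sepolia-node
```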
## SSL Certificates and IP Configuration
### Public IP Configuration
Many blockchain nodes require your public IP address to function properly:
1. Get your public IP address:
```bash
curl ipinfo.io/ip
```
2. Add this IP to your `.env` file:
```bash
IP=203.0.113.42 # Your actual public IP
```
3. This IP is used by several chains for P2P discovery and network communication
### SSL Certificates with Traefik
This system uses Traefik as a reverse proxy for SSL certificates:
1. By default, certificates are obtained from Let's Encrypt
2. Use your **public** IP address with traefik.me by replacing dots with hyphens (see the one-liner after this list)
```
# If your public IP is 203.0.113.42
DOMAIN=203-0-113-42.traefik.me
```
3. Traefik.me automatically generates valid SSL certificates for this domain
4. For production, use your own domain and set MAIL for Let's Encrypt notifications
5. To disable SSL, set `NO_SSL=true` in your .env file
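A one-liner to derive the traefik.me domain from your public IP (combining the curl command and the dots-to-hyphens rule above):
```bash
# e.g. 203.0.113.42 -> 203-0-113-42.traefik.me
DOMAIN=$(curl -s ipinfo.io/ip | tr '.' '-').traefik.me
echo "DOMAIN=$DOMAIN" >> .env
```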
## Configuration
Each network configuration includes:
- Node client software (Geth, Erigon, etc.)
- Synchronization type (archive or pruned)
- Database backend and configuration
- Network-specific parameters
## Accessing RPC Endpoints
Once your nodes are running, you can access the RPC endpoints at:
- HTTPS: `https://yourdomain.tld/ethereum` (or other network paths)
- HTTP: `http://yourdomain.tld/ethereum` (or other network paths)
- WebSocket: `wss://yourdomain.tld/ethereum` (same URL as HTTP/HTTPS)
All services use standard ports (80 for HTTP, 443 for HTTPS), so no port specification is required in the URL.
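For example, to query the Ethereum endpoint through the proxy (the JSON-RPC body mirrors the usage comments in the compose files; substitute your own domain):
```bash
curl -X POST https://yourdomain.tld/ethereum \
  -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
```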
## Resource Requirements
Different node types have different hardware requirements:
- Pruned Ethereum node: ~500GB disk, 8GB RAM
- Archive Ethereum node: ~2TB disk, 16GB RAM
- L2 nodes typically require fewer resources than L1 nodes
- Consider using SSD or NVMe storage for better performance
## DRPC Integration
This system includes support for DRPC (Decentralized RPC) integration, allowing you to monetize your RPC nodes by selling excess capacity:
### Setting Up DRPC
1. Add `drpc.yml` to your `COMPOSE_FILE` variable in `.env`
2. Configure the following variables in your `.env` file:
```
GW_DOMAIN=your-gateway-domain.com
GW_REDIS_RAM=2gb # Memory allocation for Redis
DRPC_VERSION=0.64.16 # Or latest version
```
3. Generate the upstream configurations for dshackle:
```bash
# Using domain URLs (default)
./upstreams.sh
```
The `upstreams.sh` script automatically detects all running nodes on your machine and generates the appropriate configuration for the dshackle load balancer. This allows you to connect your nodes to drpc.org and sell RPC capacity.
For more information about DRPC, visit [drpc.org](https://drpc.org/).
## Supported Networks
This repository supports a comprehensive range of blockchain networks:
### Layer 1 Networks
- **Major Networks**: Ethereum (Mainnet, Sepolia, Holesky), BSC, Polygon, Avalanche, Gnosis
- **Alternative L1s**: Fantom, Core, Berachain, Ronin, Viction, Fuse, Tron, ThunderCore
- **Emerging L1s**: Goat, AlephZero, Haqq, Taiko, Rootstock
### Layer 2 Networks
- **OP Stack**: Optimism, Base, Zora, Mode, Blast, Fraxtal, Bob, Boba, Worldchain, Metal, Ink, Lisk, SNAX, Celo
- **Arbitrum Ecosystem**: Arbitrum One, Arbitrum Nova, Everclear, Playblock, Real, Connext, OpenCampusCodex
- **Other L2s**: Linea, Scroll, zkSync Era, Metis, Moonbeam
Most networks support multiple node implementations (Geth, Erigon, Reth) and environments (mainnet, testnet).
## Backup and Restore System
This repository includes a comprehensive backup and restore system for Docker volumes:
### Local Backups
- `backup-node.sh <config-name>` - Create a backup of all volumes for a configuration to the `/backup` directory
- `restore-volumes.sh <config-name>` - Restore volumes from the latest backup in the `/backup` directory
### Remote Backups
To serve backups via HTTP and WebDAV:
1. Add `backup-http.yml` to your `COMPOSE_FILE` variable in `.env`
2. This exposes:
- HTTP access to backups at `https://yourdomain.tld/backup`
- WebDAV access at `https://yourdomain.tld/dav`
### Cross-Server Backup and Restore
For multi-server setups:
1. On server A: Include `backup-http.yml` in `COMPOSE_FILE` to serve backups
2. On server B: Use restore from server A's backups:
```bash
# Restore directly from server A
./restore-volumes.sh ethereum-mainnet https://serverA.domain.tld/backup/
```
3. Create backups on server B and send to server A via WebDAV:
```bash
# Backup to server A's WebDAV
./backup-node.sh ethereum-mainnet https://serverA.domain.tld/dav
```
This allows for efficient volume transfers between servers without needing SSH access.
[Pocket Validator](README_POKT.md) <br />
[Harmony Validator](harmony/README_HARMONY.md)

76
README_POKT.md Normal file

@@ -0,0 +1,76 @@
Tested on Ubuntu 20.04.3 LTS
#### Prerequisites:
docker <br />
docker-compose <br />
DNS A-Record pointing to your server <br />
Wireguard-Server: Paste wireguard wg0.conf from wireguard-server to wireguard/config/wg0.conf <br />
.env file inside POKT_DOKT with secrets <br />
#### Usage
```
git clone https://github.com/cventastic/POKT_DOKT.git
cd POKT_DOKT
git reset --hard origin/main && git pull && ./util/prepare.sh
```
# EXAMPLES
Start POKT in relay mode:
```
command: pocket start --simulateRelay
```
If you want to actively relay, you also have to stake! <br />
Testnet-Faucet: (https://faucet.pokt.network/) <br />
How to stake: https://docs.pokt.network/home/paths/node-runner#stake-the-validator <br />
POKT QUERY for simulate-relay mode:
```
Pocket-Testnet:
curl -X POST --data '{"relay_network_id":"0002","payload":{"data":"{}","method":"POST","path":"v1/query/height","headers":{}}}' http://localhost:8082/v1/client/sim
Pocket-Mainnet:
curl -X POST --data '{"relay_network_id":"0001","payload":{"data":"{}","method":"POST","path":"v1/query/height","headers":{}}}' http://localhost:8081/v1/client/sim
```
GETH QUERY (from whitelisted servers, e.g. pokt-test) for simulate-relay mode:
```
Pocket-Testnet:
curl -X POST --data '{"relay_network_id":"0020","payload":{"data":"{\"jsonrpc\":\"2.0\",\"method\":\"eth_getBalance\",\"params\":[\"0x1a8c807a6E4F624fCab01FEBf76a541d31B8345A\", \"latest\"],\"id\":1}","method":"POST","path":"","headers":{}}}' http://127.0.0.1:8082/v1/client/sim
curl -v -X POST --data '{"relay_network_id":"0020","payload":{"data":"{\"jsonrpc\":\"2.0\",\"method\":\"eth_syncing\",\"params\":[],\"id\":1}","method":"POST","path":"","headers":{}}}' http://127.0.0.1:8082/v1/client/sim
Pocket-Mainnet:
curl -X POST --data '{"relay_network_id":"0021","payload":{"data":"{\"jsonrpc\":\"2.0\",\"method\":\"eth_getBalance\",\"params\":[\"0x1a8c807a6E4F624fCab01FEBf76a541d31B8345A\", \"latest\"],\"id\":1}","method":"POST","path":"","headers":{}}}' http://127.0.0.1:8081/v1/client/sim
curl -v -X POST --data '{"relay_network_id":"0021","payload":{"data":"{\"jsonrpc\":\"2.0\",\"method\":\"eth_syncing\",\"params\":[],\"id\":1}","method":"POST","path":"","headers":{}}}' http://127.0.0.1:8081/v1/client/sim
```
STANDARD GETH QUERY (from a whitelisted server)
```
curl -X POST -H "Content-Type: application/json" --data '{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}' https://$RPCNODE/goerli
```
# SSL
If you want to test SSL, comment in:
```
# - "--certificatesresolvers.myresolver.acme.caserver=https://acme-staging-v02.api.letsencrypt.org/directory"
```
Check if there is a file at /traefic/letsencrypt/acme.json; if yes, you have to delete it. <br />
Otherwise traefik will not issue the certificate for an existing domain. <br />
#### TODO !!!!
Bootstrapping from Snapshots <br />
Link timezone into containers.
AVALANCHE:
- Archive?
- Monitoring https://docs.avax.network/build/tools/dashboards/README
### Notes
#### Monitoring
Telegram get group ids the bot is in:
```curl -X POST https://api.telegram.org/bot$TELEGRAM_API_TOKEN/getUpdates```
There has to be an event in the channel for the bot to get updates
### New NODE
.env
rpc-timeout
whitelist


@@ -1 +0,0 @@
abstract/external-node/abstract-mainnet-external-node-pruned.yml


@@ -1 +0,0 @@
abstract/external-node/abstract-testnet-external-node-pruned.yml


@@ -1,172 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:abstract/external-node/abstract-mainnet-external-node-archive.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/abstract-mainnet-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
abstract-mainnet-archive-client:
image: ${ABSTRACT_EXTERNAL_NODE_IMAGE:-matterlabs/external-node}:${ABSTRACT_MAINNET_EXTERNAL_NODE_VERSION:-v27.5.7}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
ports:
- 12612:12612
- 12612:12612/udp
expose:
- 8545
- 8546
environment:
- DATABASE_POOL_SIZE=50
- DATABASE_URL=postgres://postgres:notsecurepassword@abstract-mainnet-archive-db:5430/zksync_local_ext_node
- EN_API_NAMESPACES=eth,net,web3,debug,pubsub,zks
- EN_ETH_CLIENT_URL=${ETHEREUM_MAINNET_EXECUTION_RPC}
- EN_HEALTHCHECK_PORT=3081
- EN_HTTP_PORT=8545
- EN_L1_CHAIN_ID=1
- EN_L2_CHAIN_ID=2741
- EN_MAIN_NODE_URL=https://api.mainnet.abs.xyz
- EN_MAX_RESPONSE_BODY_SIZE_MB=30
- EN_MAX_RESPONSE_BODY_SIZE_OVERRIDES_MB=eth_getLogs=100,eth_getBlockReceipts=None
- EN_MERKLE_TREE_PATH=./db/ext-node/lightweight
- EN_PROMETHEUS_PORT=3322
- EN_PRUNING_ENABLED=false
- EN_REQ_ENTITIES_LIMIT=100000
- EN_SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL=raas-abstract-mainnet-external-node-snapshots
- EN_SNAPSHOTS_OBJECT_STORE_MODE=GCSAnonymousReadOnly
- EN_SNAPSHOTS_RECOVERY_ENABLED=true
- EN_STATE_CACHE_PATH=./db/ext-node/state_keeper
- EN_WS_PORT=8546
- RUST_LOG=warn,zksync=info,zksync_core::metadata_calculator=debug,zksync_state=info,zksync_utils=info,zksync_web3_decl::client=error
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ABSTRACT_MAINNET_EXTERNAL_NODE_ARCHIVE_DATA:-abstract-mainnet-external-node-archive}:/db
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
abstract-mainnet-archive:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: abstract-mainnet-archive-client
RPC_PATH: ''
RPC_PORT: 8545
WS_PATH: ''
WS_PORT: 8546
restart: unless-stopped
depends_on:
- abstract-mainnet-archive-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.abstract-mainnet-external-node-archive-stripprefix.stripprefix.prefixes=/abstract-mainnet-archive
- traefik.http.services.abstract-mainnet-external-node-archive.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-archive.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-archive.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-archive.rule=Host(`$DOMAIN`) && (Path(`/abstract-mainnet-archive`) || Path(`/abstract-mainnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.abstract-mainnet-external-node-archive.rule=Path(`/abstract-mainnet-archive`) || Path(`/abstract-mainnet-archive/`)}
- traefik.http.routers.abstract-mainnet-external-node-archive.middlewares=abstract-mainnet-external-node-archive-stripprefix, ipallowlist
abstract-mainnet-archive-db:
image: postgres:14
expose:
- 5430
environment:
- PGPORT=5430
- POSTGRES_PASSWORD=notsecurepassword
command: >
postgres
-c max_connections=200
-c log_error_verbosity=terse
-c shared_buffers=2GB
-c effective_cache_size=4GB
-c maintenance_work_mem=1GB
-c checkpoint_completion_target=0.9
-c random_page_cost=1.1
-c effective_io_concurrency=200
-c min_wal_size=4GB
-c max_wal_size=16GB
-c max_worker_processes=16
-c checkpoint_timeout=1800
networks:
- chains
volumes:
- ${ABSTRACT_MAINNET_EXTERNAL_NODE_ARCHIVE__DB_DATA:-abstract-mainnet-external-node-archive_db}:/var/lib/postgresql/data
healthcheck:
interval: 1s
timeout: 3s
test: ["CMD-SHELL", "psql -U postgres -c \"select exists (select * from pg_stat_activity where datname = '' and application_name = 'pg_restore')\" | grep -e \".f$$\""]
logging: *logging-defaults
volumes:
abstract-mainnet-external-node-archive:
abstract-mainnet-external-node-archive_db:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: abstract
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...


@@ -1,172 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:abstract/external-node/abstract-mainnet-external-node-pruned.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/abstract-mainnet \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
abstract-mainnet-client:
image: ${ABSTRACT_EXTERNAL_NODE_IMAGE:-matterlabs/external-node}:${ABSTRACT_MAINNET_EXTERNAL_NODE_VERSION:-v27.5.7}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
ports:
- 14370:14370
- 14370:14370/udp
expose:
- 8545
- 8546
environment:
- DATABASE_POOL_SIZE=50
- DATABASE_URL=postgres://postgres:notsecurepassword@abstract-mainnet-db:5430/zksync_local_ext_node
- EN_API_NAMESPACES=eth,net,web3,debug,pubsub,zks
- EN_ETH_CLIENT_URL=${ETHEREUM_MAINNET_EXECUTION_RPC}
- EN_HEALTHCHECK_PORT=3081
- EN_HTTP_PORT=8545
- EN_L1_CHAIN_ID=1
- EN_L2_CHAIN_ID=2741
- EN_MAIN_NODE_URL=https://api.mainnet.abs.xyz
- EN_MAX_RESPONSE_BODY_SIZE_MB=30
- EN_MAX_RESPONSE_BODY_SIZE_OVERRIDES_MB=eth_getLogs=100,eth_getBlockReceipts=None
- EN_MERKLE_TREE_PATH=./db/ext-node/lightweight
- EN_PROMETHEUS_PORT=3322
- EN_PRUNING_ENABLED=true
- EN_REQ_ENTITIES_LIMIT=100000
- EN_SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL=raas-abstract-mainnet-external-node-snapshots
- EN_SNAPSHOTS_OBJECT_STORE_MODE=GCSAnonymousReadOnly
- EN_SNAPSHOTS_RECOVERY_ENABLED=true
- EN_STATE_CACHE_PATH=./db/ext-node/state_keeper
- EN_WS_PORT=8546
- RUST_LOG=warn,zksync=info,zksync_core::metadata_calculator=debug,zksync_state=info,zksync_utils=info,zksync_web3_decl::client=error
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ABSTRACT_MAINNET_EXTERNAL_NODE_PRUNED_DATA:-abstract-mainnet-external-node-pruned}:/db
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
abstract-mainnet:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: abstract-mainnet-client
RPC_PATH: ''
RPC_PORT: 8545
WS_PATH: ''
WS_PORT: 8546
restart: unless-stopped
depends_on:
- abstract-mainnet-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.abstract-mainnet-external-node-pruned-stripprefix.stripprefix.prefixes=/abstract-mainnet
- traefik.http.services.abstract-mainnet-external-node-pruned.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-pruned.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-pruned.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-pruned.rule=Host(`$DOMAIN`) && (Path(`/abstract-mainnet`) || Path(`/abstract-mainnet/`))}
- ${NO_SSL:+traefik.http.routers.abstract-mainnet-external-node-pruned.rule=Path(`/abstract-mainnet`) || Path(`/abstract-mainnet/`)}
- traefik.http.routers.abstract-mainnet-external-node-pruned.middlewares=abstract-mainnet-external-node-pruned-stripprefix, ipallowlist
abstract-mainnet-db:
image: postgres:14
expose:
- 5430
environment:
- PGPORT=5430
- POSTGRES_PASSWORD=notsecurepassword
command: >
postgres
-c max_connections=200
-c log_error_verbosity=terse
-c shared_buffers=2GB
-c effective_cache_size=4GB
-c maintenance_work_mem=1GB
-c checkpoint_completion_target=0.9
-c random_page_cost=1.1
-c effective_io_concurrency=200
-c min_wal_size=4GB
-c max_wal_size=16GB
-c max_worker_processes=16
-c checkpoint_timeout=1800
networks:
- chains
volumes:
- ${ABSTRACT_MAINNET_EXTERNAL_NODE_PRUNED__DB_DATA:-abstract-mainnet-external-node-pruned_db}:/var/lib/postgresql/data
healthcheck:
interval: 1s
timeout: 3s
test: ["CMD-SHELL", "psql -U postgres -c \"select exists (select * from pg_stat_activity where datname = '' and application_name = 'pg_restore')\" | grep -e \".f$$\""]
logging: *logging-defaults
volumes:
abstract-mainnet-external-node-pruned:
abstract-mainnet-external-node-pruned_db:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: abstract
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...


@@ -1,172 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:abstract/external-node/abstract-testnet-external-node-archive.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/abstract-testnet-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
abstract-testnet-archive-client:
image: ${ABSTRACT_EXTERNAL_NODE_IMAGE:-matterlabs/external-node}:${ABSTRACT_TESTNET_EXTERNAL_NODE_VERSION:-v28.2.1}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
ports:
- 14028:14028
- 14028:14028/udp
expose:
- 8545
- 8546
environment:
- DATABASE_POOL_SIZE=50
- DATABASE_URL=postgres://postgres:notsecurepassword@abstract-testnet-archive-db:5430/zksync_local_ext_node
- EN_API_NAMESPACES=eth,net,web3,debug,pubsub,zks
- EN_ETH_CLIENT_URL=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- EN_HEALTHCHECK_PORT=3081
- EN_HTTP_PORT=8545
- EN_L1_CHAIN_ID=11155111
- EN_L2_CHAIN_ID=11124
- EN_MAIN_NODE_URL=https://api.testnet.abs.xyz
- EN_MAX_RESPONSE_BODY_SIZE_MB=30
- EN_MAX_RESPONSE_BODY_SIZE_OVERRIDES_MB=eth_getLogs=100,eth_getBlockReceipts=None
- EN_MERKLE_TREE_PATH=./db/ext-node/lightweight
- EN_PROMETHEUS_PORT=3322
- EN_PRUNING_ENABLED=false
- EN_REQ_ENTITIES_LIMIT=100000
- EN_SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL=abstract-testnet-external-node-snapshots
- EN_SNAPSHOTS_OBJECT_STORE_MODE=GCSAnonymousReadOnly
- EN_SNAPSHOTS_RECOVERY_ENABLED=true
- EN_STATE_CACHE_PATH=./db/ext-node/state_keeper
- EN_WS_PORT=8546
- RUST_LOG=warn,zksync=info,zksync_core::metadata_calculator=debug,zksync_state=info,zksync_utils=info,zksync_web3_decl::client=error
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ABSTRACT_TESTNET_EXTERNAL_NODE_ARCHIVE_DATA:-abstract-testnet-external-node-archive}:/db
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
abstract-testnet-archive:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: abstract-testnet-archive-client
RPC_PATH: ''
RPC_PORT: 8545
WS_PATH: ''
WS_PORT: 8546
restart: unless-stopped
depends_on:
- abstract-testnet-archive-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.abstract-testnet-external-node-archive-stripprefix.stripprefix.prefixes=/abstract-testnet-archive
- traefik.http.services.abstract-testnet-external-node-archive.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-archive.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-archive.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-archive.rule=Host(`$DOMAIN`) && (Path(`/abstract-testnet-archive`) || Path(`/abstract-testnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.abstract-testnet-external-node-archive.rule=Path(`/abstract-testnet-archive`) || Path(`/abstract-testnet-archive/`)}
- traefik.http.routers.abstract-testnet-external-node-archive.middlewares=abstract-testnet-external-node-archive-stripprefix, ipallowlist
abstract-testnet-archive-db:
image: postgres:14
expose:
- 5430
environment:
- PGPORT=5430
- POSTGRES_PASSWORD=notsecurepassword
command: >
postgres
-c max_connections=200
-c log_error_verbosity=terse
-c shared_buffers=2GB
-c effective_cache_size=4GB
-c maintenance_work_mem=1GB
-c checkpoint_completion_target=0.9
-c random_page_cost=1.1
-c effective_io_concurrency=200
-c min_wal_size=4GB
-c max_wal_size=16GB
-c max_worker_processes=16
-c checkpoint_timeout=1800
networks:
- chains
volumes:
- ${ABSTRACT_TESTNET_EXTERNAL_NODE_ARCHIVE__DB_DATA:-abstract-testnet-external-node-archive_db}:/var/lib/postgresql/data
healthcheck:
interval: 1s
timeout: 3s
test: ["CMD-SHELL", "psql -U postgres -c \"select exists (select * from pg_stat_activity where datname = '' and application_name = 'pg_restore')\" | grep -e \".f$$\""]
logging: *logging-defaults
volumes:
abstract-testnet-external-node-archive:
abstract-testnet-external-node-archive_db:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: abstract-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...


@@ -1,172 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:abstract/external-node/abstract-testnet-external-node-pruned.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/abstract-testnet \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
abstract-testnet-client:
image: ${ABSTRACT_EXTERNAL_NODE_IMAGE:-matterlabs/external-node}:${ABSTRACT_TESTNET_EXTERNAL_NODE_VERSION:-v28.2.1}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
ports:
- 12157:12157
- 12157:12157/udp
expose:
- 8545
- 8546
environment:
- DATABASE_POOL_SIZE=50
- DATABASE_URL=postgres://postgres:notsecurepassword@abstract-testnet-db:5430/zksync_local_ext_node
- EN_API_NAMESPACES=eth,net,web3,debug,pubsub,zks
- EN_ETH_CLIENT_URL=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- EN_HEALTHCHECK_PORT=3081
- EN_HTTP_PORT=8545
- EN_L1_CHAIN_ID=11155111
- EN_L2_CHAIN_ID=11124
- EN_MAIN_NODE_URL=https://api.testnet.abs.xyz
- EN_MAX_RESPONSE_BODY_SIZE_MB=30
- EN_MAX_RESPONSE_BODY_SIZE_OVERRIDES_MB=eth_getLogs=100,eth_getBlockReceipts=None
- EN_MERKLE_TREE_PATH=./db/ext-node/lightweight
- EN_PROMETHEUS_PORT=3322
- EN_PRUNING_ENABLED=true
- EN_REQ_ENTITIES_LIMIT=100000
- EN_SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL=abstract-testnet-external-node-snapshots
- EN_SNAPSHOTS_OBJECT_STORE_MODE=GCSAnonymousReadOnly
- EN_SNAPSHOTS_RECOVERY_ENABLED=true
- EN_STATE_CACHE_PATH=./db/ext-node/state_keeper
- EN_WS_PORT=8546
- RUST_LOG=warn,zksync=info,zksync_core::metadata_calculator=debug,zksync_state=info,zksync_utils=info,zksync_web3_decl::client=error
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ABSTRACT_TESTNET_EXTERNAL_NODE_PRUNED_DATA:-abstract-testnet-external-node-pruned}:/db
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
abstract-testnet:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: abstract-testnet-client
RPC_PATH: ''
RPC_PORT: 8545
WS_PATH: ''
WS_PORT: 8546
restart: unless-stopped
depends_on:
- abstract-testnet-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.abstract-testnet-external-node-pruned-stripprefix.stripprefix.prefixes=/abstract-testnet
- traefik.http.services.abstract-testnet-external-node-pruned.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-pruned.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-pruned.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-pruned.rule=Host(`$DOMAIN`) && (Path(`/abstract-testnet`) || Path(`/abstract-testnet/`))}
- ${NO_SSL:+traefik.http.routers.abstract-testnet-external-node-pruned.rule=Path(`/abstract-testnet`) || Path(`/abstract-testnet/`)}
- traefik.http.routers.abstract-testnet-external-node-pruned.middlewares=abstract-testnet-external-node-pruned-stripprefix, ipallowlist
abstract-testnet-db:
image: postgres:14
expose:
- 5430
environment:
- PGPORT=5430
- POSTGRES_PASSWORD=notsecurepassword
command: >
postgres
-c max_connections=200
-c log_error_verbosity=terse
-c shared_buffers=2GB
-c effective_cache_size=4GB
-c maintenance_work_mem=1GB
-c checkpoint_completion_target=0.9
-c random_page_cost=1.1
-c effective_io_concurrency=200
-c min_wal_size=4GB
-c max_wal_size=16GB
-c max_worker_processes=16
-c checkpoint_timeout=1800
networks:
- chains
volumes:
- ${ABSTRACT_TESTNET_EXTERNAL_NODE_PRUNED__DB_DATA:-abstract-testnet-external-node-pruned_db}:/var/lib/postgresql/data
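# The check below reports healthy only once no pg_restore session is active,
# i.e. the snapshot import has finished: the exists() query prints "f" and
# the grep succeeds. To inspect manually (assuming this service name):
#   docker compose exec abstract-testnet-db psql -U postgres -c 'select * from pg_stat_activity'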
healthcheck:
interval: 1s
timeout: 3s
test: [CMD-SHELL, psql -U postgres -c "select exists (select * from pg_stat_activity where datname = '' and application_name = 'pg_restore')" | grep -e ".f$$"]
logging: *logging-defaults
volumes:
abstract-testnet-external-node-pruned:
abstract-testnet-external-node-pruned_db:
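# x-upstreams is an extension field that compose itself ignores; it templates
# an upstream entry for an RPC load balancer (assumption: a dshackle-style
# config generator consumes it). The doubled $$ is compose escaping, so the
# literal placeholders ${ID}, ${PROVIDER}, ${RPC_URL} and ${WS_URL} survive
# interpolation for that later tooling.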
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: abstract-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...

@@ -1 +0,0 @@
arb/nitro/alephzero-mainnet-nitro-archive-pebble-hash.yml

@@ -1 +0,0 @@
arb/nitro/alephzero-mainnet-nitro-pruned-pebble-path.yml

@@ -1 +0,0 @@
arb/nitro/alephzero-sepolia-nitro-archive-leveldb-hash.yml

@@ -1 +0,0 @@
arb/nitro/alephzero-sepolia-nitro-pruned-pebble-path.yml

@@ -1,5 +0,0 @@
{
"chain": {
"info-json": "[{\"chain-id\":41455,\"parent-chain-id\":1,\"parent-chain-is-arbitrum\":false,\"chain-config\":{\"homesteadBlock\":0,\"daoForkBlock\":null,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"berlinBlock\":0,\"londonBlock\":0,\"clique\":{\"period\":0,\"epoch\":0},\"arbitrum\":{\"EnableArbOS\":true,\"AllowDebugPrecompiles\":false,\"DataAvailabilityCommittee\":true,\"InitialArbOSVersion\":20,\"GenesisBlockNum\":0,\"MaxCodeSize\":98304,\"MaxInitCodeSize\":49152,\"InitialChainOwner\":\"0x257812604076712675ae9788F5Bd738173CA3CE0\"},\"chainId\":41455},\"rollup\":{\"bridge\":\"0x41Ec9456AB918f2aBA81F38c03Eb0B93b78E84d9\",\"inbox\":\"0x56D8EC76a421063e1907503aDd3794c395256AEb\",\"sequencer-inbox\":\"0xF75206c49c1694594E3e69252E519434f1579876\",\"rollup\":\"0x1CA12290D954CFe022323b6A6Df92113ed6b1C98\",\"validator-utils\":\"0x2b0E04Dc90e3fA58165CB41E2834B44A56E766aF\",\"validator-wallet-creator\":\"0x9CAd81628aB7D8e239F1A5B497313341578c5F71\",\"deployed-at\":20412468}}]"
}
}

@@ -1,6 +0,0 @@
{
"chain": {
"info-json": "[{\"chain-id\":2039,\"parent-chain-id\":11155111,\"parent-chain-is-arbitrum\":false,\"chain-name\":\"Aleph Zero EVM testnet\",\"chain-config\":{\"homesteadBlock\":0,\"daoForkBlock\":null,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"berlinBlock\":0,\"londonBlock\":0,\"clique\":{\"period\":0,\"epoch\":0},\"arbitrum\":{\"EnableArbOS\":true,\"AllowDebugPrecompiles\":false,\"DataAvailabilityCommittee\":true,\"InitialArbOSVersion\":20,\"GenesisBlockNum\":0,\"MaxCodeSize\":98304,\"MaxInitCodeSize\":49152,\"InitialChainOwner\":\"0x4c6dfF3e40e82a1fB599e062051726a9f7808a18\"},\"chainId\":2039},\"rollup\":{\"bridge\":\"0xCB5c0B38C45Fad0C20591E26b0b3C3809123994A\",\"inbox\":\"0xb27fd27987a71a6B77Fb8705bFb6010C411083EB\",\"sequencer-inbox\":\"0x16Ef70c48EF4BaaCfdaa4AfdD37F69332832a0bD\",\"rollup\":\"0xC8C08A4DbbF3367c8441151591c3d935947CB42F\",\"validator-utils\":\"0xb33Dca7b17c72CFC311D68C543cd4178E0d7ce55\",\"validator-wallet-creator\":\"0x75500812ADC9E51b721BEa31Df322EEc66967DDF\",\"deployed-at\":5827184}}]",
"name": "Aleph Zero EVM testnet"
}
}

@@ -1,113 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/arbnode/arbitrum-one-arbnode-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-one-arbnode-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-one-arbnode-archive:
image: ${ARBITRUM_ARBNODE_IMAGE:-offchainlabs/arb-node}:${ARBITRUM_ONE_ARBNODE_VERSION:-v1.4.5-e97c1a4}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
- 8546
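# Classic (pre-Nitro) arb-node: serves Arbitrum One history from before the
# Nitro migration; the Nitro archive compose file in this repo points its
# --execution.rpc.classic-redirect at this service for classic-era blocks.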
entrypoint: [/home/user/go/bin/arb-node]
command:
- --core.checkpoint-gas-frequency=156250000
- --l1.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --l2.disable-upstream
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=7070
- --node.cache.allow-slow-lookup
- --node.chain-id=42161
- --node.rpc.addr=0.0.0.0
- --node.rpc.enable-l1-calls
- --node.rpc.port=8545
- --node.rpc.tracing.enable
- --node.rpc.tracing.namespace=trace
- --node.ws.addr=0.0.0.0
- --node.ws.port=8546
- --persistent.chain=/data/datadir/
- --persistent.global-config=/data/
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_ARBNODE_ARCHIVE_LEVELDB_HASH_DATA:-arbitrum-one-arbnode-archive-leveldb-hash}:/data
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-arbnode-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/arbitrum-one-arbnode-archive
- traefik.http.services.arbitrum-one-arbnode-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-one-arbnode-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-arbnode-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-arbnode-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one-arbnode-archive`) || Path(`/arbitrum-one-arbnode-archive/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-arbnode-archive-leveldb-hash.rule=Path(`/arbitrum-one-arbnode-archive`) || Path(`/arbitrum-one-arbnode-archive/`)}
- traefik.http.routers.arbitrum-one-arbnode-archive-leveldb-hash.middlewares=arbitrum-one-arbnode-archive-leveldb-hash-stripprefix, ipallowlist
volumes:
arbitrum-one-arbnode-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...

@@ -1,6 +0,0 @@
{
"chain": {
"info-json": "[{\"chain-id\":6398,\"parent-chain-id\":11155111,\"parent-chain-is-arbitrum\":false,\"chain-name\":\"Connext Sepolia\",\"chain-config\":{\"homesteadBlock\":0,\"daoForkBlock\":null,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"berlinBlock\":0,\"londonBlock\":0,\"clique\":{\"period\":0,\"epoch\":0},\"arbitrum\":{\"EnableArbOS\":true,\"AllowDebugPrecompiles\":false,\"DataAvailabilityCommittee\":true,\"InitialArbOSVersion\":20,\"GenesisBlockNum\":0,\"MaxCodeSize\":24576,\"MaxInitCodeSize\":49152,\"InitialChainOwner\":\"0x8ECD393576Ca37a7e5095f31bdfE21F606FF5F75\"},\"chainId\":6398},\"rollup\":{\"bridge\":\"0xf0b58FA876005898798a66A04EE09159C199CB7A\",\"inbox\":\"0x7bc7DAF843bf57c54D41D912F8221A2eE830c320\",\"sequencer-inbox\":\"0x7f5C1a58014E9DE69663CAc441bfa4C5d94b7E64\",\"rollup\":\"0xE6D7bf11A6264BACa59e8fAD7f6985FaC8f62e60\",\"validator-utils\":\"0xb33Dca7b17c72CFC311D68C543cd4178E0d7ce55\",\"validator-wallet-creator\":\"0x75500812ADC9E51b721BEa31Df322EEc66967DDF\",\"deployed-at\":5780509}}]",
"name": "Connext Sepolia"
}
}

@@ -1,6 +0,0 @@
{
"chain": {
"name": "Everclear Mainnet",
"info-json": "[{\"chain-id\":25327,\"parent-chain-id\":1,\"parent-chain-is-arbitrum\":false,\"chain-name\":\"Everclear Mainnet\",\"chain-config\":{\"homesteadBlock\":0,\"daoForkBlock\":null,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"berlinBlock\":0,\"londonBlock\":0,\"clique\":{\"period\":0,\"epoch\":0},\"arbitrum\":{\"EnableArbOS\":true,\"AllowDebugPrecompiles\":false,\"DataAvailabilityCommittee\":true,\"InitialArbOSVersion\":20,\"GenesisBlockNum\":0,\"MaxCodeSize\":24576,\"MaxInitCodeSize\":49152,\"InitialChainOwner\":\"0x98a426C8ED821cAaef1b4BF7D29b514dcef970C0\"},\"chainId\":25327},\"rollup\":{\"bridge\":\"0x4eb4fB614e1aa3634513319F4Ec7334bC4321356\",\"inbox\":\"0x97FdC935c5E25613AA13a054C7Aa71cf751DB495\",\"sequencer-inbox\":\"0x7B0517E0104dB60198f9d573C0aB8d480207827E\",\"rollup\":\"0xc6CAd31D83E33Fc8fBc855f36ef9Cb2fCE070f5C\",\"validator-utils\":\"0x2b0E04Dc90e3fA58165CB41E2834B44A56E766aF\",\"validator-wallet-creator\":\"0x9CAd81628aB7D8e239F1A5B497313341578c5F71\",\"deployed-at\":20684364}}]"
}
}

@@ -1,145 +0,0 @@
---
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/fireeth/arbitrum-one-fireeth-pruned-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-one \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: 10m
max-file: '3'
services:
arbitrum-one:
image: ${ARBITRUM_FIREETH_IMAGE:-ghcr.io/streamingfast/firehose-ethereum}:${ARBITRUM_ONE_FIREETH_VERSION:-v2.11.7-nitro-nitro-v3.5.5-fh3.0}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
environment:
- ${ARBITRUM_ONE_FIREETH_PRUNED_PEBBLE_HASH_S3_BLOCKS_STORE:-/firehose-data/storage/merged-blocks}
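# sh -c trick: the quoted script receives the `command` list below as
# positional parameters ("_" fills $0), so "$*" splices every flag into
# --reader-node-arguments as a single string.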
entrypoint: [sh, -c, 'exec fireeth -c /config/firehose.yml start --substreams-rpc-endpoints "${ARBITRUM_ONE_EXECUTION_RPC}" --reader-node-arguments "$*"', _]
command:
- --execution.caching.archive=false
- --execution.caching.state-scheme=hash
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --firehose-enabled
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,debug,admin,txpool,engine
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=pruned
- --persistent.chain=/firehose-data/reader/data/arbitrum-one
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_FIREETH_PRUNED_PEBBLE_HASH_DATA:-arbitrum-one-fireeth-pruned-pebble-hash}:/firehose-data
- ${ARBITRUM_ONE_FIREETH_PRUNED_PEBBLE_HASH_MERGED_BLOCKS_DATA:-arbitrum-one-fireeth-pruned-pebble-hash-blocks}:/firehose-data/storage/merged-blocks
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-fireeth-pruned-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-one
- traefik.http.services.arbitrum-one-fireeth-pruned-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one`) || Path(`/arbitrum-one/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash.rule=Path(`/arbitrum-one`) || Path(`/arbitrum-one/`)}
- traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash.middlewares=arbitrum-one-fireeth-pruned-pebble-hash-stripprefix, ipallowlist
- traefik.http.services.arbitrum-one-fireeth-pruned-pebble-hash-firehose.loadbalancer.server.scheme=h2c
- traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-firehose.service=arbitrum-one-fireeth-pruned-pebble-hash-firehose
- traefik.http.services.arbitrum-one-fireeth-pruned-pebble-hash-firehose.loadbalancer.server.port=10015
- traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-firehose.entrypoints=grpc
- ${NO_SSL:-traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-firehose.tls.certresolver=myresolver}
- traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-firehose.rule=Host(`arbitrum-one.${DOMAIN}`)
- traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-firehose.middlewares=ipallowlist
- traefik.http.services.arbitrum-one-fireeth-pruned-pebble-hash-substreams.loadbalancer.server.scheme=h2c
- traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-substreams.service=arbitrum-one-fireeth-pruned-pebble-hash-substreams
- traefik.http.services.arbitrum-one-fireeth-pruned-pebble-hash-substreams.loadbalancer.server.port=10016
- traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-substreams.entrypoints=grpc
- ${NO_SSL:-traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-substreams.tls.certresolver=myresolver}
- traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-substreams.rule=Host(`arbitrum-one-substreams.${DOMAIN}`)
- traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-substreams.middlewares=ipallowlist
volumes:
arbitrum-one-fireeth-pruned-pebble-hash:
arbitrum-one-fireeth-pruned-pebble-hash-blocks:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...

@@ -1,100 +0,0 @@
---
services:
alephzero-mainnet-archive:
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_MAINNET_NITRO_VERSION:-v3.5.3-0a9c975}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.caching.state-scheme=hash
- --execution.forwarding-target=https://rpc.alephzero.raas.gelato.cloud
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --node.batch-poster.enable=false
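# AnyTrust-style chain: batch data is held by a data availability committee,
# so the node fetches it from the DAS REST aggregator below rather than from
# L1 calldata.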
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.alephzero.raas.gelato.cloud
- --node.data-availability.sequencer-inbox-address=0xF75206c49c1694594E3e69252E519434f1579876
- --node.feed.input.url=wss://feed.alephzero.raas.gelato.cloud
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/alephzero-mainnet-archive
- --persistent.db-engine=leveldb
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ALEPHZERO_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-alephzero-mainnet-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./arb/alephzero/mainnet:/config
- /slowdisk:/slowdisk
labels:
- traefik.enable=true
- traefik.http.middlewares.alephzero-mainnet-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/alephzero-mainnet-archive
- traefik.http.services.alephzero-mainnet-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.alephzero-mainnet-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.alephzero-mainnet-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.alephzero-mainnet-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && PathPrefix(`/alephzero-mainnet-archive`)}
- ${NO_SSL:+traefik.http.routers.alephzero-mainnet-nitro-archive-leveldb-hash.rule=PathPrefix(`/alephzero-mainnet-archive`)}
- traefik.http.routers.alephzero-mainnet-nitro-archive-leveldb-hash.middlewares=alephzero-mainnet-nitro-archive-leveldb-hash-stripprefix, ipwhitelist
volumes:
alephzero-mainnet-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
chain: alephzero
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...

@@ -1,143 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/alephzero-mainnet-nitro-archive-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/alephzero-mainnet-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
alephzero-mainnet-archive:
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.forwarding-target=https://rpc.alephzero.raas.gelato.cloud
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.alephzero.raas.gelato.cloud
- --node.data-availability.sequencer-inbox-address=0xF75206c49c1694594E3e69252E519434f1579876
- --node.feed.input.url=wss://feed.alephzero.raas.gelato.cloud
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/alephzero-mainnet-archive
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ALEPHZERO_MAINNET_NITRO_ARCHIVE_PEBBLE_HASH_DATA:-alephzero-mainnet-nitro-archive-pebble-hash}:/root/.arbitrum
- ./arb/alephzero/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
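# prometheus-scrape.* labels feed the repo's scrape-target generation; the
# nitro metrics server configured above listens on 6070 and exposes
# Prometheus metrics at /debug/metrics/prometheus.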
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.alephzero-mainnet-nitro-archive-pebble-hash-stripprefix.stripprefix.prefixes=/alephzero-mainnet-archive
- traefik.http.services.alephzero-mainnet-nitro-archive-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.alephzero-mainnet-nitro-archive-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.alephzero-mainnet-nitro-archive-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.alephzero-mainnet-nitro-archive-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/alephzero-mainnet-archive`) || Path(`/alephzero-mainnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.alephzero-mainnet-nitro-archive-pebble-hash.rule=Path(`/alephzero-mainnet-archive`) || Path(`/alephzero-mainnet-archive/`)}
- traefik.http.routers.alephzero-mainnet-nitro-archive-pebble-hash.middlewares=alephzero-mainnet-nitro-archive-pebble-hash-stripprefix, ipallowlist
volumes:
alephzero-mainnet-nitro-archive-pebble-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: alephzero
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...

@@ -1,146 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/alephzero-mainnet-nitro-pruned-pebble-path.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/alephzero-mainnet \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
alephzero-mainnet:
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.forwarding-target=https://rpc.alephzero.raas.gelato.cloud
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.alephzero.raas.gelato.cloud
- --node.data-availability.sequencer-inbox-address=0xF75206c49c1694594E3e69252E519434f1579876
- --node.feed.input.url=wss://feed.alephzero.raas.gelato.cloud
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/alephzero-mainnet
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ALEPHZERO_MAINNET_NITRO_PRUNED_PEBBLE_PATH_DATA:-alephzero-mainnet-nitro-pruned-pebble-path}:/root/.arbitrum
- ./arb/alephzero/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.alephzero-mainnet-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/alephzero-mainnet
- traefik.http.services.alephzero-mainnet-nitro-pruned-pebble-path.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.alephzero-mainnet-nitro-pruned-pebble-path.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.alephzero-mainnet-nitro-pruned-pebble-path.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.alephzero-mainnet-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && (Path(`/alephzero-mainnet`) || Path(`/alephzero-mainnet/`))}
- ${NO_SSL:+traefik.http.routers.alephzero-mainnet-nitro-pruned-pebble-path.rule=Path(`/alephzero-mainnet`) || Path(`/alephzero-mainnet/`)}
- traefik.http.routers.alephzero-mainnet-nitro-pruned-pebble-path.middlewares=alephzero-mainnet-nitro-pruned-pebble-path-stripprefix, ipallowlist
volumes:
alephzero-mainnet-nitro-pruned-pebble-path:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: alephzero
method-groups:
enabled:
- debug
- filter
methods:
disabled:
# not compatible with path state scheme
- name: debug_traceBlockByHash
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...

@@ -1,100 +0,0 @@
---
services:
alephzero-sepolia-archive:
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_SEPOLIA_NITRO_VERSION:-v3.5.3-0a9c975}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.caching.state-scheme=hash
- --execution.forwarding-target=https://rpc.alephzero-testnet.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.alephzero-testnet.gelato.digital
- --node.data-availability.sequencer-inbox-address=0x16Ef70c48EF4BaaCfdaa4AfdD37F69332832a0bD
- --node.feed.input.url=wss://feed.alephzero-testnet.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/alephzero-sepolia-archive
- --persistent.db-engine=leveldb
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ALEPHZERO_SEPOLIA_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-alephzero-sepolia-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./arb/alephzero/sepolia:/config
- /slowdisk:/slowdisk
labels:
- traefik.enable=true
- traefik.http.middlewares.alephzero-sepolia-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/alephzero-sepolia-archive
- traefik.http.services.alephzero-sepolia-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.alephzero-sepolia-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.alephzero-sepolia-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.alephzero-sepolia-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && PathPrefix(`/alephzero-sepolia-archive`)}
- ${NO_SSL:+traefik.http.routers.alephzero-sepolia-nitro-archive-leveldb-hash.rule=PathPrefix(`/alephzero-sepolia-archive`)}
- traefik.http.routers.alephzero-sepolia-nitro-archive-leveldb-hash.middlewares=alephzero-sepolia-nitro-archive-leveldb-hash-stripprefix, ipwhitelist
volumes:
alephzero-sepolia-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
chain: alephzero-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...

@@ -1,143 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/alephzero-sepolia-nitro-archive-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/alephzero-sepolia-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
alephzero-sepolia-archive:
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.forwarding-target=https://rpc.alephzero-testnet.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.alephzero-testnet.gelato.digital
- --node.data-availability.sequencer-inbox-address=0x16Ef70c48EF4BaaCfdaa4AfdD37F69332832a0bD
- --node.feed.input.url=wss://feed.alephzero-testnet.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/alephzero-sepolia-archive
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ALEPHZERO_SEPOLIA_NITRO_ARCHIVE_PEBBLE_HASH_DATA:-alephzero-sepolia-nitro-archive-pebble-hash}:/root/.arbitrum
- ./arb/alephzero/sepolia:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.alephzero-sepolia-nitro-archive-pebble-hash-stripprefix.stripprefix.prefixes=/alephzero-sepolia-archive
- traefik.http.services.alephzero-sepolia-nitro-archive-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.alephzero-sepolia-nitro-archive-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.alephzero-sepolia-nitro-archive-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.alephzero-sepolia-nitro-archive-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/alephzero-sepolia-archive`) || Path(`/alephzero-sepolia-archive/`))}
- ${NO_SSL:+traefik.http.routers.alephzero-sepolia-nitro-archive-pebble-hash.rule=Path(`/alephzero-sepolia-archive`) || Path(`/alephzero-sepolia-archive/`)}
- traefik.http.routers.alephzero-sepolia-nitro-archive-pebble-hash.middlewares=alephzero-sepolia-nitro-archive-pebble-hash-stripprefix, ipallowlist
volumes:
alephzero-sepolia-nitro-archive-pebble-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: alephzero-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...

@@ -1,146 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/alephzero-sepolia-nitro-pruned-pebble-path.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/alephzero-sepolia \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
alephzero-sepolia:
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.forwarding-target=https://rpc.alephzero-testnet.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.alephzero-testnet.gelato.digital
- --node.data-availability.sequencer-inbox-address=0x16Ef70c48EF4BaaCfdaa4AfdD37F69332832a0bD
- --node.feed.input.url=wss://feed.alephzero-testnet.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/alephzero-sepolia
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ALEPHZERO_SEPOLIA_NITRO_PRUNED_PEBBLE_PATH_DATA:-alephzero-sepolia-nitro-pruned-pebble-path}:/root/.arbitrum
- ./arb/alephzero/sepolia:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.alephzero-sepolia-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/alephzero-sepolia
- traefik.http.services.alephzero-sepolia-nitro-pruned-pebble-path.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.alephzero-sepolia-nitro-pruned-pebble-path.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.alephzero-sepolia-nitro-pruned-pebble-path.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.alephzero-sepolia-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && (Path(`/alephzero-sepolia`) || Path(`/alephzero-sepolia/`))}
- ${NO_SSL:+traefik.http.routers.alephzero-sepolia-nitro-pruned-pebble-path.rule=Path(`/alephzero-sepolia`) || Path(`/alephzero-sepolia/`)}
- traefik.http.routers.alephzero-sepolia-nitro-pruned-pebble-path.middlewares=alephzero-sepolia-nitro-pruned-pebble-path-stripprefix, ipallowlist
volumes:
alephzero-sepolia-nitro-pruned-pebble-path:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: alephzero-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
# not compatible with path state scheme
- name: debug_traceBlockByHash
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...

@@ -1,136 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-nova-nitro-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-nova-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-nova-archive:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_NOVA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=42170
- --execution.caching.archive=true
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=archive
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-nova-archive
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_NOVA_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-arbitrum-nova-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./tmp/arbitrum-nova-archive:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-nova-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/arbitrum-nova-archive
- traefik.http.services.arbitrum-nova-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-nova-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-nova-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-nova-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-nova-archive`) || Path(`/arbitrum-nova-archive/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-nova-nitro-archive-leveldb-hash.rule=Path(`/arbitrum-nova-archive`) || Path(`/arbitrum-nova-archive/`)}
- traefik.http.routers.arbitrum-nova-nitro-archive-leveldb-hash.middlewares=arbitrum-nova-nitro-archive-leveldb-hash-stripprefix, ipallowlist
volumes:
arbitrum-nova-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum-nova
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...

@@ -1,137 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-nova-nitro-pruned-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-nova \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-nova:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_NOVA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=42170
- --execution.caching.archive=false
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=pruned
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-nova
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_NOVA_NITRO_PRUNED_PEBBLE_HASH_DATA:-arbitrum-nova-nitro-pruned-pebble-hash}:/root/.arbitrum
- ./tmp/arbitrum-nova:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-nova-nitro-pruned-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-nova
- traefik.http.services.arbitrum-nova-nitro-pruned-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-nova-nitro-pruned-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-nova-nitro-pruned-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-nova-nitro-pruned-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-nova`) || Path(`/arbitrum-nova/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-nova-nitro-pruned-pebble-hash.rule=Path(`/arbitrum-nova`) || Path(`/arbitrum-nova/`)}
- traefik.http.routers.arbitrum-nova-nitro-pruned-pebble-hash.middlewares=arbitrum-nova-nitro-pruned-pebble-hash-stripprefix, ipallowlist
volumes:
arbitrum-nova-nitro-pruned-pebble-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum-nova
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...

@@ -1,142 +0,0 @@
---
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-one-nitro-archive-leveldb-hash--benchmark.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-one-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-one-archive:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.5.5-90ee45c}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=42161
- --execution.caching.archive=true
- --execution.caching.state-scheme=hash
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.latest=archive
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-one-archive
- --persistent.db-engine=leveldb
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-arbitrum-one-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./tmp/arbitrum-one-archive:/tmp
- /slowdisk:/slowdisk
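# Benchmark variant: Traefik routes to this proxy instead of the node, so
# every request is observed before being forwarded to the archive backend
# (built from ./benchmark-proxy; presumably SUMMARY_INTERVAL prints request
# stats every 60 seconds).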
arbitrum-one-archive-benchmark:
build:
context: ./benchmark-proxy
dockerfile: Dockerfile
expose:
- '8545'
environment:
- ENABLE_DETAILED_LOGS=${BENCHMARK_PROXY_VERBOSE:-false}
- LISTEN_ADDR=:8545
- PRIMARY_BACKEND=http://arbitrum-one-archive:8545
- SUMMARY_INTERVAL=60
restart: unless-stopped
depends_on:
- arbitrum-one-archive
networks:
- chains
labels:
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/arbitrum-one-archive
- traefik.http.services.arbitrum-one-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one-archive`) || Path(`/arbitrum-one-archive/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.rule=Path(`/arbitrum-one-archive`) || Path(`/arbitrum-one-archive/`)}
- traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.middlewares=arbitrum-one-nitro-archive-leveldb-hash-stripprefix, ipallowlist
volumes:
arbitrum-one-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard, geth-only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth-only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard, geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
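# A note on the x-upstreams template above (an interpretation; the consuming
# tool is not named in this file): "$$" is Compose's escape for a literal "$",
# so $${ID} survives interpolation as ${ID} and is filled in later by
# whatever renders these upstream definitions, e.g. a dshackle-style JSON-RPC
# load balancer that routes by chain and method group.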


@@ -1,207 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-one-nitro-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-one-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-one-archive:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=42161
- --execution.caching.archive=true
- --execution.rpc.classic-redirect=http://arbitrum-one-arbnode-archive:8545
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=archive
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-one-archive
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-arbitrum-one-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./tmp/arbitrum-one-archive:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/arbitrum-one-archive
- traefik.http.services.arbitrum-one-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one-archive`) || Path(`/arbitrum-one-archive/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.rule=Path(`/arbitrum-one-archive`) || Path(`/arbitrum-one-archive/`)}
- traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.middlewares=arbitrum-one-nitro-archive-leveldb-hash-stripprefix, ipallowlist
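# How the NO_SSL toggle works: with NO_SSL unset, each ${NO_SSL:-<label>}
# expands to the TLS router label and ${NO_SSL:+<label>} expands to nothing,
# so the router terminates TLS via the myresolver certificate resolver. With
# NO_SSL set, the :+ form activates the plain Path rule instead, while the :-
# labels collapse to NO_SSL's literal value (presumably a harmless placeholder
# label; an assumption, since the variable is defined outside this file).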
arbitrum-one-arbnode-archive:
image: ${ARBITRUM_ARBNODE_IMAGE:-offchainlabs/arb-node}:${ARBITRUM_ONE_ARBNODE_VERSION:-v1.4.5-e97c1a4}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
- 8546
entrypoint: [/home/user/go/bin/arb-node]
command:
- --core.checkpoint-gas-frequency=156250000
- --l1.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --l2.disable-upstream
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=7070
- --node.cache.allow-slow-lookup
- --node.chain-id=42161
- --node.rpc.addr=0.0.0.0
- --node.rpc.enable-l1-calls
- --node.rpc.port=8545
- --node.rpc.tracing.enable
- --node.rpc.tracing.namespace=trace
- --node.ws.addr=0.0.0.0
- --node.ws.port=8546
- --persistent.chain=/data/datadir/
- --persistent.global-config=/data/
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_ARBNODE_ARCHIVE_LEVELDB_HASH_DATA:-arbitrum-one-arbnode-archive-leveldb-hash}:/data
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
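# The arb-node container above serves pre-Nitro ("classic") Arbitrum history;
# the nitro node hands queries for those early blocks over to it via the
# --execution.rpc.classic-redirect flag, so both services are needed for a
# complete Arbitrum One archive.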
volumes:
arbitrum-one-arbnode-archive-leveldb-hash:
arbitrum-one-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard, geth-only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth-only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard, geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...


@@ -1,208 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-one-nitro-archive-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-one-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-one-archive:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=42161
- --execution.caching.archive=true
- --execution.rpc.classic-redirect=http://arbitrum-one-arbnode-archive:8545
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=archive
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-one-archive
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_NITRO_ARCHIVE_PEBBLE_HASH_DATA:-arbitrum-one-nitro-archive-pebble-hash}:/root/.arbitrum
- ./tmp/arbitrum-one-archive:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-nitro-archive-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-one-archive
- traefik.http.services.arbitrum-one-nitro-archive-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one-archive`) || Path(`/arbitrum-one-archive/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-nitro-archive-pebble-hash.rule=Path(`/arbitrum-one-archive`) || Path(`/arbitrum-one-archive/`)}
- traefik.http.routers.arbitrum-one-nitro-archive-pebble-hash.middlewares=arbitrum-one-nitro-archive-pebble-hash-stripprefix, ipallowlist
arbitrum-one-arbnode-archive:
image: ${ARBITRUM_ARBNODE_IMAGE:-offchainlabs/arb-node}:${ARBITRUM_ONE_ARBNODE_VERSION:-v1.4.5-e97c1a4}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
- 8546
entrypoint: [/home/user/go/bin/arb-node]
command:
- --core.checkpoint-gas-frequency=156250000
- --l1.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --l2.disable-upstream
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=7070
- --node.cache.allow-slow-lookup
- --node.chain-id=42161
- --node.rpc.addr=0.0.0.0
- --node.rpc.enable-l1-calls
- --node.rpc.port=8545
- --node.rpc.tracing.enable
- --node.rpc.tracing.namespace=trace
- --node.ws.addr=0.0.0.0
- --node.ws.port=8546
- --persistent.chain=/data/datadir/
- --persistent.global-config=/data/
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_ARBNODE_ARCHIVE_LEVELDB_HASH_DATA:-arbitrum-one-arbnode-archive-leveldb-hash}:/data
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
volumes:
arbitrum-one-arbnode-archive-leveldb-hash:
arbitrum-one-nitro-archive-pebble-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard, geth-only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth-only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard, geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...


@@ -1,157 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-one-nitro-pruned-pebble-hash--benchmark.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-one \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-one:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=42161
- --execution.caching.archive=false
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=pruned
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-one
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_DATA:-arbitrum-one-nitro-pruned-pebble-hash}:/root/.arbitrum
- ./tmp/arbitrum-one:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
arbitrum-one-benchmark:
build:
context: ./benchmark-proxy
dockerfile: Dockerfile
expose:
- '8545'
environment:
- ENABLE_DETAILED_LOGS=${BENCHMARK_PROXY_VERBOSE:-false}
- LISTEN_ADDR=:8545
- PRIMARY_BACKEND=http://arbitrum-one:8545
- SUMMARY_INTERVAL=60
restart: unless-stopped
depends_on:
- arbitrum-one
networks:
- chains
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-nitro-pruned-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-one
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one`) || Path(`/arbitrum-one/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.rule=Path(`/arbitrum-one`) || Path(`/arbitrum-one/`)}
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.middlewares=arbitrum-one-nitro-pruned-pebble-hash-stripprefix, ipallowlist
volumes:
arbitrum-one-nitro-pruned-pebble-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard, geth-only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth-only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard, geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...


@@ -1,201 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-one-nitro-pruned-pebble-hash--fireeth.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-one \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-one:
image: ${ARBITRUM_FIREETH_IMAGE:-ghcr.io/streamingfast/firehose-ethereum}:${ARBITRUM_ONE_FIREETH_VERSION:-v2.11.7-nitro-nitro-v3.5.5-fh3.0}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
entrypoint: [sh, -c, exec fireeth start reader-node --log-to-file=false --reader-node-arguments "$*", _]
command:
- --chain.id=42161
- --execution.caching.archive=false
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=pruned
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-one
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_FIREETH_DATA:-arbitrum-one-fireeth}:/app/firehose-data
- ${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_DATA:-arbitrum-one-nitro-pruned-pebble-hash}:/root/.arbitrum
- ./tmp/arbitrum-one:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-nitro-pruned-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-one
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one`) || Path(`/arbitrum-one/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.rule=Path(`/arbitrum-one`) || Path(`/arbitrum-one/`)}
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.middlewares=arbitrum-one-nitro-pruned-pebble-hash-stripprefix, ipallowlist
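# Entrypoint trick used above: in [sh, -c, '... "$*"', _], the command list is
# appended as positional parameters with "_" as $0, so "$*" joins all the
# nitro flags into the single string that --reader-node-arguments expects.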
arbitrum-one-firehose:
image: ${ARBITRUM_FIREETH_IMAGE:-ghcr.io/streamingfast/firehose-ethereum}:${ARBITRUM_ONE_FIREETH_VERSION:-v2.11.7-nitro-nitro-v3.5.5-fh3.0}
expose:
- 10015
- 10014
environment:
- ${ARBITRUM_ONE_FIREETH_BLOCKS_STORE:-/app/firehose-data/storage/merged-blocks}
entrypoint: [sh, -c, exec fireeth --config-file="" --log-to-file=false start firehose index-builder relayer merger $@, _]
command:
- --firehose-rate-limit-bucket-fill-rate=${ARBITRUM_ONE_FIREHOSE_RATE_LIMIT_BUCKET_FILL_RATE:-1s}
- --firehose-rate-limit-bucket-size=${ARBITRUM_ONE_FIREHOSE_RATE_LIMIT_BUCKET_SIZE:-200}
- --log-to-file=false
- --relayer-source=arbitrum-one:10010
restart: unless-stopped
depends_on:
- arbitrum-one
networks:
- chains
volumes:
- ${ARBITRUM_ONE_FIREETH_DATA:-arbitrum-one-fireeth}:/app/firehose-data
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash-firehose.loadbalancer.server.scheme=h2c
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-firehose.service=arbitrum-one-nitro-pruned-pebble-hash-firehose
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash-firehose.loadbalancer.server.port=10015
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-firehose.entrypoints=grpc
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-firehose.tls.certresolver=myresolver}
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-firehose.rule=Host(`arbitrum-one-firehose.${DOMAIN}`)
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-firehose.middlewares=ipallowlist
arbitrum-one-events:
image: ${ARBITRUM_FIREETH_IMAGE:-ghcr.io/streamingfast/firehose-ethereum}:${ARBITRUM_ONE_FIREETH_VERSION:-v2.11.7-nitro-nitro-v3.5.5-fh3.0}
expose:
- 10016
entrypoint: [sh, -c, exec fireeth --config-file="" --log-to-file=false start substreams-tier1 substreams-tier2 $@, _]
command:
- --common-live-blocks-addr=arbitrum-one-firehose:10014
- --log-to-file=false
- --substreams-block-execution-timeout=${ARBITRUM_ONE_SUBSTREAMS_BLOCK_EXECUTION_TIMEOUT:-3m0s}
- --substreams-rpc-endpoints=${ARBITRUM_ONE_EXECUTION_ARCHIVE_RPC}
- --substreams-tier1-max-subrequests=${ARBITRUM_ONE_SUBSTREAMS_TIER1_MAX_SUBREQUESTS:-4}
restart: unless-stopped
depends_on:
- arbitrum-one
networks:
- chains
volumes:
- ${ARBITRUM_ONE_FIREETH_DATA:-arbitrum-one-fireeth}:/app/firehose-data
logging: *logging-defaults
labels:
- traefik.enable=true
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash-events.loadbalancer.server.scheme=h2c
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-events.service=arbitrum-one-nitro-pruned-pebble-hash-events
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash-events.loadbalancer.server.port=10016
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-events.entrypoints=grpc
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-events.tls.certresolver=myresolver}
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-events.rule=Host(`arbitrum-one-events.${DOMAIN}`)
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-events.middlewares=ipallowlist
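# A quick reachability check against the firehose gRPC endpoint (illustrative;
# assumes grpcurl is installed, the grpc entrypoint is reachable, and server
# reflection is enabled):
#
# grpcurl arbitrum-one-firehose.${DOMAIN}:443 list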
volumes:
arbitrum-one-nitro-pruned-pebble-hash:
arbitrum-one-fireeth: # matches the ARBITRUM_ONE_FIREETH_DATA default used above
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard, geth-only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth-only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard, geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...


@@ -1,137 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-one-nitro-pruned-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-one \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-one:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=42161
- --execution.caching.archive=false
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=pruned
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-one
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_DATA:-arbitrum-one-nitro-pruned-pebble-hash}:/root/.arbitrum
- ./tmp/arbitrum-one:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-nitro-pruned-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-one
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one`) || Path(`/arbitrum-one/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.rule=Path(`/arbitrum-one`) || Path(`/arbitrum-one/`)}
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.middlewares=arbitrum-one-nitro-pruned-pebble-hash-stripprefix, ipallowlist
volumes:
arbitrum-one-nitro-pruned-pebble-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard, geth-only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth-only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard, geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...


@@ -1,137 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-sepolia-nitro-archive-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-sepolia-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-sepolia-archive:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=421614
- --execution.caching.archive=true
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=archive
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-sepolia-archive
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_SEPOLIA_NITRO_ARCHIVE_PEBBLE_HASH_DATA:-arbitrum-sepolia-nitro-archive-pebble-hash}:/root/.arbitrum
- ./tmp/arbitrum-sepolia-archive:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-sepolia-nitro-archive-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-sepolia-archive
- traefik.http.services.arbitrum-sepolia-nitro-archive-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-archive-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-archive-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-archive-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-sepolia-archive`) || Path(`/arbitrum-sepolia-archive/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-sepolia-nitro-archive-pebble-hash.rule=Path(`/arbitrum-sepolia-archive`) || Path(`/arbitrum-sepolia-archive/`)}
- traefik.http.routers.arbitrum-sepolia-nitro-archive-pebble-hash.middlewares=arbitrum-sepolia-nitro-archive-pebble-hash-stripprefix, ipallowlist
volumes:
arbitrum-sepolia-nitro-archive-pebble-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard, geth-only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth-only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard, geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...


@@ -1,137 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-sepolia-nitro-pruned-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-sepolia \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-sepolia:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=421614
- --execution.caching.archive=false
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=pruned
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-sepolia
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_SEPOLIA_NITRO_PRUNED_PEBBLE_HASH_DATA:-arbitrum-sepolia-nitro-pruned-pebble-hash}:/root/.arbitrum
- ./tmp/arbitrum-sepolia:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-sepolia-nitro-pruned-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-sepolia
- traefik.http.services.arbitrum-sepolia-nitro-pruned-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-sepolia`) || Path(`/arbitrum-sepolia/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-hash.rule=Path(`/arbitrum-sepolia`) || Path(`/arbitrum-sepolia/`)}
- traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-hash.middlewares=arbitrum-sepolia-nitro-pruned-pebble-hash-stripprefix, ipallowlist
volumes:
arbitrum-sepolia-nitro-pruned-pebble-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard, geth-only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth-only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard, geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...


@@ -1,57 +0,0 @@
services:
arbitrum-sepolia:
image: 'offchainlabs/nitro-node:${NITRO_VERSION:-v3.5.3-0a9c975}'
stop_grace_period: 3m
user: root
volumes:
- 'arbitrum-sepolia-nitro-pruned-pebble-path:/root/.arbitrum'
- './tmp/arbitrum-sepolia:/tmp'
expose:
- 8545
command:
- --chain.id=421614
- --execution.caching.state-scheme=path
- --execution.rpc.gas-cap=600000000
- --execution.caching.archive=false
- --execution.sequencer.enable=false
- --persistent.db-engine=pebble
- --persistent.chain=/root/.arbitrum/arbitrum-sepolia
- --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
- --http.addr=0.0.0.0
- --http.port=8545
- --http.vhosts=*
- --http.corsdomain=*
- --http.api=eth,net,web3,arb,txpool,debug
- --ws.port=8545
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.api=eth,net,web3,arb,txpool,debug
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --log-type=json
- --node.sequencer=false
- --node.staker.enable=false
- --node.batch-poster.enable=false
restart: unless-stopped
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.arbitrum-sepolia-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/arbitrum-sepolia"
- "traefik.http.services.arbitrum-sepolia-nitro-pruned-pebble-path.loadbalancer.server.port=8545"
- "traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-path.entrypoints=websecure"
- "traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-path.tls.certresolver=myresolver"
- "traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && PathPrefix(`/arbitrum-sepolia`)"
- "traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-path.middlewares=arbitrum-sepolia-nitro-pruned-pebble-path-stripprefix, ipwhitelist"
networks:
- chains
volumes:
arbitrum-sepolia-nitro-pruned-pebble-path:


@@ -1,142 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/connext-sepolia-nitro-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/connext-sepolia-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
connext-sepolia-archive:
image: ${CONNEXT_NITRO_IMAGE:-offchainlabs/nitro-node}:${CONNEXT_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.forwarding-target=https://rpc.connext-sepolia.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.connext-sepolia.gelato.digital
- --node.data-availability.sequencer-inbox-address=0x727095791318912381473707332248435763608420056676
- --node.feed.input.url=wss://feed.connext-sepolia.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/connext-sepolia-archive
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${CONNEXT_SEPOLIA_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-connext-sepolia-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./arb/connext/sepolia:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.connext-sepolia-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/connext-sepolia-archive
- traefik.http.services.connext-sepolia-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.connext-sepolia-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.connext-sepolia-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.connext-sepolia-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/connext-sepolia-archive`) || Path(`/connext-sepolia-archive/`))}
- ${NO_SSL:+traefik.http.routers.connext-sepolia-nitro-archive-leveldb-hash.rule=Path(`/connext-sepolia-archive`) || Path(`/connext-sepolia-archive/`)}
- traefik.http.routers.connext-sepolia-nitro-archive-leveldb-hash.middlewares=connext-sepolia-nitro-archive-leveldb-hash-stripprefix, ipallowlist
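# As the data-availability flags above imply, this chain is an AnyTrust-style
# rollup: batch data is fetched from a data availability committee through the
# das.* REST aggregator rather than from parent-chain calldata, and the
# feed.input websocket delivers sequenced transactions ahead of L1
# confirmation.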
volumes:
connext-sepolia-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: everclear-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard, geth-only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth-only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard, geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...


@@ -1,146 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/connext-sepolia-nitro-pruned-pebble-path.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/connext-sepolia \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
connext-sepolia:
image: ${CONNEXT_NITRO_IMAGE:-offchainlabs/nitro-node}:${CONNEXT_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.forwarding-target=https://rpc.connext-sepolia.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.connext-sepolia.gelato.digital
- --node.data-availability.sequencer-inbox-address=0x727095791318912381473707332248435763608420056676
- --node.feed.input.url=wss://feed.connext-sepolia.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/connext-sepolia
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${CONNEXT_SEPOLIA_NITRO_PRUNED_PEBBLE_PATH_DATA:-connext-sepolia-nitro-pruned-pebble-path}:/root/.arbitrum
- ./arb/connext/sepolia:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.connext-sepolia-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/connext-sepolia
- traefik.http.services.connext-sepolia-nitro-pruned-pebble-path.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.connext-sepolia-nitro-pruned-pebble-path.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.connext-sepolia-nitro-pruned-pebble-path.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.connext-sepolia-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && (Path(`/connext-sepolia`) || Path(`/connext-sepolia/`))}
- ${NO_SSL:+traefik.http.routers.connext-sepolia-nitro-pruned-pebble-path.rule=Path(`/connext-sepolia`) || Path(`/connext-sepolia/`)}
- traefik.http.routers.connext-sepolia-nitro-pruned-pebble-path.middlewares=connext-sepolia-nitro-pruned-pebble-path-stripprefix, ipallowlist
volumes:
connext-sepolia-nitro-pruned-pebble-path:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: everclear-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
# not compatible with path state scheme
- name: debug_traceBlockByHash
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard, geth-only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth-only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard, geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
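# Path-scheme caveat (already noted inline above): with
# --execution.caching.state-scheme=path the node retains only recent state
# tries, so deep historical re-execution is unavailable and
# debug_traceBlockByHash is marked disabled for this upstream.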


@@ -1,142 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/everclear-mainnet-nitro-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/everclear-mainnet-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
everclear-mainnet-archive:
image: ${EVERCLEAR_NITRO_IMAGE:-offchainlabs/nitro-node}:${EVERCLEAR_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.forwarding-target=https://rpc.everclear.raas.gelato.cloud
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.everclear.raas.gelato.cloud
- --node.data-availability.sequencer-inbox-address=0x727095791318912381473707332248435763608420056676
- --node.feed.input.url=wss://feed.everclear.raas.gelato.cloud
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/everclear-mainnet-archive
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${EVERCLEAR_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-everclear-mainnet-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./arb/everclear/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.everclear-mainnet-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/everclear-mainnet-archive
- traefik.http.services.everclear-mainnet-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.everclear-mainnet-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.everclear-mainnet-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.everclear-mainnet-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/everclear-mainnet-archive`) || Path(`/everclear-mainnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.everclear-mainnet-nitro-archive-leveldb-hash.rule=Path(`/everclear-mainnet-archive`) || Path(`/everclear-mainnet-archive/`)}
- traefik.http.routers.everclear-mainnet-nitro-archive-leveldb-hash.middlewares=everclear-mainnet-nitro-archive-leveldb-hash-stripprefix, ipallowlist
volumes:
everclear-mainnet-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: everclear
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard, geth-only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth-only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard, geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...


@@ -1,146 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/everclear-mainnet-nitro-pruned-pebble-path.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/everclear-mainnet \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
everclear-mainnet:
image: ${EVERCLEAR_NITRO_IMAGE:-offchainlabs/nitro-node}:${EVERCLEAR_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.forwarding-target=https://rpc.everclear.raas.gelato.cloud
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.everclear.raas.gelato.cloud
- --node.data-availability.sequencer-inbox-address=0x727095791318912381473707332248435763608420056676
- --node.feed.input.url=wss://feed.everclear.raas.gelato.cloud
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/everclear-mainnet
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${EVERCLEAR_MAINNET_NITRO_PRUNED_PEBBLE_PATH_DATA:-everclear-mainnet-nitro-pruned-pebble-path}:/root/.arbitrum
- ./arb/everclear/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.everclear-mainnet-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/everclear-mainnet
- traefik.http.services.everclear-mainnet-nitro-pruned-pebble-path.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.everclear-mainnet-nitro-pruned-pebble-path.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.everclear-mainnet-nitro-pruned-pebble-path.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.everclear-mainnet-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && (Path(`/everclear-mainnet`) || Path(`/everclear-mainnet/`))}
- ${NO_SSL:+traefik.http.routers.everclear-mainnet-nitro-pruned-pebble-path.rule=Path(`/everclear-mainnet`) || Path(`/everclear-mainnet/`)}
- traefik.http.routers.everclear-mainnet-nitro-pruned-pebble-path.middlewares=everclear-mainnet-nitro-pruned-pebble-path-stripprefix, ipallowlist
volumes:
everclear-mainnet-nitro-pruned-pebble-path:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: everclear
method-groups:
enabled:
- debug
- filter
methods:
disabled:
# not compatible with path state scheme
- name: debug_traceBlockByHash
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
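# The variant naming is mechanical across these files:
# <chain>-<network>-nitro-<archive|pruned>-<leveldb|pebble>-<hash|path>
# mirrors the flags inside, i.e. --execution.caching.archive,
# --persistent.db-engine and --execution.caching.state-scheme. Only the
# path-scheme variants pass --execution.caching.state-scheme=path and,
# accordingly, disable debug_traceBlockByHash in their upstream entry;
# the archive-leveldb-hash variants simply rely on the nitro defaults.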


@@ -1,141 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/opencampuscodex-sepolia-nitro-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/opencampuscodex-sepolia-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
opencampuscodex-sepolia-archive:
image: ${OPENCAMPUSCODEX_NITRO_IMAGE:-offchainlabs/nitro-node}:${OPENCAMPUSCODEX_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.forwarding-target=https://rpc.open-campus-codex.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ARBITRUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.open-campus-codex.gelato.digital
- --node.data-availability.sequencer-inbox-address=0xe347C1223381b9Dcd6c0F61cf81c90175A7Bae77
- --node.feed.input.url=wss://feed.open-campus-codex.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.connection.url=${ARBITRUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/opencampuscodex-sepolia-archive
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${OPENCAMPUSCODEX_SEPOLIA_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-opencampuscodex-sepolia-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./arb/opencampuscodex/arbitrum-sepolia:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.opencampuscodex-sepolia-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/opencampuscodex-sepolia-archive
- traefik.http.services.opencampuscodex-sepolia-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.opencampuscodex-sepolia-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.opencampuscodex-sepolia-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.opencampuscodex-sepolia-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/opencampuscodex-sepolia-archive`) || Path(`/opencampuscodex-sepolia-archive/`))}
- ${NO_SSL:+traefik.http.routers.opencampuscodex-sepolia-nitro-archive-leveldb-hash.rule=Path(`/opencampuscodex-sepolia-archive`) || Path(`/opencampuscodex-sepolia-archive/`)}
- traefik.http.routers.opencampuscodex-sepolia-nitro-archive-leveldb-hash.middlewares=opencampuscodex-sepolia-nitro-archive-leveldb-hash-stripprefix, ipallowlist
volumes:
opencampuscodex-sepolia-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: open-campus-codex-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
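# Note that the opencampuscodex files set no
# --parent-chain.blob-client.beacon-url: the parent chain here is
# Arbitrum Sepolia rather than Ethereum, so there are no EIP-4844 blobs
# to fetch, whereas the Ethereum-parented chains (everclear, real) must
# point the blob client at a beacon REST endpoint. Playblock, settling
# to Arbitrum Nova, omits the flag for the same reason.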


@@ -1,120 +0,0 @@
# use at your own risk
services:
opencampuscodex-sepolia:
image: ${OPENCAMPUSCODEX_NITRO_IMAGE:-offchainlabs/nitro-node}:${OPENCAMPUSCODEX_SEPOLIA_NITRO_VERSION:-v3.5.3-0a9c975}
user: root
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
expose:
- 8545
- 8551
ports:
- 10938:10938
- 10938:10938/udp
volumes:
- ${OPENCAMPUSCODEX_SEPOLIA_NITRO_PRUNED_PEBBLE_HASH_DATA:-opencampuscodex-sepolia-nitro-pruned-pebble-hash}:/root/.arbitrum
- /slowdisk:/slowdisk
- .jwtsecret:/jwtsecret:ro
- ./tmp/opencampuscodex-sepolia:/tmp
command:
- --datadir=/root/.arbitrum
- --port=10938
- --bind=0.0.0.0
- --nat=extip:${IP}
- --http
- --http.port=8545
- --http.vhosts=*
- --ws
- --ws.port=8545
- --ws.origins=*
- --ws.addr=0.0.0.0
- --http.addr=0.0.0.0
- --maxpeers=50
- --http.api=eth,net,web3,arb,txpool,debug
- --ws.api=eth,net,web3,arb,txpool,debug
- --rpc.gascap=600000000
- --rpc.returndatalimit=10000000
- --rpc.txfeecap=0
- --execution.caching.state-scheme=hash
- --execution.rpc.gas-cap=600000000
- --execution.caching.archive=false
- --execution.sequencer.enable=false
- --persistent.db-engine=pebble
- --persistent.chain=/root/.arbitrum/opencampuscodex-sepolia
- --conf.file=/config/baseConfig.json
- --node.sequencer=false
- --node.staker.enable=false
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.sequencer-inbox-address=0xe347C1223381b9Dcd6c0F61cf81c90175A7Bae77
- --node.data-availability.parent-chain-node-url=${ARBITRUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.open-campus-codex.gelato.digital
- --node.feed.input.url=wss://feed.open-campus-codex.gelato.digital
- --execution.forwarding-target=https://rpc.open-campus-codex.gelato.digital
- --parent-chain.connection.url=${ARBITRUM_SEPOLIA_EXECUTION_RPC}
networks:
- chains
restart: unless-stopped
stop_grace_period: 5m
labels:
- traefik.enable=true
- traefik.http.middlewares.opencampuscodex-sepolia-nitro-pruned-pebble-hash-stripprefix.stripprefix.prefixes=/opencampuscodex-sepolia
- traefik.http.services.opencampuscodex-sepolia-nitro-pruned-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-hash.rule=Host(`$DOMAIN`) && PathPrefix(`/opencampuscodex-sepolia`)}
- ${NO_SSL:+traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-hash.rule=PathPrefix(`/opencampuscodex-sepolia`)}
- traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-hash.middlewares=opencampuscodex-sepolia-nitro-pruned-pebble-hash-stripprefix, ipwhitelist
volumes:
opencampuscodex-sepolia-nitro-pruned-pebble-hash:
x-upstreams:
- chain: open-campus-codex-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
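# This older opencampuscodex-sepolia variant appears to predate the
# generated template used above: it pins nitro v3.5.3, publishes p2p
# ports and exposes 8551, mounts .jwtsecret, mixes geth-style flags
# (--port, --bind, --nat, --maxpeers) into the nitro command line, and
# still references the ipwhitelist middleware where the newer files use
# ipallowlist.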


@@ -1,145 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/opencampuscodex-sepolia-nitro-pruned-pebble-path.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/opencampuscodex-sepolia \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
opencampuscodex-sepolia:
image: ${OPENCAMPUSCODEX_NITRO_IMAGE:-offchainlabs/nitro-node}:${OPENCAMPUSCODEX_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.forwarding-target=https://rpc.open-campus-codex.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ARBITRUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.open-campus-codex.gelato.digital
- --node.data-availability.sequencer-inbox-address=0xe347C1223381b9Dcd6c0F61cf81c90175A7Bae77
- --node.feed.input.url=wss://feed.open-campus-codex.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.connection.url=${ARBITRUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/opencampuscodex-sepolia
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${OPENCAMPUSCODEX_SEPOLIA_NITRO_PRUNED_PEBBLE_PATH_DATA:-opencampuscodex-sepolia-nitro-pruned-pebble-path}:/root/.arbitrum
- ./arb/opencampuscodex/arbitrum-sepolia:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.opencampuscodex-sepolia-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/opencampuscodex-sepolia
- traefik.http.services.opencampuscodex-sepolia-nitro-pruned-pebble-path.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-path.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-path.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && (Path(`/opencampuscodex-sepolia`) || Path(`/opencampuscodex-sepolia/`))}
- ${NO_SSL:+traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-path.rule=Path(`/opencampuscodex-sepolia`) || Path(`/opencampuscodex-sepolia/`)}
- traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-path.middlewares=opencampuscodex-sepolia-nitro-pruned-pebble-path-stripprefix, ipallowlist
volumes:
opencampuscodex-sepolia-nitro-pruned-pebble-path:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: open-campus-codex-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
# not compatible with path state scheme
- name: debug_traceBlockByHash
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...


@@ -1,141 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/playblock-mainnet-nitro-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/playblock-mainnet-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
playblock-mainnet-archive:
image: ${PLAYBLOCK_NITRO_IMAGE:-offchainlabs/nitro-node}:${PLAYBLOCK_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.forwarding-target=https://rpc.playblock.io
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ARBITRUM_NOVA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.playblock.io
- --node.data-availability.sequencer-inbox-address=0xe347C1223381b9Dcd6c0F61cf81c90175A7Bae77
- --node.feed.input.url=wss://feed.playblock.io
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.connection.url=${ARBITRUM_NOVA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/playblock-mainnet-archive
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${PLAYBLOCK_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-playblock-mainnet-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./arb/playblock/arbitrum-nova:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.playblock-mainnet-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/playblock-mainnet-archive
- traefik.http.services.playblock-mainnet-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.playblock-mainnet-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.playblock-mainnet-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.playblock-mainnet-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/playblock-mainnet-archive`) || Path(`/playblock-mainnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.playblock-mainnet-nitro-archive-leveldb-hash.rule=Path(`/playblock-mainnet-archive`) || Path(`/playblock-mainnet-archive/`)}
- traefik.http.routers.playblock-mainnet-nitro-archive-leveldb-hash.middlewares=playblock-mainnet-nitro-archive-leveldb-hash-stripprefix, ipallowlist
volumes:
playblock-mainnet-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: playnance
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...


@@ -1,145 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/playblock-mainnet-nitro-pruned-pebble-path.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/playblock-mainnet \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
playblock-mainnet:
image: ${PLAYBLOCK_NITRO_IMAGE:-offchainlabs/nitro-node}:${PLAYBLOCK_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.forwarding-target=https://rpc.playblock.io
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ARBITRUM_NOVA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.playblock.io
- --node.data-availability.sequencer-inbox-address=0xe347C1223381b9Dcd6c0F61cf81c90175A7Bae77
- --node.feed.input.url=wss://feed.playblock.io
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.connection.url=${ARBITRUM_NOVA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/playblock-mainnet
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${PLAYBLOCK_MAINNET_NITRO_PRUNED_PEBBLE_PATH_DATA:-playblock-mainnet-nitro-pruned-pebble-path}:/root/.arbitrum
- ./arb/playblock/arbitrum-nova:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.playblock-mainnet-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/playblock-mainnet
- traefik.http.services.playblock-mainnet-nitro-pruned-pebble-path.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.playblock-mainnet-nitro-pruned-pebble-path.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.playblock-mainnet-nitro-pruned-pebble-path.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.playblock-mainnet-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && (Path(`/playblock-mainnet`) || Path(`/playblock-mainnet/`))}
- ${NO_SSL:+traefik.http.routers.playblock-mainnet-nitro-pruned-pebble-path.rule=Path(`/playblock-mainnet`) || Path(`/playblock-mainnet/`)}
- traefik.http.routers.playblock-mainnet-nitro-pruned-pebble-path.middlewares=playblock-mainnet-nitro-pruned-pebble-path-stripprefix, ipallowlist
volumes:
playblock-mainnet-nitro-pruned-pebble-path:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: playnance
method-groups:
enabled:
- debug
- filter
methods:
disabled:
# not compatible with path state scheme
- name: debug_traceBlockByHash
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...


@@ -1,142 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/real-mainnet-nitro-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/real-mainnet-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
real-mainnet-archive:
image: ${REAL_NITRO_IMAGE:-offchainlabs/nitro-node}:${REAL_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.forwarding-target=https://rpc.realforreal.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.realforreal.gelato.digital
- --node.data-availability.sequencer-inbox-address=0x51C4a227D59E49E26Ea07D8e4E9Af163da4c87A0
- --node.feed.input.url=wss://feed.realforreal.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/real-mainnet-archive
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${REAL_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-real-mainnet-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./arb/real/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.real-mainnet-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/real-mainnet-archive
- traefik.http.services.real-mainnet-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.real-mainnet-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.real-mainnet-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.real-mainnet-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/real-mainnet-archive`) || Path(`/real-mainnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.real-mainnet-nitro-archive-leveldb-hash.rule=Path(`/real-mainnet-archive`) || Path(`/real-mainnet-archive/`)}
- traefik.http.routers.real-mainnet-nitro-archive-leveldb-hash.middlewares=real-mainnet-nitro-archive-leveldb-hash-stripprefix, ipallowlist
volumes:
real-mainnet-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: real
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...


@@ -1,143 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/real-mainnet-nitro-archive-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/real-mainnet-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
real-mainnet-archive:
image: ${REAL_NITRO_IMAGE:-offchainlabs/nitro-node}:${REAL_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.forwarding-target=https://rpc.realforreal.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.realforreal.gelato.digital
- --node.data-availability.sequencer-inbox-address=0x51C4a227D59E49E26Ea07D8e4E9Af163da4c87A0
- --node.feed.input.url=wss://feed.realforreal.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/real-mainnet-archive
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${REAL_MAINNET_NITRO_ARCHIVE_PEBBLE_HASH_DATA:-real-mainnet-nitro-archive-pebble-hash}:/root/.arbitrum
- ./arb/real/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.real-mainnet-nitro-archive-pebble-hash-stripprefix.stripprefix.prefixes=/real-mainnet-archive
- traefik.http.services.real-mainnet-nitro-archive-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.real-mainnet-nitro-archive-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.real-mainnet-nitro-archive-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.real-mainnet-nitro-archive-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/real-mainnet-archive`) || Path(`/real-mainnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.real-mainnet-nitro-archive-pebble-hash.rule=Path(`/real-mainnet-archive`) || Path(`/real-mainnet-archive/`)}
- traefik.http.routers.real-mainnet-nitro-archive-pebble-hash.middlewares=real-mainnet-nitro-archive-pebble-hash-stripprefix, ipallowlist
volumes:
real-mainnet-nitro-archive-pebble-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: real
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...


@@ -1,146 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/real-mainnet-nitro-pruned-pebble-path.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/real-mainnet \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
real-mainnet:
image: ${REAL_NITRO_IMAGE:-offchainlabs/nitro-node}:${REAL_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.forwarding-target=https://rpc.realforreal.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.realforreal.gelato.digital
- --node.data-availability.sequencer-inbox-address=0x51C4a227D59E49E26Ea07D8e4E9Af163da4c87A0
- --node.feed.input.url=wss://feed.realforreal.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/real-mainnet
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${REAL_MAINNET_NITRO_PRUNED_PEBBLE_PATH_DATA:-real-mainnet-nitro-pruned-pebble-path}:/root/.arbitrum
- ./arb/real/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.real-mainnet-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/real-mainnet
- traefik.http.services.real-mainnet-nitro-pruned-pebble-path.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.real-mainnet-nitro-pruned-pebble-path.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.real-mainnet-nitro-pruned-pebble-path.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.real-mainnet-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && (Path(`/real-mainnet`) || Path(`/real-mainnet/`))}
- ${NO_SSL:+traefik.http.routers.real-mainnet-nitro-pruned-pebble-path.rule=Path(`/real-mainnet`) || Path(`/real-mainnet/`)}
- traefik.http.routers.real-mainnet-nitro-pruned-pebble-path.middlewares=real-mainnet-nitro-pruned-pebble-path-stripprefix, ipallowlist
volumes:
real-mainnet-nitro-pruned-pebble-path:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: real
method-groups:
enabled:
- debug
- filter
methods:
disabled:
# not compatible with path state scheme
- name: debug_traceBlockByHash
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...


@@ -1,6 +0,0 @@
{
"chain": {
"info-json": "[{\"chain-id\":656476,\"parent-chain-id\":421614,\"parent-chain-is-arbitrum\":true,\"chain-name\":\"Codex\",\"chain-config\":{\"homesteadBlock\":0,\"daoForkBlock\":null,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"berlinBlock\":0,\"londonBlock\":0,\"clique\":{\"period\":0,\"epoch\":0},\"arbitrum\":{\"EnableArbOS\":true,\"AllowDebugPrecompiles\":false,\"DataAvailabilityCommittee\":true,\"InitialArbOSVersion\":20,\"GenesisBlockNum\":0,\"MaxCodeSize\":24576,\"MaxInitCodeSize\":49152,\"InitialChainOwner\":\"0xF46B08D9E85df74b6f24Ad85A6a655c02857D5b8\"},\"chainId\":656476},\"rollup\":{\"bridge\":\"0xbf3D64671154D1FB0b27Cb1decbE1094d7016448\",\"inbox\":\"0x67F231eDC83a66556148673863e73D705422A678\",\"sequencer-inbox\":\"0xd5131c1924f080D45CA3Ae97262c0015F675004b\",\"rollup\":\"0x0A94003d3482128c89395aBd94a41DA8eeBB59f7\",\"validator-utils\":\"0xB11EB62DD2B352886A4530A9106fE427844D515f\",\"validator-wallet-creator\":\"0xEb9885B6c0e117D339F47585cC06a2765AaE2E0b\",\"deployed-at\":41549214}}]",
"name": "Codex"
}
}
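# The JSON above is evidently the baseConfig.json mounted at /config by
# the opencampuscodex (Codex) compose files. chain.info-json is itself
# a JSON string, so inspecting the embedded rollup metadata takes two
# passes, e.g. (assuming jq is installed):
#
#   jq -r '.chain."info-json"' baseConfig.json | jq '.[0]."chain-id"'
#   # -> 656476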


@@ -1,6 +0,0 @@
{
"chain": {
"info-json": "[{\"chain-id\":1829,\"parent-chain-id\":42170,\"parent-chain-is-arbitrum\":true,\"chain-name\":\"Playblock\",\"chain-config\":{\"homesteadBlock\":0,\"daoForkBlock\":null,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"berlinBlock\":0,\"londonBlock\":0,\"clique\":{\"period\":0,\"epoch\":0},\"arbitrum\":{\"EnableArbOS\":true,\"AllowDebugPrecompiles\":false,\"DataAvailabilityCommittee\":true,\"InitialArbOSVersion\":11,\"GenesisBlockNum\":0,\"MaxCodeSize\":24576,\"MaxInitCodeSize\":49152,\"InitialChainOwner\":\"0x10Fe3cb853F7ef551E1598d91436e95d41Aea45a\"},\"chainId\":1829},\"rollup\":{\"bridge\":\"0xD4FE46D2533E7d03382ac6cACF0547F336e59DC0\",\"inbox\":\"0xFF55fB76F5671dD9eB6c62EffF8D693Bb161a3ad\",\"sequencer-inbox\":\"0xe347C1223381b9Dcd6c0F61cf81c90175A7Bae77\",\"rollup\":\"0x04ea347cC6A258A7F65D67aFb60B1d487062A1d0\",\"validator-utils\":\"0x6c21303F5986180B1394d2C89f3e883890E2867b\",\"validator-wallet-creator\":\"0x2b0E04Dc90e3fA58165CB41E2834B44A56E766aF\",\"deployed-at\":55663578}}]",
"name": "Playblock"
}
}


@@ -1,6 +0,0 @@
{
"chain": {
"info-json": "[{\"chain-id\":111188,\"parent-chain-id\":1,\"parent-chain-is-arbitrum\":false,\"chain-name\":\"real\",\"chain-config\":{\"homesteadBlock\":0,\"daoForkBlock\":null,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"berlinBlock\":0,\"londonBlock\":0,\"clique\":{\"period\":0,\"epoch\":0},\"arbitrum\":{\"EnableArbOS\":true,\"AllowDebugPrecompiles\":false,\"DataAvailabilityCommittee\":true,\"InitialArbOSVersion\":11,\"GenesisBlockNum\":0,\"MaxCodeSize\":24576,\"MaxInitCodeSize\":49152,\"InitialChainOwner\":\"0xbB0385FebfD25E01527617938129A34bD497331e\"},\"chainId\":111188},\"rollup\":{\"bridge\":\"0x39D2EEcC8B55f46aE64789E2494dE777cDDeED03\",\"inbox\":\"0xf538671ddd60eE54BdD6FBb0E309c491A7A2df11\",\"sequencer-inbox\":\"0x51C4a227D59E49E26Ea07D8e4E9Af163da4c87A0\",\"rollup\":\"0xc4F7B37bE2bBbcF07373F28c61b1A259dfe49d2a\",\"validator-utils\":\"0x2b0E04Dc90e3fA58165CB41E2834B44A56E766aF\",\"validator-wallet-creator\":\"0x9CAd81628aB7D8e239F1A5B497313341578c5F71\",\"deployed-at\":19446518}}]",
"name": "real"
}
}


@@ -1 +0,0 @@
arb/nitro/arbitrum-nova-nitro-pruned-pebble-hash.yml
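# The one-line files in this stretch of the diff are most likely
# symlinks: git renders a deleted symlink as a single line holding its
# target path, so each of these was a short alias pointing at the
# canonical compose file named on the line.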


@@ -1,45 +0,0 @@
services:
arbitrum-classic:
image: 'offchainlabs/arb-node:v1.4.5-e97c1a4'
stop_grace_period: 30s
user: root
volumes:
- ${ARBITRUM_ONE_MAINNET_ARBNODE_ARCHIVE_TRACE_DATA:-arbitrum-one-mainnet-arbnode-archive-trace}:/data
- ./arbitrum/classic-entrypoint.sh:/entrypoint.sh
expose:
- 8547
- 8548
entrypoint: ["/home/user/go/bin/arb-node"]
command:
- --l1.url=http://eth.drpc.org
- --core.checkpoint-gas-frequency=156250000
- --node.rpc.enable-l1-calls
- --node.cache.allow-slow-lookup
- --node.rpc.tracing.enable
- --node.rpc.addr=0.0.0.0
- --node.rpc.port=8547
- --node.rpc.tracing.namespace=trace
- --node.chain-id=42161
- --node.ws.addr=0.0.0.0
- --node.ws.port=8548
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=7070
- --l2.disable-upstream
- --persistent.chain=/data/datadir/
- --persistent.global-config=/data/
restart: unless-stopped
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.arbitrum-one-mainnet-arbnode-archive-trace-stripprefix.stripprefix.prefixes=/arbitrum-classic"
- "traefik.http.services.arbitrum-one-mainnet-arbnode-archive-trace.loadbalancer.server.port=8547"
- "${NO_SSL:-traefik.http.routers.arbitrum-one-mainnet-arbnode-archive-trace.entrypoints=websecure}"
- "${NO_SSL:-traefik.http.routers.arbitrum-one-mainnet-arbnode-archive-trace.tls.certresolver=myresolver}"
- "${NO_SSL:-traefik.http.routers.arbitrum-one-mainnet-arbnode-archive-trace.rule=Host(`$DOMAIN`) && PathPrefix(`/arbitrum-classic`)}"
- "${NO_SSL:+traefik.http.routers.arbitrum-one-mainnet-arbnode-archive-trace.rule=PathPrefix(`/arbitrum-classic`)}"
- "traefik.http.routers.arbitrum-one-mainnet-arbnode-archive-trace.middlewares=arbitrum-one-mainnet-arbnode-archive-trace-stripprefix, ipwhitelist"
networks:
- chains
volumes:
arbitrum-one-mainnet-arbnode-archive-trace:
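# Unlike the nitro services above, this one runs the legacy pre-Nitro
# "classic" arb-node against Arbitrum One, with the trace namespace
# enabled and --l2.disable-upstream set; keeping a classic node around
# is the usual way to serve archive and trace queries for blocks that
# predate the Nitro migration.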


@@ -1 +0,0 @@
arb/nitro/alephzero-mainnet-nitro-archive-leveldb-hash.yml


@@ -1 +0,0 @@
arb/nitro/arbitrum-one-nitro-pruned-pebble-hash.yml


@@ -1 +0,0 @@
arb/nitro/arbitrum-sepolia-nitro-archive-pebble-hash.yml


@@ -1 +0,0 @@
arb/nitro/arbitrum-sepolia-nitro-pruned-pebble-hash.yml


@@ -1 +0,0 @@
arb/nitro/arbitrum-sepolia-nitro-pruned-pebble-hash.yml


@@ -1 +0,0 @@
avalanche/go/avalanche-fuji-go-pruned-pebbledb.yml


@@ -1 +0,0 @@
avalanche/go/avalanche-mainnet-go-archive-leveldb.yml


@@ -1,51 +0,0 @@
services:
avalanche-archive-client:
image: avaplatform/avalanchego:${AVALANCHEGO_VERSION:-v1.12.2}
ulimits:
nofile: 1048576
expose:
- "9650"
- "30720"
ports:
- "30720:30720/tcp"
- "30720:30720/udp"
volumes:
- ${AVALANCHE_MAINNET_GO_ARCHIVE_DATA:-avalanche-mainnet-go-archive}:/root/.avalanchego
- ./avalanche/configs/chains/C/archive-config.json:/root/.avalanchego/configs/chains/C/config.json
environment:
- "IP=${IP}"
networks:
- chains
command: "/avalanchego/build/avalanchego --http-host= --http-allowed-hosts=* --staking-port=30720 --public-ip=$IP"
restart: unless-stopped
avalanche-archive:
restart: unless-stopped
image: nginx
depends_on:
- avalanche-archive-client
expose:
- 80
environment:
PROXY_HOST: avalanche-archive-client
RPC_PORT: 9650
RPC_PATH: /ext/bc/C/rpc
WS_PORT: 9650
WS_PATH: /ext/bc/C/ws
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.avalanche-mainnet-go-archive-stripprefix.stripprefix.prefixes=/avalanche-archive"
- "traefik.http.services.avalanche-mainnet-go-archive.loadbalancer.server.port=80"
- "${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-archive.entrypoints=websecure}"
- "${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-archive.tls.certresolver=myresolver}"
- "${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-archive.rule=Host(`$DOMAIN`) && PathPrefix(`/avalanche-archive`)}"
- "${NO_SSL:+traefik.http.routers.avalanche-mainnet-go-archive.rule=PathPrefix(`/avalanche-archive`)}"
- "traefik.http.routers.avalanche-mainnet-go-archive.middlewares=avalanche-mainnet-go-archive-stripprefix, ipwhitelist"
volumes:
avalanche-mainnet-go-archive:


@@ -1 +0,0 @@
avalanche/go/avalanche-mainnet-go-pruned-pebbledb.yml


@@ -1,4 +0,0 @@
{
"state-sync-enabled": false,
"pruning-enabled": false
}


@@ -1,43 +0,0 @@
{
"snowman-api-enabled": false,
"coreth-admin-api-enabled": false,
"coreth-admin-api-dir": "",
"eth-apis": [
"public-eth",
"public-eth-filter",
"net",
"web3",
"internal-public-eth",
"internal-public-blockchain",
"internal-public-transaction-pool",
"internal-public-account"
],
"continuous-profiler-dir": "",
"continuous-profiler-frequency": 900000000000,
"continuous-profiler-max-files": 5,
"rpc-gas-cap": 50000000,
"rpc-tx-fee-cap": 100,
"preimages-enabled": false,
"pruning-enabled": true,
"snapshot-async": true,
"snapshot-verification-enabled": false,
"metrics-enabled": false,
"metrics-expensive-enabled": false,
"local-txs-enabled": false,
"api-max-duration": 0,
"ws-cpu-refill-rate": 0,
"ws-cpu-max-stored": 0,
"api-max-blocks-per-request": 0,
"allow-unfinalized-queries": false,
"allow-unprotected-txs": false,
"keystore-directory": "",
"keystore-external-signer": "",
"keystore-insecure-unlock-allowed": false,
"remote-tx-gossip-only-enabled": false,
"tx-regossip-frequency": 60000000000,
"tx-regossip-max-size": 15,
"log-level": "debug",
"offline-pruning-enabled": false,
"offline-pruning-bloom-filter-size": 512,
"offline-pruning-data-directory": ""
}


@@ -1,6 +0,0 @@
{
"state-sync-enabled": true,
"pruning-enabled": true,
"offline-pruning-enabled": true,
"offline-pruning-data-directory": "/root/.avalanchego/offline-pruning"
}


@@ -1,7 +0,0 @@
{
"state-sync-enabled": true,
"pruning-enabled": true,
"offline-pruning-enabled": false,
"offline-pruning-data-directory": "/root/.avalanchego/offline-pruning",
"rpc-gas-cap": 600000000
}


@@ -1,4 +0,0 @@
{
"state-sync-enabled": false,
"pruning-enabled": false
}
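# Taken together, the C-chain configs split two ways: the archive
# variants pin "state-sync-enabled": false and "pruning-enabled": false
# so full state history is kept, while the pruned variants enable state
# sync and pruning (one of them also raising rpc-gas-cap to 600000000)
# and point offline-pruning-data-directory at
# /root/.avalanchego/offline-pruning for offline pruning runs.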


@@ -1,128 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:avalanche/go/avalanche-fuji-go-archive-leveldb.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/avalanche-fuji-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
avalanche-fuji-archive-client:
image: ${AVALANCHE_GO_IMAGE:-avaplatform/avalanchego}:${AVALANCHE_FUJI_GO_VERSION:-v1.13.0-fuji}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
ports:
- 10046:10046
- 10046:10046/udp
expose:
- 9650
entrypoint: [/avalanchego/build/avalanchego]
command:
- --chain-config-dir=/config/archive
- --db-type=leveldb
- --http-allowed-hosts=*
- --http-host=
- --network-id=fuji
- --public-ip=${IP}
- --staking-port=10046
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${AVALANCHE_FUJI_GO_ARCHIVE_LEVELDB_DATA:-avalanche-fuji-go-archive-leveldb}:/root/.avalanchego
- ./avalanche/fuji:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
avalanche-fuji-archive:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: avalanche-fuji-archive-client
RPC_PATH: /ext/bc/C/rpc
RPC_PORT: 9650
WS_PATH: /ext/bc/C/ws
WS_PORT: 9650
restart: unless-stopped
depends_on:
- avalanche-fuji-archive-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.avalanche-fuji-go-archive-leveldb-stripprefix.stripprefix.prefixes=/avalanche-fuji-archive
- traefik.http.services.avalanche-fuji-go-archive-leveldb.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-archive-leveldb.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-archive-leveldb.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-archive-leveldb.rule=Host(`$DOMAIN`) && (Path(`/avalanche-fuji-archive`) || Path(`/avalanche-fuji-archive/`))}
- ${NO_SSL:+traefik.http.routers.avalanche-fuji-go-archive-leveldb.rule=Path(`/avalanche-fuji-archive`) || Path(`/avalanche-fuji-archive/`)}
- traefik.http.routers.avalanche-fuji-go-archive-leveldb.middlewares=avalanche-fuji-go-archive-leveldb-stripprefix, ipallowlist
volumes:
avalanche-fuji-go-archive-leveldb:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: avalanche
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...
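The paired `${NO_SSL:-...}` / `${NO_SSL:+...}` labels above rely on shell-style parameter expansion, which Docker Compose applies to compose files: `:-` substitutes the fallback when the variable is unset or empty, while `:+` substitutes only when it is set. With NO_SSL unset you get the TLS router labels; with NO_SSL set you get the plain-HTTP rule instead (note the `:-` lines then collapse to NO_SSL's own value, so it is presumably set to a harmless dummy label). A minimal shell sketch of the mechanics:

#!/bin/sh
# ${VAR:-fallback} -> fallback when VAR is unset or empty
# ${VAR:+alternate} -> alternate only when VAR is set
unset NO_SSL
echo "A: '${NO_SSL:-tls-label}'"   # A: 'tls-label'
echo "B: '${NO_SSL:+plain-label}'" # B: ''
NO_SSL=whatever
echo "C: '${NO_SSL:-tls-label}'"   # C: 'whatever'
echo "D: '${NO_SSL:+plain-label}'" # D: 'plain-label'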


@@ -1,128 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:avalanche/go/avalanche-fuji-go-pruned-pebbledb.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/avalanche-fuji \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
avalanche-fuji-client:
image: ${AVALANCHE_GO_IMAGE:-avaplatform/avalanchego}:${AVALANCHE_FUJI_GO_VERSION:-v1.13.0-fuji}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
ports:
- 10350:10350
- 10350:10350/udp
expose:
- 9650
entrypoint: [/avalanchego/build/avalanchego]
command:
- --chain-config-dir=/config/pruned
- --db-type=pebbledb
- --http-allowed-hosts=*
- --http-host=
- --network-id=fuji
- --public-ip=${IP}
- --staking-port=10350
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${AVALANCHE_FUJI_GO_PRUNED_PEBBLEDB_DATA:-avalanche-fuji-go-pruned-pebbledb}:/root/.avalanchego
- ./avalanche/fuji:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
avalanche-fuji:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: avalanche-fuji-client
RPC_PATH: /ext/bc/C/rpc
RPC_PORT: 9650
WS_PATH: /ext/bc/C/ws
WS_PORT: 9650
restart: unless-stopped
depends_on:
- avalanche-fuji-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.avalanche-fuji-go-pruned-pebbledb-stripprefix.stripprefix.prefixes=/avalanche-fuji
- traefik.http.services.avalanche-fuji-go-pruned-pebbledb.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-pruned-pebbledb.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-pruned-pebbledb.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-pruned-pebbledb.rule=Host(`$DOMAIN`) && (Path(`/avalanche-fuji`) || Path(`/avalanche-fuji/`))}
- ${NO_SSL:+traefik.http.routers.avalanche-fuji-go-pruned-pebbledb.rule=Path(`/avalanche-fuji`) || Path(`/avalanche-fuji/`)}
- traefik.http.routers.avalanche-fuji-go-pruned-pebbledb.middlewares=avalanche-fuji-go-pruned-pebbledb-stripprefix, ipallowlist
volumes:
avalanche-fuji-go-pruned-pebbledb:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: avalanche
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...


@@ -1,128 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:avalanche/go/avalanche-mainnet-go-archive-leveldb.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/avalanche-mainnet-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
avalanche-mainnet-archive-client:
image: ${AVALANCHE_GO_IMAGE:-avaplatform/avalanchego}:${AVALANCHE_MAINNET_GO_VERSION:-v1.13.0}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
ports:
- 12934:12934
- 12934:12934/udp
expose:
- 9650
entrypoint: [/avalanchego/build/avalanchego]
command:
- --chain-config-dir=/config/archive
- --db-type=leveldb
- --http-allowed-hosts=*
- --http-host=
- --network-id=mainnet
- --public-ip=${IP}
- --staking-port=12934
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${AVALANCHE_MAINNET_GO_ARCHIVE_LEVELDB_DATA:-avalanche-mainnet-go-archive-leveldb}:/root/.avalanchego
- ./avalanche/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
avalanche-mainnet-archive:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: avalanche-mainnet-archive-client
RPC_PATH: /ext/bc/C/rpc
RPC_PORT: 9650
WS_PATH: /ext/bc/C/ws
WS_PORT: 9650
restart: unless-stopped
depends_on:
- avalanche-mainnet-archive-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.avalanche-mainnet-go-archive-leveldb-stripprefix.stripprefix.prefixes=/avalanche-mainnet-archive
- traefik.http.services.avalanche-mainnet-go-archive-leveldb.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-archive-leveldb.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-archive-leveldb.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-archive-leveldb.rule=Host(`$DOMAIN`) && (Path(`/avalanche-mainnet-archive`) || Path(`/avalanche-mainnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.avalanche-mainnet-go-archive-leveldb.rule=Path(`/avalanche-mainnet-archive`) || Path(`/avalanche-mainnet-archive/`)}
- traefik.http.routers.avalanche-mainnet-go-archive-leveldb.middlewares=avalanche-mainnet-go-archive-leveldb-stripprefix, ipallowlist
volumes:
avalanche-mainnet-go-archive-leveldb:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: avalanche
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...


@@ -1,128 +0,0 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:avalanche/go/avalanche-mainnet-go-pruned-pebbledb.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/avalanche-mainnet \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
avalanche-mainnet-client:
image: ${AVALANCHE_GO_IMAGE:-avaplatform/avalanchego}:${AVALANCHE_MAINNET_GO_VERSION:-v1.13.0}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
ports:
- 11929:11929
- 11929:11929/udp
expose:
- 9650
entrypoint: [/avalanchego/build/avalanchego]
command:
- --chain-config-dir=/config/pruned
- --db-type=pebbledb
- --http-allowed-hosts=*
- --http-host=
- --network-id=mainnet
- --public-ip=${IP}
- --staking-port=11929
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${AVALANCHE_MAINNET_GO_PRUNED_PEBBLEDB_DATA:-avalanche-mainnet-go-pruned-pebbledb}:/root/.avalanchego
- ./avalanche/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
avalanche-mainnet:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: avalanche-mainnet-client
RPC_PATH: /ext/bc/C/rpc
RPC_PORT: 9650
WS_PATH: /ext/bc/C/ws
WS_PORT: 9650
restart: unless-stopped
depends_on:
- avalanche-mainnet-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.avalanche-mainnet-go-pruned-pebbledb-stripprefix.stripprefix.prefixes=/avalanche-mainnet
- traefik.http.services.avalanche-mainnet-go-pruned-pebbledb.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb.rule=Host(`$DOMAIN`) && (Path(`/avalanche-mainnet`) || Path(`/avalanche-mainnet/`))}
- ${NO_SSL:+traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb.rule=Path(`/avalanche-mainnet`) || Path(`/avalanche-mainnet/`)}
- traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb.middlewares=avalanche-mainnet-go-pruned-pebbledb-stripprefix, ipallowlist
volumes:
avalanche-mainnet-go-pruned-pebbledb:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: avalanche
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...


@@ -1,4 +0,0 @@
{
"state-sync-enabled": false,
"pruning-enabled": false
}


@@ -1,35 +0,0 @@
services:
backup-http:
image: abassi/node-http-server:latest
restart: unless-stopped
volumes:
- /backup:/dir_to_serve
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.backup-server-stripprefix.stripprefix.prefixes=/backup"
- "traefik.http.services.backup-server.loadbalancer.server.port=8080"
- "traefik.http.routers.backup-server.entrypoints=websecure"
- "traefik.http.routers.backup-server.tls.certresolver=myresolver"
- "traefik.http.routers.backup-server.rule=Host(`$DOMAIN`) && PathPrefix(`/backup`)"
- "traefik.http.routers.backup-server.middlewares=backup-server-stripprefix"
networks:
- chains
backup-dav:
image: 117503445/go_webdav:latest
restart: unless-stopped
environment:
- "dav=/null,/webdav,null,null,false"
volumes:
- /backup:/webdav
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.backup-storage-stripprefix.stripprefix.prefixes=/dav"
- "traefik.http.services.backup-storage.loadbalancer.server.port=80"
- "traefik.http.routers.backup-storage.entrypoints=websecure"
- "traefik.http.routers.backup-storage.tls.certresolver=myresolver"
- "traefik.http.routers.backup-storage.rule=Host(`$DOMAIN`) && PathPrefix(`/dav`)"
- "traefik.http.routers.backup-storage.middlewares=backup-storage-stripprefix"
networks:
- chains


@@ -1,51 +0,0 @@
#!/bin/bash
backup_dir="/backup"
if [[ -n $2 ]]; then
echo "upload backup via webdav to $2"
else
if [ ! -d "$backup_dir" ]; then
echo "Error: /backup directory does not exist"
exit 1
fi
fi
# Convert the compose file's YAML to JSON and extract the list of volume keys
keys=$(yaml2json - < "/root/rpc/$1.yml" | jq -r '.volumes | keys[]')
# Iterate over the list of keys
for key in $keys; do
echo "Executing command with key: /var/lib/docker/volumes/rpc_$key/_data"
source_folder="/var/lib/docker/volumes/rpc_$key/_data"
folder_size=$(du -shL "$source_folder" | awk '{
size = $1
sub(/[Kk]$/, "", size) # Remove 'K' suffix if present
sub(/[Mm]$/, "", size) # Remove 'M' suffix if present
sub(/[Gg]$/, "", size) # Remove 'G' suffix if present
sub(/[Tt]$/, "", size) # Remove 'T' suffix if present
if ($1 ~ /[Kk]$/) {
size *= 0.000001 # Convert kilobytes to gigabytes
} else if ($1 ~ /[Mm]$/) {
size *= 0.001 # Convert megabytes to gigabytes
} else if ($1 ~ /[Tt]$/) {
size *= 1000 # Convert terabytes to gigabytes
}
print size
}')
folder_size_gb=$(printf "%.0f" "$folder_size")
target_file="rpc_$key-$(date +'%Y-%m-%d-%H-%M-%S')-${folder_size_gb}G.tar.zst"
#echo "$target_file"
if [[ -n $2 ]]; then
tar -cf - --dereference "$source_folder" | pv -pterb -s $(du -sb "$source_folder" | awk '{print $1}') | zstd | curl -X PUT --upload-file - "$2/null/uploading-$target_file"
curl -X MOVE -H "Destination: /null/$target_file" "$2/null/uploading-$target_file"
else
tar -cf - --dereference "$source_folder" | pv -pterb -s $(du -sb "$source_folder" | awk '{print $1}') | zstd -o "/backup/uploading-$target_file"
mv "/backup/uploading-$target_file" "/backup/$target_file"
fi
done
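A sketch of the intended invocation, inferred from the script itself: `$1` is a compose file path relative to /root/rpc (without the .yml extension) and `$2` is an optional WebDAV base URL. The script name, compose path, and URL below are illustrative assumptions:

# Archive every volume of one compose file into /backup:
./backup.sh avalanche/go/avalanche-mainnet-go-pruned-pebbledb

# Stream the archives to a WebDAV server instead of writing locally:
./backup.sh avalanche/go/avalanche-mainnet-go-pruned-pebbledb https://user:pass@backup.example.com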


@@ -1 +0,0 @@
op/erigon/base-mainnet-op-erigon-archive-trace.yml


@@ -1 +0,0 @@
op/reth/base-mainnet-op-reth-archive-trace.yml


@@ -1 +0,0 @@
op/reth/base-mainnet-op-reth-pruned-trace.yml


@@ -1 +0,0 @@
op/geth/base-mainnet-op-geth-pruned-pebble-path.yml


@@ -1 +0,0 @@
op/reth/base-sepolia-op-reth-pruned-trace.yml


@@ -1 +0,0 @@
op/geth/base-sepolia-op-geth-pruned-pebble-path.yml


@@ -1,6 +0,0 @@
networks:
chains:
driver: bridge
ipam:
config:
- subnet: ${CHAINS_SUBNET:-192.168.0.0/26}
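The /26 default provides 64 addresses (62 usable hosts), which caps how many containers can join the chains network; the subnet can be widened per deployment without editing this file, for example:

# Use a /24 (254 usable hosts) for larger deployments (example range):
CHAINS_SUBNET=10.10.0.0/24 docker compose up -d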


@@ -1,24 +0,0 @@
services:
benchmark-proxy:
build:
context: ./benchmark-proxy
dockerfile: Dockerfile
expose:
- "8080"
environment:
- LISTEN_ADDR=:8080
- SUMMARY_INTERVAL=60
- PRIMARY_BACKEND=${BENCHMARK_PROXY_PRIMARY_BACKEND}
- SECONDARY_BACKENDS=${BENCHMARK_PROXY_SECONDARY_BACKENDS}
restart: unless-stopped
networks:
- chains
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.benchmark-proxy-stripprefix.stripprefix.prefixes=/benchmark"
- "traefik.http.services.benchmark-proxy.loadbalancer.server.port=8080"
- "${NO_SSL:-traefik.http.routers.benchmark-proxy.entrypoints=websecure}"
- "${NO_SSL:-traefik.http.routers.benchmark-proxy.tls.certresolver=myresolver}"
- "${NO_SSL:-traefik.http.routers.benchmark-proxy.rule=Host(`$DOMAIN`) && PathPrefix(`/benchmark`)}"
- "${NO_SSL:+traefik.http.routers.benchmark-proxy.rule=PathPrefix(`/benchmark`)}"
- "traefik.http.routers.benchmark-proxy.middlewares=benchmark-proxy-stripprefix, ipwhitelist"


@@ -1,27 +0,0 @@
# Build stage
FROM golang:1.21-alpine AS builder
WORKDIR /app
# Initialize Go modules if not already done
RUN go mod init benchmark-proxy
# Add the dependency before building
RUN go get github.com/gorilla/websocket
# Copy source code
COPY . .
# Build the application with CGO disabled for a static binary
RUN CGO_ENABLED=0 go build -ldflags="-s -w" -o benchmark-proxy main.go
# Runtime stage: minimal image containing only the compiled binary
FROM alpine:latest
WORKDIR /app
# Copy the binary from the build stage
COPY --from=builder /app/benchmark-proxy .
# Run the application
ENTRYPOINT ["./benchmark-proxy"]


@@ -1,7 +0,0 @@
module benchmark-proxy
go 1.21
require github.com/gorilla/websocket v1.5.1
require golang.org/x/net v0.17.0 // indirect


@@ -1,4 +0,0 @@
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=

File diff suppressed because it is too large


@@ -1,22 +0,0 @@
[package]
name = "benchmark_proxy_rust"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
tokio = { version = "1", features = ["full"] }
hyper = { version = "0.14", features = ["full"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio-tungstenite = { version = "0.21", features = ["native-tls"] }
log = "0.4"
env_logger = "0.10"
dashmap = "5.5"
reqwest = { version = "0.11", features = ["json", "rustls-tls"], default-features = false }
thiserror = "1.0"
futures-util = "0.3"
http = "0.2"
url = "2.5"
lazy_static = "1.4.0"
# chrono is imported by secondary_probe.rs (Utc::now timestamps) but was
# missing from the original dependency list; added here as an assumed fix.
chrono = "0.4"


@@ -1,343 +0,0 @@
use crate::{config::AppConfig, structures::Backend};
use dashmap::DashMap;
use futures_util::{SinkExt, StreamExt};
use log::{debug, error, info, warn};
use serde_json::json;
use std::{
collections::HashMap,
sync::{Arc, Mutex},
time::{Duration, SystemTime},
};
use tokio::{sync::watch, task::JoinHandle, time::sleep};
use tokio_tungstenite::{
connect_async,
tungstenite::{protocol::Message as TungsteniteMessage, Error as TungsteniteError},
};
use url::Url;
const RECONNECT_DELAY: Duration = Duration::from_secs(10);
#[derive(serde::Deserialize, Debug)]
struct SubscriptionMessage {
#[allow(dead_code)] // May not be used if only checking method
jsonrpc: Option<String>,
method: Option<String>,
params: Option<SubscriptionParams>,
result: Option<serde_json::Value>, // For subscription ID confirmation
id: Option<serde_json::Value>, // For request echo
}
#[derive(serde::Deserialize, Debug)]
struct SubscriptionParams {
subscription: String,
result: HeaderData,
}
#[derive(serde::Deserialize, Debug)]
struct HeaderData {
number: String, // Hex string like "0x123"
// Add other fields like "hash" if ever needed for more advanced logic
}
pub struct BlockHeightTracker {
config: Arc<AppConfig>,
backends: Vec<Backend>,
block_heights: Arc<DashMap<String, u64>>,
last_update_times: Arc<DashMap<String, SystemTime>>,
shutdown_tx: watch::Sender<bool>,
tasks: Arc<Mutex<Vec<JoinHandle<()>>>>,
enable_detailed_logs: bool,
}
impl BlockHeightTracker {
pub fn new(
config: Arc<AppConfig>,
all_backends: &[Backend],
) -> Option<Arc<Self>> {
if !config.enable_block_height_tracking {
info!("BlockHeightTracker disabled by configuration.");
return None;
}
info!("Initializing BlockHeightTracker for {} backends.", all_backends.len());
let (shutdown_tx, _shutdown_rx) = watch::channel(false); // _shutdown_rx cloned by tasks
Some(Arc::new(Self {
config: config.clone(),
backends: all_backends.to_vec(), // Clones the slice into a Vec
block_heights: Arc::new(DashMap::new()),
last_update_times: Arc::new(DashMap::new()),
shutdown_tx,
tasks: Arc::new(Mutex::new(Vec::new())),
enable_detailed_logs: config.enable_detailed_logs,
}))
}
pub fn start_monitoring(self: Arc<Self>) {
if self.backends.is_empty() {
info!("BHT: No backends configured for monitoring.");
return;
}
info!("BHT: Starting block height monitoring for {} backends.", self.backends.len());
let mut tasks_guard = self.tasks.lock().unwrap();
for backend in self.backends.clone() {
// Only monitor if backend has a URL, primarily for non-primary roles or specific needs
// For this implementation, we assume all backends in the list are candidates.
let task_self = self.clone();
let task_backend = backend.clone(); // Clone backend for the task
let task_shutdown_rx = self.shutdown_tx.subscribe();
let task = tokio::spawn(async move {
task_self
.monitor_backend_connection(task_backend, task_shutdown_rx)
.await;
});
tasks_guard.push(task);
}
}
async fn monitor_backend_connection(
self: Arc<Self>,
backend: Backend,
mut shutdown_rx: watch::Receiver<bool>,
) {
info!("BHT: Starting monitoring for backend: {}", backend.name);
loop { // Outer reconnect loop
tokio::select! {
biased;
_ = shutdown_rx.changed() => {
if *shutdown_rx.borrow() {
info!("BHT: Shutdown signal received for {}, terminating monitoring.", backend.name);
break; // Break outer reconnect loop
}
}
_ = tokio::time::sleep(Duration::from_millis(10)) => { // Give a chance for shutdown signal before attempting connection
// Proceed to connection attempt
}
}
if *shutdown_rx.borrow() { break; }
let mut ws_url = backend.url.clone();
let scheme = if backend.url.scheme() == "https" { "wss" } else { "ws" };
if let Err(_e) = ws_url.set_scheme(scheme) {
error!("BHT: Failed to set scheme to {} for backend {}: {}", scheme, backend.name, backend.url);
sleep(RECONNECT_DELAY).await;
continue;
}
if self.enable_detailed_logs {
debug!("BHT: Attempting to connect to {} for backend {}", ws_url, backend.name);
}
match connect_async(ws_url.clone()).await {
Ok((ws_stream, _response)) => {
if self.enable_detailed_logs {
info!("BHT: Successfully connected to WebSocket for backend: {}", backend.name);
}
let (mut write, mut read) = ws_stream.split();
let subscribe_payload = json!({
"jsonrpc": "2.0",
"method": "eth_subscribe",
"params": ["newHeads"],
"id": 1 // Static ID for this subscription
});
if let Err(e) = write.send(TungsteniteMessage::Text(subscribe_payload.to_string())).await {
error!("BHT: Failed to send eth_subscribe to {}: {}. Retrying connection.", backend.name, e);
// Connection will be retried by the outer loop after delay
sleep(RECONNECT_DELAY).await;
continue;
}
if self.enable_detailed_logs {
debug!("BHT: Sent eth_subscribe payload to {}", backend.name);
}
// Inner message reading loop
loop {
tokio::select! {
biased;
_ = shutdown_rx.changed() => {
if *shutdown_rx.borrow() {
info!("BHT: Shutdown signal for {}, closing WebSocket and stopping.", backend.name);
// Attempt to close the WebSocket gracefully
let _ = write.send(TungsteniteMessage::Close(None)).await;
break; // Break inner message_read_loop
}
}
maybe_message = read.next() => {
match maybe_message {
Some(Ok(message)) => {
match message {
TungsteniteMessage::Text(text_msg) => {
if self.enable_detailed_logs {
debug!("BHT: Received text from {}: {}", backend.name, text_msg);
}
match serde_json::from_str::<SubscriptionMessage>(&text_msg) {
Ok(parsed_msg) => {
if parsed_msg.method.as_deref() == Some("eth_subscription") {
if let Some(params) = parsed_msg.params {
let block_num_str = params.result.number;
match u64::from_str_radix(block_num_str.trim_start_matches("0x"), 16) {
Ok(block_num) => {
self.block_heights.insert(backend.name.clone(), block_num);
self.last_update_times.insert(backend.name.clone(), SystemTime::now());
if self.enable_detailed_logs {
debug!("BHT: Updated block height for {}: {} (raw: {})", backend.name, block_num, block_num_str);
}
}
Err(e) => error!("BHT: Failed to parse block number hex '{}' for {}: {}", block_num_str, backend.name, e),
}
}
} else if parsed_msg.id == Some(json!(1)) && parsed_msg.result.is_some() {
if self.enable_detailed_logs {
info!("BHT: Received subscription confirmation from {}: {:?}", backend.name, parsed_msg.result);
}
} else {
if self.enable_detailed_logs {
debug!("BHT: Received other JSON message from {}: {:?}", backend.name, parsed_msg);
}
}
}
Err(e) => {
if self.enable_detailed_logs {
warn!("BHT: Failed to parse JSON from {}: {}. Message: {}", backend.name, e, text_msg);
}
}
}
}
TungsteniteMessage::Binary(bin_msg) => {
if self.enable_detailed_logs {
debug!("BHT: Received binary message from {} ({} bytes), ignoring.", backend.name, bin_msg.len());
}
}
TungsteniteMessage::Ping(ping_data) => {
if self.enable_detailed_logs { debug!("BHT: Received Ping from {}, sending Pong.", backend.name); }
// tokio-tungstenite handles Pongs automatically by default if feature "rustls-pong" or "native-tls-pong" is enabled.
// If not, manual send:
// if let Err(e) = write.send(TungsteniteMessage::Pong(ping_data)).await {
// error!("BHT: Failed to send Pong to {}: {}", backend.name, e);
// break; // Break inner loop, connection might be unstable
// }
}
TungsteniteMessage::Pong(_) => { /* Usually no action needed */ }
TungsteniteMessage::Close(_) => {
if self.enable_detailed_logs { info!("BHT: WebSocket closed by server for {}.", backend.name); }
break; // Break inner loop
}
TungsteniteMessage::Frame(_) => { /* Raw frame, usually not handled directly */ }
}
}
Some(Err(e)) => {
match e {
TungsteniteError::ConnectionClosed | TungsteniteError::AlreadyClosed => {
if self.enable_detailed_logs { info!("BHT: WebSocket connection closed for {}.", backend.name); }
}
_ => {
error!("BHT: Error reading from WebSocket for {}: {:?}. Attempting reconnect.", backend.name, e);
}
}
break; // Break inner loop, will trigger reconnect
}
None => {
if self.enable_detailed_logs { info!("BHT: WebSocket stream ended for {}. Attempting reconnect.", backend.name); }
break; // Break inner loop, will trigger reconnect
}
}
}
} // End of inner select
if *shutdown_rx.borrow() { break; } // Ensure inner loop breaks if shutdown occurred
} // End of inner message reading loop
}
Err(e) => {
warn!("BHT: Failed to connect to WebSocket for backend {}: {:?}. Retrying after delay.", backend.name, e);
}
}
// If we are here, it means the connection was dropped or failed. Wait before retrying.
if !*shutdown_rx.borrow() { // Don't sleep if shutting down
sleep(RECONNECT_DELAY).await;
}
} // End of outer reconnect loop
info!("BHT: Stopped monitoring backend {}.", backend.name);
}
pub fn is_secondary_behind(&self, secondary_name: &str) -> bool {
if !self.config.enable_block_height_tracking { return false; } // If tracking is off, assume not behind
let primary_info = self.backends.iter().find(|b| b.role == "primary");
let primary_name = match primary_info {
Some(b) => b.name.clone(),
None => {
if self.enable_detailed_logs {
warn!("BHT: No primary backend configured for is_secondary_behind check.");
}
return false;
}
};
let primary_height_opt = self.block_heights.get(&primary_name).map(|h_ref| *h_ref.value());
let primary_height = match primary_height_opt {
Some(h) => h,
None => {
if self.enable_detailed_logs {
debug!("BHT: Primary '{}' height unknown for is_secondary_behind check with {}.", primary_name, secondary_name);
}
return false; // Primary height unknown, can't reliably determine if secondary is behind
}
};
let secondary_height_opt = self.block_heights.get(secondary_name).map(|h_ref| *h_ref.value());
match secondary_height_opt {
Some(secondary_height_val) => {
if primary_height > secondary_height_val {
let diff = primary_height - secondary_height_val;
let is_behind = diff > self.config.max_blocks_behind;
if self.enable_detailed_logs && is_behind {
debug!("BHT: Secondary '{}' (height {}) is behind primary '{}' (height {}). Diff: {}, Max allowed: {}",
secondary_name, secondary_height_val, primary_name, primary_height, diff, self.config.max_blocks_behind);
}
return is_behind;
}
false // Secondary is not behind or is ahead
}
None => {
if self.enable_detailed_logs {
debug!("BHT: Secondary '{}' height unknown, considering it behind primary '{}' (height {}).", secondary_name, primary_name, primary_height);
}
true // Secondary height unknown, assume it's behind if primary height is known
}
}
}
pub fn get_block_height_status(&self) -> HashMap<String, u64> {
self.block_heights
.iter()
.map(|entry| (entry.key().clone(), *entry.value()))
.collect()
}
pub async fn stop(&self) {
info!("BHT: Sending shutdown signal to all monitoring tasks...");
if self.shutdown_tx.send(true).is_err() {
error!("BHT: Failed to send shutdown signal. Tasks might not terminate gracefully.");
}
let mut tasks_guard = self.tasks.lock().unwrap();
info!("BHT: Awaiting termination of {} monitoring tasks...", tasks_guard.len());
for task in tasks_guard.drain(..) {
if let Err(e) = task.await {
error!("BHT: Error awaiting task termination: {:?}", e);
}
}
info!("BHT: All monitoring tasks terminated.");
}
}
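The tracker consumes the standard `eth_subscribe("newHeads")` flow that `SubscriptionMessage` models: one confirmation reply carrying the subscription ID, then push notifications whose `params.result.number` is the hex block height. The exchange can be reproduced by hand (endpoint and IDs below are illustrative; websocat is an assumed external tool):

# Open an interactive WebSocket session, then paste the subscribe request on one line:
websocat ws://localhost:9650/ext/bc/C/ws
{"jsonrpc":"2.0","id":1,"method":"eth_subscribe","params":["newHeads"]}
# First reply carries the subscription ID (value illustrative):
#   {"jsonrpc":"2.0","id":1,"result":"0x9ce59a..."}
# Subsequent pushes are what the tracker parses for params.result.number:
#   {"jsonrpc":"2.0","method":"eth_subscription",
#    "params":{"subscription":"0x9ce59a...","result":{"number":"0x1b4", ...}}}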


@@ -1,169 +0,0 @@
use std::env;
use std::str::FromStr;
use std::time::Duration;
use url::Url;
use thiserror::Error;
use log::{warn, info};
#[derive(Debug, Error)]
pub enum ConfigError {
#[error("Failed to parse environment variable '{var_name}': {source}")]
ParseError {
var_name: String,
source: Box<dyn std::error::Error + Send + Sync>,
},
#[error("Missing required environment variable: {var_name}")]
MissingVariable { var_name: String },
#[error("Invalid URL format for '{var_name}': {url_str} - {source}")]
UrlParseError {
var_name: String,
url_str: String,
source: url::ParseError,
},
}
#[derive(Debug, Clone)]
pub struct AppConfig {
pub listen_addr: String,
pub primary_backend_url: Url,
pub secondary_backend_urls: Vec<Url>,
pub summary_interval_secs: u64,
pub enable_detailed_logs: bool,
pub enable_secondary_probing: bool,
pub probe_interval_secs: u64,
pub min_delay_buffer_ms: u64,
pub probe_methods: Vec<String>,
pub enable_block_height_tracking: bool,
pub max_blocks_behind: u64,
pub enable_expensive_method_routing: bool,
pub max_body_size_bytes: usize,
pub http_client_timeout_secs: u64,
pub request_context_timeout_secs: u64,
// Referenced by SecondaryProbe for per-backend health tracking but missing
// from the original struct; added here as an assumed fix.
pub max_error_threshold: u32,
pub recovery_threshold: u32,
}
// Helper function to get and parse environment variables
fn get_env_var<T: FromStr + std::fmt::Debug>(key: &str, default_value: T) -> T
where
<T as FromStr>::Err: std::fmt::Display,
{
match env::var(key) {
Ok(val_str) => match val_str.parse::<T>() {
Ok(val) => val,
Err(e) => {
warn!(
"Failed to parse environment variable '{}' with value '{}': {}. Using default: {:?}",
key, val_str, e, default_value
);
default_value
}
},
Err(_) => default_value,
}
}
// Helper function for boolean environment variables
fn get_env_var_bool(key: &str, default_value: bool) -> bool {
match env::var(key) {
Ok(val_str) => val_str.to_lowercase() == "true",
Err(_) => default_value,
}
}
// Helper function for Vec<String> from comma-separated string
fn get_env_var_vec_string(key: &str, default_value: Vec<String>) -> Vec<String> {
match env::var(key) {
Ok(val_str) => {
if val_str.is_empty() {
default_value
} else {
val_str.split(',').map(|s| s.trim().to_string()).collect()
}
}
Err(_) => default_value,
}
}
// Helper function for Vec<Url> from comma-separated string
fn get_env_var_vec_url(key: &str, default_value: Vec<Url>) -> Result<Vec<Url>, ConfigError> {
match env::var(key) {
Ok(val_str) => {
if val_str.is_empty() {
return Ok(default_value);
}
val_str
.split(',')
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.map(|url_str| {
Url::parse(url_str).map_err(|e| ConfigError::UrlParseError {
var_name: key.to_string(),
url_str: url_str.to_string(),
source: e,
})
})
.collect()
}
Err(_) => Ok(default_value),
}
}
pub fn load_from_env() -> Result<AppConfig, ConfigError> {
info!("Loading configuration from environment variables...");
let primary_backend_url_str = env::var("PRIMARY_BACKEND_URL").map_err(|_| {
ConfigError::MissingVariable {
var_name: "PRIMARY_BACKEND_URL".to_string(),
}
})?;
let primary_backend_url =
Url::parse(&primary_backend_url_str).map_err(|e| ConfigError::UrlParseError {
var_name: "PRIMARY_BACKEND_URL".to_string(),
url_str: primary_backend_url_str,
source: e,
})?;
let secondary_backend_urls = get_env_var_vec_url("SECONDARY_BACKEND_URLS", Vec::new())?;
let config = AppConfig {
listen_addr: get_env_var("LISTEN_ADDR", "127.0.0.1:8080".to_string()),
primary_backend_url,
secondary_backend_urls,
summary_interval_secs: get_env_var("SUMMARY_INTERVAL_SECS", 60),
enable_detailed_logs: get_env_var_bool("ENABLE_DETAILED_LOGS", false),
enable_secondary_probing: get_env_var_bool("ENABLE_SECONDARY_PROBING", true),
probe_interval_secs: get_env_var("PROBE_INTERVAL_SECS", 10),
min_delay_buffer_ms: get_env_var("MIN_DELAY_BUFFER_MS", 500),
probe_methods: get_env_var_vec_string(
"PROBE_METHODS",
vec!["eth_blockNumber".to_string(), "net_version".to_string()],
),
enable_block_height_tracking: get_env_var_bool("ENABLE_BLOCK_HEIGHT_TRACKING", true),
max_blocks_behind: get_env_var("MAX_BLOCKS_BEHIND", 5),
enable_expensive_method_routing: get_env_var_bool("ENABLE_EXPENSIVE_METHOD_ROUTING", false),
max_body_size_bytes: get_env_var("MAX_BODY_SIZE_BYTES", 10 * 1024 * 1024), // 10MB
http_client_timeout_secs: get_env_var("HTTP_CLIENT_TIMEOUT_SECS", 30),
request_context_timeout_secs: get_env_var("REQUEST_CONTEXT_TIMEOUT_SECS", 35),
max_error_threshold: get_env_var("MAX_ERROR_THRESHOLD", 5), // assumed default
recovery_threshold: get_env_var("RECOVERY_THRESHOLD", 3),   // assumed default
};
info!("Configuration loaded successfully: {:?}", config);
Ok(config)
}
impl AppConfig {
pub fn http_client_timeout(&self) -> Duration {
Duration::from_secs(self.http_client_timeout_secs)
}
pub fn request_context_timeout(&self) -> Duration {
Duration::from_secs(self.request_context_timeout_secs)
}
pub fn summary_interval(&self) -> Duration {
Duration::from_secs(self.summary_interval_secs)
}
pub fn probe_interval(&self) -> Duration {
Duration::from_secs(self.probe_interval_secs)
}
}
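Every field of `AppConfig` maps one-to-one onto an environment variable, so a full configuration is just a block of exports. Only PRIMARY_BACKEND_URL is required; everything else falls back to the defaults in `load_from_env`. Values and the binary name below are illustrative:

export PRIMARY_BACKEND_URL="http://primary-node:8545"          # required
export SECONDARY_BACKEND_URLS="http://fb-a:8545,http://fb-b:8545"
export LISTEN_ADDR="0.0.0.0:8080"
export PROBE_METHODS="eth_blockNumber,net_version"
export MAX_BLOCKS_BEHIND=5
export ENABLE_DETAILED_LOGS=true
./benchmark_proxy_rust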


@@ -1,12 +0,0 @@
pub mod structures;
pub mod config;
pub mod stats_collector;
pub mod secondary_probe;
pub mod block_height_tracker;
pub mod rpc_utils;
pub mod request_handler;
pub mod websocket_handler;
fn main() {
println!("Hello, world!");
}


@@ -1,272 +0,0 @@
use hyper::{Body, Request, Response, StatusCode};
use std::sync::Arc;
use tokio::sync::mpsc;
use log;
use crate::config::AppConfig;
use crate::stats_collector::StatsCollector;
use crate::secondary_probe::SecondaryProbe;
use crate::block_height_tracker::BlockHeightTracker;
use crate::structures::{Backend, BatchInfo};
use crate::rpc_utils;
#[derive(Debug)]
pub enum BackendResult {
Success {
backend_name: String,
response: reqwest::Response, // Send the whole reqwest::Response
duration: std::time::Duration,
},
Error {
backend_name: String,
error: reqwest::Error, // Send the reqwest::Error
duration: std::time::Duration,
},
}
fn calculate_secondary_delay(
batch_info: &crate::structures::BatchInfo,
probe: &Option<Arc<crate::secondary_probe::SecondaryProbe>>,
stats: &Arc<crate::stats_collector::StatsCollector>,
_config: &Arc<crate::config::AppConfig>, // _config might be used later for more complex logic
) -> std::time::Duration {
let mut max_delay = std::time::Duration::from_millis(0);
let default_delay = std::time::Duration::from_millis(25); // Default from Go
if batch_info.methods.is_empty() {
return default_delay;
}
for method_name in &batch_info.methods {
let current_method_delay = if let Some(p) = probe {
p.get_delay_for_method(method_name)
} else {
// This will use the stubbed method from StatsCollector which currently returns 25ms
stats.get_primary_p75_for_method(method_name)
};
if current_method_delay > max_delay {
max_delay = current_method_delay;
}
}
if max_delay == std::time::Duration::from_millis(0) { // if all methods were unknown or had 0 delay
if let Some(p) = probe {
// Go code uses: probe.minResponseTime + probe.minDelayBuffer
// probe.get_delay_for_method("") would approximate this if it falls back to min_response_time + buffer
return p.get_delay_for_method(""); // Assuming empty method falls back to base delay
}
return default_delay;
}
max_delay
}
pub async fn handle_http_request(
req: Request<Body>,
config: Arc<AppConfig>,
stats_collector: Arc<StatsCollector>,
http_client: Arc<reqwest::Client>,
secondary_probe: Option<Arc<SecondaryProbe>>,
block_height_tracker: Option<Arc<BlockHeightTracker>>,
all_backends: Arc<Vec<Backend>>,
) -> Result<Response<Body>, Box<dyn std::error::Error + Send + Sync>> {
let _overall_start_time = std::time::Instant::now(); // To be used later with request_context_timeout
// 1. Read and limit request body
let limited_body = hyper::body::Limited::new(req.into_body(), config.max_body_size_bytes);
let body_bytes = match hyper::body::to_bytes(limited_body).await {
Ok(bytes) => bytes,
Err(e) => {
log::error!("Failed to read request body or limit exceeded: {}", e);
let mut err_resp = Response::new(Body::from(format!("Request body error: {}", e)));
*err_resp.status_mut() = if e.is::<hyper::Error>() && e.downcast_ref::<hyper::Error>().map_or(false, |he| he.is_body_write_aborted() || format!("{}", he).contains("Too Large")) { // A bit heuristic for "Too Large"
StatusCode::PAYLOAD_TOO_LARGE
} else {
StatusCode::BAD_REQUEST
};
return Ok(err_resp);
}
};
// 2. Parse Batch Info
let batch_info = match rpc_utils::parse_batch_info(&body_bytes) {
Ok(info) => info,
Err(e) => {
log::error!("Invalid JSON-RPC request: {}", e);
let mut err_resp = Response::new(Body::from(format!("Invalid JSON-RPC: {}", e)));
*err_resp.status_mut() = StatusCode::BAD_REQUEST;
return Ok(err_resp);
}
};
let display_method = if batch_info.is_batch {
format!("batch[{}]", batch_info.request_count)
} else {
batch_info.methods.get(0).cloned().unwrap_or_else(|| "unknown".to_string())
};
log::info!("Received request: Method: {}, IsBatch: {}, NumMethods: {}", display_method, batch_info.is_batch, batch_info.methods.len());
// 3. Calculate Secondary Delay
let secondary_delay = calculate_secondary_delay(&batch_info, &secondary_probe, &stats_collector, &config);
if config.enable_detailed_logs {
log::debug!("Method: {}, Calculated secondary delay: {:?}", display_method, secondary_delay);
}
// 4. Backend Filtering & Expensive Method Routing
let mut target_backends: Vec<Backend> = (*all_backends).clone();
if batch_info.has_stateful {
log::debug!("Stateful method detected in request '{}', targeting primary only.", display_method);
target_backends.retain(|b| b.role == "primary");
} else {
// Filter by block height
if let Some(bht) = &block_height_tracker {
if config.enable_block_height_tracking { // Check if feature is enabled
target_backends.retain(|b| {
if b.role != "primary" && bht.is_secondary_behind(&b.name) {
if config.enable_detailed_logs { log::info!("Skipping secondary {}: behind in block height for request {}", b.name, display_method); }
// TODO: Add stat for skipped due to block height
false
} else { true }
});
}
}
// Filter by probe availability
if let Some(sp) = &secondary_probe {
if config.enable_secondary_probing { // Check if feature is enabled
target_backends.retain(|b| {
if b.role != "primary" && !sp.is_backend_available(&b.name) {
if config.enable_detailed_logs { log::info!("Skipping secondary {}: not available via probe for request {}", b.name, display_method); }
// TODO: Add stat for skipped due to probe unavailable
false
} else { true }
});
}
}
}
let is_req_expensive = batch_info.methods.iter().any(|m| rpc_utils::is_expensive_method(m)) ||
batch_info.methods.iter().any(|m| stats_collector.is_expensive_method_by_stats(m)); // Stubbed
if config.enable_expensive_method_routing && is_req_expensive && !batch_info.has_stateful {
log::debug!("Expensive method detected in request {}. Attempting to route to a secondary.", display_method);
// TODO: Complex expensive method routing logic.
// For now, this placeholder doesn't change target_backends.
// A real implementation would try to find the best secondary or stick to primary if none are suitable.
}
// 5. Concurrent Request Dispatch
let (response_tx, mut response_rx) = mpsc::channel::<BackendResult>(target_backends.len().max(1));
let mut dispatched_count = 0;
for backend in target_backends { // target_backends is now filtered
dispatched_count += 1;
let task_body_bytes = body_bytes.clone();
let task_http_client = http_client.clone();
let task_response_tx = response_tx.clone();
// task_backend_name, task_backend_url, task_backend_role are cloned from 'backend'
let task_backend_name = backend.name.clone();
let task_backend_url = backend.url.clone();
let task_backend_role = backend.role.clone();
let task_secondary_delay = secondary_delay;
let task_display_method = display_method.clone(); // clone: display_method must not be moved into the spawned task
let task_config_detailed_logs = config.enable_detailed_logs;
let task_http_timeout = config.http_client_timeout(); // Get Duration from config
tokio::spawn(async move {
let backend_req_start_time = std::time::Instant::now();
if task_backend_role != "primary" {
if task_config_detailed_logs {
log::debug!("Secondary backend {} for request {} delaying for {:?}", task_backend_name, display_method, task_secondary_delay);
}
tokio::time::sleep(task_secondary_delay).await;
}
let result = task_http_client
.post(task_backend_url)
.header("Content-Type", "application/json")
// TODO: Copy relevant headers from original request 'req.headers()'
.body(task_body_bytes)
.timeout(task_http_timeout)
.send()
.await;
let duration = backend_req_start_time.elapsed();
match result {
Ok(resp) => {
if task_config_detailed_logs {
log::debug!("Backend {} for request {} responded with status {}", task_backend_name, display_method, resp.status());
}
if task_response_tx.send(BackendResult::Success {
backend_name: task_backend_name,
response: resp,
duration,
}).await.is_err() {
log::error!("Failed to send success to channel for request {}: receiver dropped", display_method);
}
}
Err(err) => {
if task_config_detailed_logs {
log::error!("Backend {} for request {} request failed: {}", task_backend_name, display_method, err);
}
if task_response_tx.send(BackendResult::Error {
backend_name: task_backend_name,
error: err,
duration,
}).await.is_err() {
log::error!("Failed to send error to channel for request {}: receiver dropped", display_method);
}
}
}
});
}
drop(response_tx);
if dispatched_count == 0 {
log::warn!("No backends available to dispatch request for method {}", display_method);
// TODO: Add stat for no backend available
let mut err_resp = Response::new(Body::from("No available backends for this request type."));
*err_resp.status_mut() = StatusCode::SERVICE_UNAVAILABLE;
return Ok(err_resp);
}
// Placeholder: return the first received response
if let Some(first_result) = response_rx.recv().await {
if config.enable_detailed_logs {
log::info!("First backend response for request {}: {:?}", display_method, first_result);
}
match first_result {
BackendResult::Success { backend_name: _, response: reqwest_resp, duration: _ } => {
let mut hyper_resp_builder = Response::builder().status(reqwest_resp.status());
for (name, value) in reqwest_resp.headers().iter() {
hyper_resp_builder = hyper_resp_builder.header(name.clone(), value.clone());
}
let hyper_resp = hyper_resp_builder
.body(Body::wrap_stream(reqwest_resp.bytes_stream()))
.unwrap_or_else(|e| {
log::error!("Error building response from backend for request {}: {}", display_method, e);
let mut err_resp = Response::new(Body::from("Error processing backend response"));
*err_resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
err_resp
});
return Ok(hyper_resp);
}
BackendResult::Error { backend_name, error, duration: _ } => {
log::error!("First response for request {} was an error from {}: {}", display_method, backend_name, error);
let mut err_resp = Response::new(Body::from(format!("Error from backend {}: {}", backend_name, error)));
*err_resp.status_mut() = StatusCode::BAD_GATEWAY;
return Ok(err_resp);
}
}
} else {
log::error!("No responses received from any dispatched backend for method {}", display_method);
// TODO: Add stat for no response received
let mut err_resp = Response::new(Body::from("No response from any backend."));
*err_resp.status_mut() = StatusCode::GATEWAY_TIMEOUT;
return Ok(err_resp);
}
// Note: Overall request context timeout and full response aggregation logic are still TODOs.
}


@@ -1,92 +0,0 @@
use crate::structures::{BatchInfo, JsonRpcRequest};
use std::collections::HashSet;
use log;
use serde_json; // Added for parsing
fn get_stateful_methods() -> HashSet<&'static str> {
[
"eth_newFilter", "eth_newBlockFilter", "eth_newPendingTransactionFilter",
"eth_getFilterChanges", "eth_getFilterLogs", "eth_uninstallFilter",
"eth_subscribe", "eth_unsubscribe", "eth_subscription", // "eth_subscription" is a notification, not a method client calls.
// But if it appears in a batch for some reason, it's state-related.
]
.iter()
.cloned()
.collect()
}
fn get_expensive_methods() -> HashSet<&'static str> {
[
// Ethereum Debug API (typically Geth-specific)
"debug_traceBlockByHash", "debug_traceBlockByNumber", "debug_traceCall", "debug_traceTransaction",
"debug_storageRangeAt", "debug_getModifiedAccountsByHash", "debug_getModifiedAccountsByNumber",
// Erigon/OpenEthereum Trace Module (more standard)
"trace_block", "trace_call", "trace_callMany", "trace_filter", "trace_get", "trace_rawTransaction",
"trace_replayBlockTransactions", "trace_replayTransaction", "trace_transaction",
// Specific combinations that might be considered extra expensive
"trace_replayBlockTransactions#vmTrace", // Example, depends on actual usage if # is method part
"trace_replayTransaction#vmTrace",
]
.iter()
.cloned()
.collect()
}
lazy_static::lazy_static! {
static ref STATEFUL_METHODS: HashSet<&'static str> = get_stateful_methods();
static ref EXPENSIVE_METHODS: HashSet<&'static str> = get_expensive_methods();
}
pub fn is_stateful_method(method: &str) -> bool {
STATEFUL_METHODS.contains(method)
}
pub fn is_expensive_method(method: &str) -> bool {
EXPENSIVE_METHODS.contains(method)
}
pub fn parse_batch_info(body_bytes: &[u8]) -> Result<BatchInfo, String> {
if body_bytes.is_empty() {
return Err("Empty request body".to_string());
}
// Try parsing as a batch (array) first
match serde_json::from_slice::<Vec<JsonRpcRequest>>(body_bytes) {
Ok(batch_reqs) => {
if batch_reqs.is_empty() {
return Err("Empty batch request".to_string());
}
let mut methods = Vec::new();
let mut has_stateful = false;
for req in &batch_reqs {
methods.push(req.method.clone());
if is_stateful_method(&req.method) {
has_stateful = true;
}
}
Ok(BatchInfo {
is_batch: true,
methods,
request_count: batch_reqs.len(),
has_stateful,
})
}
Err(_e_batch) => {
// If not a batch, try parsing as a single request
match serde_json::from_slice::<JsonRpcRequest>(body_bytes) {
Ok(single_req) => Ok(BatchInfo {
is_batch: false,
methods: vec![single_req.method.clone()],
request_count: 1,
has_stateful: is_stateful_method(&single_req.method),
}),
Err(_e_single) => {
// Log the actual errors if needed for debugging, but return a generic one
log::debug!("Failed to parse as batch: {}", _e_batch);
log::debug!("Failed to parse as single: {}", _e_single);
Err("Invalid JSON-RPC request format. Not a valid single request or batch.".to_string())
}
}
}
}
}
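`parse_batch_info` tries the array shape first, then falls back to a single object; one stateful method anywhere in a batch marks the whole batch (`has_stateful = true`), which the request handler later uses to pin it to the primary. Both shapes, as illustrative curl calls against a locally running proxy:

# Single request -> BatchInfo { is_batch: false, request_count: 1, has_stateful: false }
curl -s -X POST http://localhost:8080 -H 'Content-Type: application/json' \
--data '{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}'

# Batch of two; eth_newFilter is stateful, so has_stateful = true for the whole batch:
curl -s -X POST http://localhost:8080 -H 'Content-Type: application/json' \
--data '[{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]},{"jsonrpc":"2.0","id":2,"method":"eth_newFilter","params":[{}]}]'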


@@ -1,383 +0,0 @@
use crate::{
config::AppConfig,
structures::{Backend, JsonRpcRequest},
};
use chrono::Utc;
use dashmap::DashMap;
use log::{debug, error, info, warn};
use reqwest::Client;
use serde_json::json;
use std::{
cmp::min,
sync::{
atomic::{AtomicU32, Ordering},
Arc, Mutex, RwLock,
},
time::{Duration, SystemTime},
};
use tokio::sync::watch;
const PROBE_REQUEST_COUNT: usize = 10;
const DEFAULT_MIN_RESPONSE_TIME_MS: u64 = 15;
const PROBE_CYCLE_DELAY_MS: u64 = 10;
pub struct SecondaryProbe {
config: Arc<AppConfig>,
backends: Vec<Backend>, // Only secondary backends
client: Client,
min_response_time: Arc<RwLock<Duration>>,
method_timings: Arc<DashMap<String, Duration>>, // method_name -> min_duration
backend_timings: Arc<DashMap<String, Duration>>, // backend_name -> min_duration
// Health state per backend
backend_available: Arc<DashMap<String, bool>>,
backend_error_count: Arc<DashMap<String, AtomicU32>>,
backend_consecutive_success_count: Arc<DashMap<String, AtomicU32>>, // For recovery
backend_last_success: Arc<DashMap<String, Mutex<SystemTime>>>,
last_probe_time: Arc<Mutex<SystemTime>>,
failure_count: Arc<AtomicU32>, // Consecutive overall probe cycle failures
last_success_time: Arc<Mutex<SystemTime>>, // Last time any probe in an overall cycle succeeded
shutdown_tx: watch::Sender<bool>,
shutdown_rx: watch::Receiver<bool>,
enable_detailed_logs: bool,
}
impl SecondaryProbe {
pub fn new(
config: Arc<AppConfig>,
all_backends: &[Backend],
client: Client,
) -> Option<Arc<Self>> {
let secondary_backends: Vec<Backend> = all_backends
.iter()
.filter(|b| b.role.to_lowercase() == "secondary")
.cloned()
.collect();
if secondary_backends.is_empty() {
info!("No secondary backends configured. SecondaryProbe will not be initialized.");
return None;
}
info!(
"Initializing SecondaryProbe for {} secondary backends.",
secondary_backends.len()
);
let backend_available = Arc::new(DashMap::new());
let backend_error_count = Arc::new(DashMap::new());
let backend_consecutive_success_count = Arc::new(DashMap::new());
let backend_last_success = Arc::new(DashMap::new());
for backend in &secondary_backends {
backend_available.insert(backend.name.clone(), true);
backend_error_count.insert(backend.name.clone(), AtomicU32::new(0));
backend_consecutive_success_count.insert(backend.name.clone(), AtomicU32::new(0));
backend_last_success.insert(backend.name.clone(), Mutex::new(SystemTime::now()));
info!(" - Backend '{}' ({}) initialized as available.", backend.name, backend.url);
}
let (shutdown_tx, shutdown_rx) = watch::channel(false);
Some(Arc::new(Self {
config: config.clone(),
backends: secondary_backends,
client,
min_response_time: Arc::new(RwLock::new(Duration::from_millis(
DEFAULT_MIN_RESPONSE_TIME_MS, // Or load from config if needed
))),
method_timings: Arc::new(DashMap::new()),
backend_timings: Arc::new(DashMap::new()),
backend_available,
backend_error_count,
backend_consecutive_success_count,
backend_last_success,
last_probe_time: Arc::new(Mutex::new(SystemTime::now())),
failure_count: Arc::new(AtomicU32::new(0)),
last_success_time: Arc::new(Mutex::new(SystemTime::now())),
shutdown_tx,
shutdown_rx, // Receiver is cloneable
enable_detailed_logs: config.enable_detailed_logs,
}))
}
pub fn start_periodic_probing(self: Arc<Self>) {
if self.backends.is_empty() {
info!("No secondary backends to probe. Periodic probing will not start.");
return;
}
info!(
"Starting periodic probing for {} secondary backends. Probe interval: {}s. Probe methods: {:?}. Max errors: {}, Recovery threshold: {}.",
self.backends.len(),
self.config.probe_interval_secs,
self.config.probe_methods,
self.config.max_error_threshold,
self.config.recovery_threshold
);
// Run initial probe
let initial_probe_self = self.clone();
tokio::spawn(async move {
if initial_probe_self.enable_detailed_logs {
debug!("Running initial probe...");
}
initial_probe_self.run_probe().await;
if initial_probe_self.enable_detailed_logs {
debug!("Initial probe finished.");
}
});
// Start periodic probing task
let mut interval = tokio::time::interval(self.config.probe_interval());
let mut shutdown_rx_clone = self.shutdown_rx.clone();
tokio::spawn(async move {
loop {
tokio::select! {
_ = interval.tick() => {
if self.enable_detailed_logs {
debug!("Running periodic probe cycle...");
}
self.run_probe().await;
if self.enable_detailed_logs {
debug!("Periodic probe cycle finished.");
}
}
res = shutdown_rx_clone.changed() => {
if res.is_err() || *shutdown_rx_clone.borrow() {
info!("SecondaryProbe: Shutdown signal received or channel closed, stopping periodic probing.");
break;
}
}
}
}
info!("SecondaryProbe: Periodic probing task has stopped.");
});
}
async fn run_probe(&self) {
let mut successful_probes_in_overall_cycle = 0;
let mut temp_method_timings: DashMap<String, Duration> = DashMap::new(); // method_name -> min_duration for this cycle
let mut temp_backend_timings: DashMap<String, Duration> = DashMap::new(); // backend_name -> min_duration for this cycle
let mut temp_overall_min_response_time = Duration::MAX;
for backend in &self.backends {
let mut backend_cycle_successful_probes = 0;
let mut backend_cycle_min_duration = Duration::MAX;
for method_name in &self.config.probe_methods {
let mut method_min_duration_for_backend_this_cycle = Duration::MAX;
for i in 0..PROBE_REQUEST_COUNT {
let probe_id = format!(
"probe-{}-{}-{}-{}",
backend.name,
method_name,
Utc::now().timestamp_nanos_opt().unwrap_or_else(|| SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap_or_default().as_nanos() as i64),
i
);
let request_body = JsonRpcRequest {
method: method_name.clone(),
params: Some(json!([])),
id: Some(json!(probe_id)),
jsonrpc: Some("2.0".to_string()),
};
let start_time = SystemTime::now();
match self.client.post(backend.url.clone()).json(&request_body).timeout(self.config.http_client_timeout()).send().await {
Ok(response) => {
let duration = start_time.elapsed().unwrap_or_default();
if response.status().is_success() {
// TODO: Optionally parse JSON RPC response for error field
backend_cycle_successful_probes += 1;
successful_probes_in_overall_cycle += 1;
method_min_duration_for_backend_this_cycle = min(method_min_duration_for_backend_this_cycle, duration);
backend_cycle_min_duration = min(backend_cycle_min_duration, duration);
temp_overall_min_response_time = min(temp_overall_min_response_time, duration);
if self.enable_detailed_logs {
debug!("Probe success: {} method {} ID {} took {:?}.", backend.name, method_name, probe_id, duration);
}
} else {
if self.enable_detailed_logs {
warn!("Probe failed (HTTP status {}): {} method {} ID {}. Body: {:?}", response.status(), backend.name, method_name, probe_id, response.text().await.unwrap_or_default());
}
}
}
Err(e) => {
if self.enable_detailed_logs {
warn!("Probe error (request failed): {} method {} ID {}: {:?}", backend.name, method_name, probe_id, e);
}
}
}
tokio::time::sleep(Duration::from_millis(PROBE_CYCLE_DELAY_MS)).await;
} // End of PROBE_REQUEST_COUNT loop
if method_min_duration_for_backend_this_cycle != Duration::MAX {
temp_method_timings
.entry(method_name.clone())
.and_modify(|current_min| *current_min = min(*current_min, method_min_duration_for_backend_this_cycle))
.or_insert(method_min_duration_for_backend_this_cycle);
}
} // End of probe_methods loop
if backend_cycle_min_duration != Duration::MAX {
temp_backend_timings.insert(backend.name.clone(), backend_cycle_min_duration);
}
self.update_backend_health(&backend.name, backend_cycle_successful_probes > 0);
if self.enable_detailed_logs {
debug!(
"Probe sub-cycle for backend {}: {} successful probes. Min duration for this backend this cycle: {:?}. Current health: available={}",
backend.name,
backend_cycle_successful_probes,
if backend_cycle_min_duration == Duration::MAX { None } else { Some(backend_cycle_min_duration) },
self.is_backend_available(&backend.name)
);
}
} // End of backends loop
// Update overall timings if any probe in the cycle was successful
if successful_probes_in_overall_cycle > 0 {
if temp_overall_min_response_time != Duration::MAX {
let mut min_resp_time_guard = self.min_response_time.write().unwrap();
*min_resp_time_guard = min(*min_resp_time_guard, temp_overall_min_response_time);
if self.enable_detailed_logs {
debug!("Global min_response_time updated to: {:?}", *min_resp_time_guard);
}
}
for entry in temp_method_timings.iter() {
self.method_timings
.entry(entry.key().clone())
.and_modify(|current_min| *current_min = min(*current_min, *entry.value()))
.or_insert(*entry.value());
if self.enable_detailed_logs {
debug!("Global method_timing for {} updated/set to: {:?}", entry.key(), *entry.value());
}
}
for entry in temp_backend_timings.iter() {
self.backend_timings
.entry(entry.key().clone())
.and_modify(|current_min| *current_min = min(*current_min, *entry.value()))
.or_insert(*entry.value());
if self.enable_detailed_logs {
debug!("Global backend_timing for {} updated/set to: {:?}", entry.key(), *entry.value());
}
}
self.failure_count.store(0, Ordering::Relaxed);
*self.last_success_time.lock().unwrap() = SystemTime::now();
if self.enable_detailed_logs {
info!("Overall probe cycle completed with {} successes. Overall failure count reset.", successful_probes_in_overall_cycle);
}
} else {
let prev_failures = self.failure_count.fetch_add(1, Ordering::Relaxed);
warn!(
"Overall probe cycle completed with NO successful probes. Overall failure count incremented to {}.",
prev_failures + 1
);
}
*self.last_probe_time.lock().unwrap() = SystemTime::now();
}
fn update_backend_health(&self, backend_name: &str, is_cycle_success: bool) {
let current_availability = self.is_backend_available(backend_name);
let error_count_entry = self.backend_error_count.entry(backend_name.to_string()).or_insert_with(|| AtomicU32::new(0));
let consecutive_success_entry = self.backend_consecutive_success_count.entry(backend_name.to_string()).or_insert_with(|| AtomicU32::new(0));
if is_cycle_success {
error_count_entry.store(0, Ordering::Relaxed);
consecutive_success_entry.fetch_add(1, Ordering::Relaxed);
if let Some(mut last_success_guard) = self.backend_last_success.get_mut(backend_name) {
*last_success_guard.lock().unwrap() = SystemTime::now();
}
if !current_availability {
let successes = consecutive_success_entry.load(Ordering::Relaxed);
if successes >= self.config.recovery_threshold {
self.backend_available.insert(backend_name.to_string(), true);
info!("Backend {} recovered and is now AVAILABLE ({} consecutive successes met threshold {}).", backend_name, successes, self.config.recovery_threshold);
consecutive_success_entry.store(0, Ordering::Relaxed); // Reset after recovery
} else if self.enable_detailed_logs {
debug!("Backend {} had a successful probe cycle. Consecutive successes: {}. Needs {} for recovery.", backend_name, successes, self.config.recovery_threshold);
}
} else if self.enable_detailed_logs {
debug!("Backend {} remains available after a successful probe cycle.", backend_name);
}
} else { // Probe cycle failed for this backend
consecutive_success_entry.store(0, Ordering::Relaxed); // Reset consecutive successes on any failure
let current_errors = error_count_entry.fetch_add(1, Ordering::Relaxed) + 1; // +1 because fetch_add returns previous value
if current_availability && current_errors >= self.config.max_error_threshold {
self.backend_available.insert(backend_name.to_string(), false);
warn!(
"Backend {} has become UNAVAILABLE due to {} errors (threshold {}).",
backend_name, current_errors, self.config.max_error_threshold
);
} else if self.enable_detailed_logs {
if current_availability {
debug!("Backend {} is still available but its error count increased to {}. Max errors before unavailable: {}.", backend_name, current_errors, self.config.max_error_threshold);
} else {
debug!("Backend {} remains UNAVAILABLE, error count now {}.", backend_name, current_errors);
}
}
}
}
pub fn get_delay_for_method(&self, method_name: &str) -> Duration {
let base_delay = self
.method_timings
.get(method_name)
.map(|timing_ref| *timing_ref.value())
.unwrap_or_else(|| *self.min_response_time.read().unwrap()); // Read lock
let buffer = Duration::from_millis(self.config.min_delay_buffer_ms);
let calculated_delay = base_delay.saturating_add(buffer);
let overall_failures = self.failure_count.load(Ordering::Relaxed);
// Consider last_success_time to see if failures are recent and persistent
let time_since_last_overall_success = SystemTime::now()
.duration_since(*self.last_success_time.lock().unwrap()) // Lock for last_success_time
.unwrap_or_default();
// Fallback logic: if many consecutive failures AND last success was long ago
if overall_failures >= 3 && time_since_last_overall_success > self.config.probe_interval().saturating_mul(3) {
warn!(
"Probes failing ({} consecutive, last overall success {:?} ago). Using conservative fixed delay for method {}.",
overall_failures, time_since_last_overall_success, method_name
);
return Duration::from_millis(self.config.min_delay_buffer_ms.saturating_mul(3));
}
if self.enable_detailed_logs {
debug!("Delay for method '{}': base {:?}, buffer {:?}, final {:?}", method_name, base_delay, buffer, calculated_delay);
}
calculated_delay
}
pub fn is_backend_available(&self, backend_name: &str) -> bool {
self.backend_available
.get(backend_name)
.map_or(false, |entry| *entry.value())
}
pub fn stop(&self) {
info!("SecondaryProbe: Sending shutdown signal...");
if self.shutdown_tx.send(true).is_err() {
error!("Failed to send shutdown signal to SecondaryProbe task. It might have already stopped or had no receiver.");
}
}
}
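A note on the health logic: update_backend_health is a hysteresis. max_error_threshold consecutive failed probe cycles trip a backend to UNAVAILABLE, and it only returns to rotation after recovery_threshold consecutive successful cycles. A minimal standalone model of that state machine (illustrative types, not the structs above):

struct BackendHealth {
    available: bool,
    errors: u32,
    consecutive_successes: u32,
    max_error_threshold: u32,
    recovery_threshold: u32,
}

impl BackendHealth {
    fn record_cycle(&mut self, success: bool) {
        if success {
            self.errors = 0;
            self.consecutive_successes += 1;
            if !self.available && self.consecutive_successes >= self.recovery_threshold {
                self.available = true; // recovered
                self.consecutive_successes = 0;
            }
        } else {
            self.consecutive_successes = 0;
            self.errors += 1;
            if self.available && self.errors >= self.max_error_threshold {
                self.available = false; // tripped
            }
        }
    }
}

fn main() {
    let mut h = BackendHealth { available: true, errors: 0, consecutive_successes: 0, max_error_threshold: 3, recovery_threshold: 2 };
    for outcome in [false, false, false, true, true] {
        h.record_cycle(outcome);
    }
    assert!(h.available); // tripped after 3 failures, recovered after 2 successes
}

The asymmetry is the point: a flapping backend is not put back into rotation after a single good cycle.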

View File

@@ -1,290 +0,0 @@
use crate::structures::{ResponseStats, WebSocketStats, CuDataPoint, Backend};
use crate::block_height_tracker::BlockHeightTracker;
use crate::secondary_probe::SecondaryProbe;
use std::time::{Duration, SystemTime};
use std::sync::{Arc, Mutex, atomic::{AtomicU64, Ordering}};
use dashmap::DashMap;
use log::{debug, error, info, warn};
pub struct StatsCollector {
pub request_stats: Arc<Mutex<Vec<ResponseStats>>>,
pub method_stats: Arc<DashMap<String, Mutex<Vec<Duration>>>>, // method_name -> list of durations for primary
pub backend_method_stats: Arc<DashMap<String, DashMap<String, Mutex<Vec<Duration>>>>>, // backend_name -> method_name -> list of durations
pub backend_wins: Arc<DashMap<String, AtomicU64>>, // backend_name -> count
pub method_backend_wins: Arc<DashMap<String, DashMap<String, AtomicU64>>>, // method_name -> backend_name -> count
pub first_response_durations: Arc<Mutex<Vec<Duration>>>,
pub actual_first_response_durations: Arc<Mutex<Vec<Duration>>>,
pub method_first_response_durations: Arc<DashMap<String, Mutex<Vec<Duration>>>>,
pub method_actual_first_response_durations: Arc<DashMap<String, Mutex<Vec<Duration>>>>,
pub total_requests: Arc<AtomicU64>,
pub error_count: Arc<AtomicU64>,
pub skipped_secondary_requests: Arc<AtomicU64>,
pub ws_stats: Arc<Mutex<Vec<WebSocketStats>>>,
pub total_ws_connections: Arc<AtomicU64>,
pub app_start_time: SystemTime,
pub interval_start_time: Arc<Mutex<SystemTime>>,
pub summary_interval: Duration,
pub method_cu_prices: Arc<DashMap<String, u64>>,
pub total_cu: Arc<AtomicU64>,
pub method_cu: Arc<DashMap<String, AtomicU64>>, // method_name -> total CU for this method in interval
pub historical_cu: Arc<Mutex<Vec<CuDataPoint>>>,
pub has_secondary_backends: bool,
// Placeholders for probe and tracker - actual types will be defined later
// pub secondary_probe: Option<Arc<SecondaryProbe>>,
// pub block_height_tracker: Option<Arc<BlockHeightTracker>>,
}
impl StatsCollector {
pub fn new(summary_interval: Duration, has_secondary_backends: bool) -> Self {
let method_cu_prices = Arc::new(DashMap::new());
Self::init_cu_prices(&method_cu_prices);
StatsCollector {
request_stats: Arc::new(Mutex::new(Vec::new())),
method_stats: Arc::new(DashMap::new()),
backend_method_stats: Arc::new(DashMap::new()),
backend_wins: Arc::new(DashMap::new()),
method_backend_wins: Arc::new(DashMap::new()),
first_response_durations: Arc::new(Mutex::new(Vec::new())),
actual_first_response_durations: Arc::new(Mutex::new(Vec::new())),
method_first_response_durations: Arc::new(DashMap::new()),
method_actual_first_response_durations: Arc::new(DashMap::new()),
total_requests: Arc::new(AtomicU64::new(0)),
error_count: Arc::new(AtomicU64::new(0)),
skipped_secondary_requests: Arc::new(AtomicU64::new(0)),
ws_stats: Arc::new(Mutex::new(Vec::new())),
total_ws_connections: Arc::new(AtomicU64::new(0)),
app_start_time: SystemTime::now(),
interval_start_time: Arc::new(Mutex::new(SystemTime::now())),
summary_interval,
method_cu_prices,
total_cu: Arc::new(AtomicU64::new(0)),
method_cu: Arc::new(DashMap::new()),
historical_cu: Arc::new(Mutex::new(Vec::new())),
has_secondary_backends,
}
}
fn init_cu_prices(prices_map: &DashMap<String, u64>) {
// Base CU
prices_map.insert("eth_call".to_string(), 100);
prices_map.insert("eth_estimateGas".to_string(), 150);
prices_map.insert("eth_getLogs".to_string(), 200);
prices_map.insert("eth_sendRawTransaction".to_string(), 250);
prices_map.insert("trace_call".to_string(), 300);
prices_map.insert("trace_replayBlockTransactions".to_string(), 500);
// Default for unknown methods
prices_map.insert("default".to_string(), 50);
}
pub fn add_stats(&self, stats_vec: Vec<ResponseStats>) {
if stats_vec.is_empty() {
warn!("add_stats called with empty stats_vec");
return;
}
self.total_requests.fetch_add(1, Ordering::Relaxed);
let mut primary_stats: Option<&ResponseStats> = None;
let mut winning_backend_name: Option<String> = None;
let mut actual_first_response_duration: Option<Duration> = None;
let mut first_response_duration_from_primary_or_fastest_secondary: Option<Duration> = None;
// Find the 'actual-first-response' if present and the primary response
for stat in &stats_vec {
if stat.backend_name == "actual-first-response" {
actual_first_response_duration = Some(stat.duration);
} else if stat.backend_name.contains("-primary") { // Assuming primary name contains "-primary"
primary_stats = Some(stat);
}
}
let method_name = primary_stats.map_or_else(
|| stats_vec.first().map_or_else(|| "unknown".to_string(), |s| s.method.clone()),
|ps| ps.method.clone()
);
// Determine winning backend and first_response_duration_from_primary_or_fastest_secondary
if self.has_secondary_backends {
let mut fastest_duration = Duration::MAX;
for stat in stats_vec.iter().filter(|s| s.backend_name != "actual-first-response" && s.error.is_none()) {
if stat.duration < fastest_duration {
fastest_duration = stat.duration;
winning_backend_name = Some(stat.backend_name.clone());
}
}
if fastest_duration != Duration::MAX {
first_response_duration_from_primary_or_fastest_secondary = Some(fastest_duration);
}
} else {
// If no secondary backends, primary is the winner if no error
if let Some(ps) = primary_stats {
if ps.error.is_none() {
winning_backend_name = Some(ps.backend_name.clone());
first_response_duration_from_primary_or_fastest_secondary = Some(ps.duration);
}
}
}
// If no winner determined yet (e.g. all errored, or no secondary and primary errored),
// and if primary_stats exists, consider it as the "winner" for error tracking purposes.
if winning_backend_name.is_none() && primary_stats.is_some() {
winning_backend_name = Some(primary_stats.unwrap().backend_name.clone());
}
// Update backend_wins and method_backend_wins
if let Some(ref winner_name) = winning_backend_name {
self.backend_wins.entry(winner_name.clone()).or_insert_with(|| AtomicU64::new(0)).fetch_add(1, Ordering::Relaxed);
self.method_backend_wins.entry(method_name.clone()).or_default().entry(winner_name.clone()).or_insert_with(|| AtomicU64::new(0)).fetch_add(1, Ordering::Relaxed);
}
// Update first_response_durations and actual_first_response_durations
if let Some(duration) = first_response_duration_from_primary_or_fastest_secondary {
self.first_response_durations.lock().unwrap().push(duration);
self.method_first_response_durations.entry(method_name.clone()).or_insert_with(|| Mutex::new(Vec::new())).lock().unwrap().push(duration);
}
if let Some(duration) = actual_first_response_duration {
self.actual_first_response_durations.lock().unwrap().push(duration);
self.method_actual_first_response_durations.entry(method_name.clone()).or_insert_with(|| Mutex::new(Vec::new())).lock().unwrap().push(duration);
}
let mut request_stats_guard = self.request_stats.lock().unwrap();
for stat in stats_vec {
if stat.backend_name == "actual-first-response" { // Already handled
continue;
}
request_stats_guard.push(stat.clone());
if stat.error.is_some() {
if stat.error.as_deref() == Some("skipped by primary due to min_delay_buffer") {
self.skipped_secondary_requests.fetch_add(1, Ordering::Relaxed);
} else {
self.error_count.fetch_add(1, Ordering::Relaxed);
}
}
// Update backend_method_stats for all backends
self.backend_method_stats
.entry(stat.backend_name.clone())
.or_default()
.entry(stat.method.clone())
.or_insert_with(|| Mutex::new(Vec::new()))
.lock()
.unwrap()
.push(stat.duration);
// If the winning backend is primary and it's not a batch (batch handled separately), update method_stats and CUs
// Assuming primary_stats contains the correct method name for CU calculation
if let Some(ref winner_name_val) = winning_backend_name {
if &stat.backend_name == winner_name_val && stat.backend_name.contains("-primary") && stat.error.is_none() {
// Update method_stats (for primary)
self.method_stats
.entry(stat.method.clone())
.or_insert_with(|| Mutex::new(Vec::new()))
.lock()
.unwrap()
.push(stat.duration);
// Update CU
let cu_price = self.method_cu_prices.get(&stat.method).map_or_else(
|| self.method_cu_prices.get("default").map_or(0, |p| *p.value()),
|p| *p.value()
);
if cu_price > 0 {
self.total_cu.fetch_add(cu_price, Ordering::Relaxed);
self.method_cu.entry(stat.method.clone()).or_insert_with(|| AtomicU64::new(0)).fetch_add(cu_price, Ordering::Relaxed);
}
}
}
}
}
pub fn add_batch_stats(&self, methods: &[String], duration: Duration, backend_name: &str) {
if !backend_name.contains("-primary") { // Only primary processes batches directly for now
warn!("add_batch_stats called for non-primary backend: {}", backend_name);
return;
}
let mut batch_cu: u64 = 0;
for method_name in methods {
let cu_price = self.method_cu_prices.get(method_name).map_or_else(
|| self.method_cu_prices.get("default").map_or(0, |p| *p.value()),
|p| *p.value()
);
batch_cu += cu_price;
if cu_price > 0 {
self.method_cu.entry(method_name.clone()).or_insert_with(|| AtomicU64::new(0)).fetch_add(cu_price, Ordering::Relaxed);
}
// Update method_stats for each method in the batch on the primary
self.method_stats
.entry(method_name.clone())
.or_insert_with(|| Mutex::new(Vec::new()))
.lock()
.unwrap()
.push(duration); // Using the same duration for all methods in the batch as an approximation
// Update backend_method_stats
self.backend_method_stats
.entry(backend_name.to_string())
.or_default()
.entry(method_name.clone())
.or_insert_with(|| Mutex::new(Vec::new()))
.lock()
.unwrap()
.push(duration);
}
if batch_cu > 0 {
self.total_cu.fetch_add(batch_cu, Ordering::Relaxed);
}
// Note: total_requests is incremented by add_stats which should be called for the overall batch request
}
pub fn add_websocket_stats(&self, ws_stat: WebSocketStats) {
if ws_stat.error.is_some() {
self.error_count.fetch_add(1, Ordering::Relaxed);
}
self.ws_stats.lock().unwrap().push(ws_stat);
self.total_ws_connections.fetch_add(1, Ordering::Relaxed);
}
// STUBBED METHODS - to be implemented later
pub fn get_primary_p75_for_method(&self, _method: &str) -> std::time::Duration {
// Placeholder: return a default fixed duration
log::debug!("StatsCollector::get_primary_p75_for_method called (stub)");
std::time::Duration::from_millis(25) // Default from Go's calculateBatchDelay fallback
}
pub fn get_primary_p50_for_method(&self, _method: &str) -> std::time::Duration {
// Placeholder: return a default fixed duration
log::debug!("StatsCollector::get_primary_p50_for_method called (stub)");
std::time::Duration::from_millis(15)
}
pub fn is_expensive_method_by_stats(&self, _method: &str) -> bool {
// Placeholder: always return false
log::debug!("StatsCollector::is_expensive_method_by_stats called (stub)");
false
}
pub fn select_best_secondary_for_expensive_method(
&self,
_method: &str,
_backends: &[Backend],
_block_height_tracker: &Option<Arc<BlockHeightTracker>>,
_secondary_probe: &Option<Arc<SecondaryProbe>>,
) -> Option<Backend> {
// Placeholder: always return None
log::debug!("StatsCollector::select_best_secondary_for_expensive_method called (stub)");
None
}
}
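Both add_stats and add_batch_stats resolve a method's CU price with a fallback to the "default" entry. The same lookup in isolation, on a plain HashMap instead of DashMap (hypothetical helper name):

use std::collections::HashMap;

fn cu_price(prices: &HashMap<String, u64>, method: &str) -> u64 {
    prices
        .get(method)
        .or_else(|| prices.get("default"))
        .copied()
        .unwrap_or(0)
}

fn main() {
    let mut prices = HashMap::new();
    prices.insert("eth_call".to_string(), 100);
    prices.insert("default".to_string(), 50);
    assert_eq!(cu_price(&prices, "eth_call"), 100);
    assert_eq!(cu_price(&prices, "eth_blockNumber"), 50); // unknown method falls back to default
}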

View File

@@ -1,107 +0,0 @@
use serde::{Serialize, Deserialize};
use url::Url;
use http::StatusCode;
use std::time::{Duration, SystemTime};
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct JsonRpcRequest {
pub method: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub id: Option<serde_json::Value>,
#[serde(skip_serializing_if = "Option::is_none")]
pub jsonrpc: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub params: Option<serde_json::Value>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BatchInfo {
pub is_batch: bool,
pub methods: Vec<String>,
pub request_count: usize,
pub has_stateful: bool,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct Backend {
#[serde(with = "url_serde")]
pub url: Url,
pub name: String,
pub role: String, // Consider an enum BackendRole { Primary, Secondary } later
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ResponseStats {
pub backend_name: String,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(with = "http_serde_status_code_option", default)]
pub status_code: Option<StatusCode>,
#[serde(with = "humantime_serde")]
pub duration: Duration,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
pub method: String,
}
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct WebSocketStats {
pub backend_name: String,
pub error: Option<String>, // Default Option<String> serde is fine
pub connect_time: std::time::Duration, // Default Duration serde (secs/nanos struct)
pub is_active: bool,
pub client_to_backend_messages: u64,
pub backend_to_client_messages: u64,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct CuDataPoint {
pub timestamp: SystemTime,
pub cu: u64,
}
// Helper module for serializing/deserializing Option<http::StatusCode>
mod http_serde_status_code_option {
use http::StatusCode;
use serde::{self, Deserialize, Deserializer, Serializer};
pub fn serialize<S>(status_code: &Option<StatusCode>, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match status_code {
Some(sc) => serializer.serialize_some(&sc.as_u16()),
None => serializer.serialize_none(),
}
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<StatusCode>, D::Error>
where
D: Deserializer<'de>,
{
Option::<u16>::deserialize(deserializer)?
.map(|code| StatusCode::from_u16(code).map_err(serde::de::Error::custom))
.transpose()
}
}
// Helper module for serializing/deserializing url::Url
mod url_serde {
use url::Url;
use serde::{self, Deserialize, Deserializer, Serializer};
pub fn serialize<S>(url: &Url, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(url.as_str())
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<Url, D::Error>
where
D: Deserializer<'de>,
{
String::deserialize(deserializer)?
.parse()
.map_err(serde::de::Error::custom)
}
}
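With the serde attributes on JsonRpcRequest, absent optional fields are dropped from the wire format rather than serialized as null. A quick sketch, assuming the struct above is in scope and serde_json is a dependency:

fn main() {
    let req = JsonRpcRequest {
        method: "eth_blockNumber".to_string(),
        id: Some(serde_json::json!(1)),
        jsonrpc: Some("2.0".to_string()),
        params: None,
    };
    // `params` is omitted thanks to skip_serializing_if = "Option::is_none".
    assert_eq!(
        serde_json::to_string(&req).unwrap(),
        r#"{"method":"eth_blockNumber","id":1,"jsonrpc":"2.0"}"#
    );
}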

View File

@@ -1,228 +0,0 @@
use std::sync::Arc;
use std::time::{Duration, Instant};
use hyper::{Body, Request, Response, StatusCode};
use tokio_tungstenite::WebSocketStream;
use log;
use tokio_tungstenite::tungstenite::protocol::Message;
use futures_util::{stream::StreamExt, sink::SinkExt};
use crate::config::AppConfig;
use crate::stats_collector::StatsCollector;
use crate::structures::{Backend, WebSocketStats}; // Ensure WebSocketStats has new fields
pub async fn handle_websocket_request(
mut req: Request<Body>,
app_config: Arc<AppConfig>,
stats_collector: Arc<StatsCollector>,
all_backends: Arc<Vec<Backend>>,
) -> Result<Response<Body>, Box<dyn std::error::Error + Send + Sync + 'static>> {
let upgrade_start_time = Instant::now();
// Check for upgrade request
if !hyper_tungstenite::is_upgrade_request(&req) {
log::warn!("Not a WebSocket upgrade request");
let mut resp = Response::new(Body::from("Not a WebSocket upgrade request"));
*resp.status_mut() = StatusCode::BAD_REQUEST;
return Ok(resp);
}
// Attempt to upgrade the connection
let (response, websocket) = match hyper_tungstenite::upgrade(&mut req, None) {
Ok((resp, ws)) => (resp, ws),
Err(e) => {
log::error!("WebSocket upgrade failed: {}", e);
let mut resp = Response::new(Body::from(format!("WebSocket upgrade failed: {}", e)));
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; // Or BAD_REQUEST
return Ok(resp);
}
};
// Spawn a task to handle the WebSocket connection after sending 101
tokio::spawn(async move {
match websocket.await {
Ok(ws_stream) => {
let client_ws_stream = ws_stream;
if app_config.enable_detailed_logs {
log::info!("Client WebSocket connection established.");
}
// Successfully upgraded client connection, now connect to primary backend
proxy_websocket_to_primary(client_ws_stream, app_config, stats_collector, all_backends).await;
}
Err(e) => {
log::error!("Error awaiting client WebSocket upgrade: {}", e);
// No actual client WS connection to record stats against other than the failed upgrade attempt
let stats = WebSocketStats {
backend_name: "client_upgrade_failed".to_string(),
error: Some(format!("Client WS upgrade await error: {}", e)),
connect_time: upgrade_start_time.elapsed(),
is_active: false,
client_to_backend_messages: 0,
backend_to_client_messages: 0,
};
stats_collector.add_websocket_stats(stats);
}
}
});
// Return the 101 Switching Protocols response to the client
Ok(response)
}
async fn proxy_websocket_to_primary(
mut client_ws_stream: WebSocketStream<hyper::upgrade::Upgraded>, // already-upgraded stream; made mutable for close()
app_config: Arc<AppConfig>,
stats_collector: Arc<StatsCollector>,
all_backends: Arc<Vec<Backend>>,
) {
let connect_to_primary_start_time = Instant::now();
let mut client_to_backend_msg_count: u64 = 0;
let mut backend_to_client_msg_count: u64 = 0;
let mut ws_stats_error: Option<String> = None;
let mut backend_name_for_stats = "primary_unknown".to_string();
// 1. Find Primary Backend
let primary_backend = match all_backends.iter().find(|b| b.role == "primary") {
Some(pb) => {
backend_name_for_stats = pb.name.clone();
pb
}
None => {
log::error!("No primary backend configured for WebSocket proxy.");
ws_stats_error = Some("No primary backend configured".to_string());
// Close client connection gracefully if possible
let _ = client_ws_stream.close(None).await; // WebSocketStream provides close()
// Record stats and return
let stats = WebSocketStats {
backend_name: backend_name_for_stats,
error: ws_stats_error,
connect_time: connect_to_primary_start_time.elapsed(),
is_active: false,
client_to_backend_messages: client_to_backend_msg_count,
backend_to_client_messages: backend_to_client_msg_count,
};
stats_collector.add_websocket_stats(stats);
return;
}
};
backend_name_for_stats = primary_backend.name.clone(); // Ensure it's set if primary_backend was found
// 2. Connect to Primary Backend's WebSocket
let mut ws_url = primary_backend.url.clone();
let scheme = if ws_url.scheme() == "https" { "wss" } else { "ws" };
if ws_url.set_scheme(scheme).is_err() {
log::error!("Failed to set WebSocket scheme for backend URL: {}", primary_backend.url);
ws_stats_error = Some(format!("Invalid backend URL scheme for {}", primary_backend.url));
let _ = client_ws_stream.close(None).await;
let stats = WebSocketStats {
backend_name: backend_name_for_stats,
error: ws_stats_error,
connect_time: connect_to_primary_start_time.elapsed(),
is_active: false,
client_to_backend_messages: client_to_backend_msg_count,
backend_to_client_messages: backend_to_client_msg_count,
};
stats_collector.add_websocket_stats(stats);
return;
}
let backend_connect_attempt_time = Instant::now();
let backend_ws_result = tokio_tungstenite::connect_async(ws_url.clone()).await;
let connect_duration = backend_connect_attempt_time.elapsed(); // This is backend connection time
let backend_ws_stream_conn = match backend_ws_result {
Ok((stream, _response)) => {
if app_config.enable_detailed_logs {
log::info!("Successfully connected to primary backend WebSocket: {}", primary_backend.name);
}
stream
}
Err(e) => {
log::error!("Failed to connect to primary backend {} WebSocket: {}", primary_backend.name, e);
ws_stats_error = Some(format!("Primary backend connect error: {}", e));
let _ = client_ws_stream.close(None).await; // Close client connection
let stats = WebSocketStats {
backend_name: backend_name_for_stats,
error: ws_stats_error,
connect_time: connect_duration,
is_active: false,
client_to_backend_messages: client_to_backend_msg_count,
backend_to_client_messages: backend_to_client_msg_count,
};
stats_collector.add_websocket_stats(stats);
return;
}
};
// 3. Proxying Logic
let (mut client_ws_tx, mut client_ws_rx) = client_ws_stream.split();
let (mut backend_ws_tx, mut backend_ws_rx) = backend_ws_stream_conn.split();
let client_to_backend_task = async {
let mut first_error: Option<String> = None; // per-task error slot; merged after join
while let Some(msg_result) = client_ws_rx.next().await {
match msg_result {
Ok(msg) => {
if app_config.enable_detailed_logs { log::trace!("C->B: {:?}", msg); }
if backend_ws_tx.send(msg).await.is_err() {
if app_config.enable_detailed_logs { log::debug!("Error sending to backend, C->B loop breaking."); }
break;
}
client_to_backend_msg_count += 1;
}
Err(e) => {
log::warn!("Error reading from client WebSocket: {}", e);
first_error.get_or_insert_with(|| format!("Client read error: {}", e));
break;
}
}
}
// Try to close the backend sink gracefully if client read loop ends
if app_config.enable_detailed_logs { log::debug!("C->B proxy loop finished. Closing backend_ws_tx.");}
let _ = backend_ws_tx.close().await;
first_error
};
let backend_to_client_task = async {
let mut first_error: Option<String> = None; // per-task error slot; merged after join
while let Some(msg_result) = backend_ws_rx.next().await {
match msg_result {
Ok(msg) => {
if app_config.enable_detailed_logs { log::trace!("B->C: {:?}", msg); }
if client_ws_tx.send(msg).await.is_err() {
if app_config.enable_detailed_logs { log::debug!("Error sending to client, B->C loop breaking."); }
break;
}
backend_to_client_msg_count += 1;
}
Err(e) => {
log::warn!("Error reading from backend WebSocket: {}", e);
first_error.get_or_insert_with(|| format!("Backend read error: {}", e));
break;
}
}
}
// Try to close the client sink gracefully if backend read loop ends
if app_config.enable_detailed_logs { log::debug!("B->C proxy loop finished. Closing client_ws_tx.");}
let _ = client_ws_tx.close().await;
first_error
};
// Run both proxy tasks concurrently; each returns the first error it observed
let (client_err, backend_err) = tokio::join!(client_to_backend_task, backend_to_client_task);
ws_stats_error = client_err.or(backend_err);
if app_config.enable_detailed_logs {
log::info!("WebSocket proxying ended for {}. Client->Backend: {}, Backend->Client: {}. Error: {:?}",
backend_name_for_stats, client_to_backend_msg_count, backend_to_client_msg_count, ws_stats_error);
}
let final_session_duration = connect_to_primary_start_time.elapsed();
let final_stats = WebSocketStats {
backend_name: backend_name_for_stats,
error: ws_stats_error,
connect_time: final_session_duration,
is_active: false, // Session is now over
client_to_backend_messages: client_to_backend_msg_count,
backend_to_client_messages: backend_to_client_msg_count,
};
stats_collector.add_websocket_stats(final_stats);
}
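The shape of the final join matters: each pump direction owns its own first-error slot and returns it, so tokio::join! never sees two futures mutably borrowing the same variable. A stripped-down sketch of that pattern with tokio mpsc channels standing in for the WebSocket halves (assumes tokio with the full feature set; all names illustrative):

use tokio::sync::mpsc;

#[tokio::main]
async fn main() {
    let (c2b_tx, mut c2b_rx) = mpsc::channel::<String>(8);
    let (b2c_tx, mut b2c_rx) = mpsc::channel::<String>(8);

    // Stand-ins for the client and backend: each sends a few frames, then hangs up.
    tokio::spawn(async move {
        for m in ["eth_blockNumber", "eth_call"] {
            c2b_tx.send(m.to_string()).await.unwrap();
        }
    });
    tokio::spawn(async move {
        b2c_tx.send("0x10d4f".to_string()).await.unwrap();
    });

    // Each pump owns its counter and error slot and returns them.
    let client_to_backend = async {
        let mut count = 0u64;
        let first_error: Option<String> = None;
        while c2b_rx.recv().await.is_some() {
            count += 1;
        }
        (count, first_error)
    };
    let backend_to_client = async {
        let mut count = 0u64;
        let first_error: Option<String> = None;
        while b2c_rx.recv().await.is_some() {
            count += 1;
        }
        (count, first_error)
    };

    let ((c2b, c2b_err), (b2c, b2c_err)) = tokio::join!(client_to_backend, backend_to_client);
    let first_error = c2b_err.or(b2c_err);
    println!("client->backend: {}, backend->client: {}, error: {:?}", c2b, b2c, first_error);
}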

View File

@@ -1 +0,0 @@
berachain/reth/berachain-bartio-reth-archive-trace.yml

View File

@@ -1 +0,0 @@
berachain/reth/berachain-bepolia-reth-archive-trace.yml

Some files were not shown because too many files have changed in this diff.