3387 Commits

Author SHA1 Message Date
google-labs-jules[bot]
d03c00f9a9 Jules was unable to complete the task in time. Please review the work done so far and provide feedback for Jules to continue. 2025-05-29 13:57:43 +00:00
Para Dox
f9ea6d118c The system now intelligently manages backend health, ensuring that only responsive backends handle traffic while automatically recovering failed backends when they become available again. This provides much more robust handling of secondary backend failures. 2025-05-29 20:06:27 +07:00
Para Dox
318c4d26f5 This creates a truly intelligent proxy that learns from actual usage patterns and automatically optimizes routing based on real performance data rather than hardcoded assumptions 2025-05-29 19:26:39 +07:00
Para Dox
ef273ee331 This ensures that users always get responses from backends that are synchronized with the latest blockchain state, preventing issues with outdated or inconsistent data from lagging secondary backends. 2025-05-29 19:04:46 +07:00
Para Dox
e4f6ca8ee7 may the force be with me 2025-05-29 18:58:29 +07:00
Para Dox
7cf79509e3 may the force be with me 2025-05-29 18:52:08 +07:00
Para Dox
6d46471536 lets try sonnet 2025-05-29 11:33:43 +07:00
Para Dox
779a2e76f9 a set of random fixes into the blue 2025-05-29 10:53:36 +07:00
Para Dox
c5710c33b1 a set of random fixes into the blue 2025-05-29 10:35:07 +07:00
Para Dox
97cf8fd6fa a set of random fixes into the blue 2025-05-29 10:18:30 +07:00
Para Dox
57cfa421b4 a set of random fixes into the blue 2025-05-29 02:24:50 +07:00
Para Dox
b70e0f4afb a set of random fixes into the blue 2025-05-29 01:55:12 +07:00
Para Dox
dea6cf315e more features 2025-05-29 01:26:24 +07:00
Para Dox
c08a43eb02 more features 2025-05-29 01:22:10 +07:00
Para Dox
3b43c15074 more features 2025-05-29 01:08:24 +07:00
Para Dox
596dba9ad9 more features 2025-05-29 01:03:59 +07:00
Para Dox
73f70555d2 more features 2025-05-29 00:05:17 +07:00
Para Dox
0c06f20666 more features 2025-05-28 23:48:51 +07:00
Para Dox
431b159146 more features 2025-05-28 23:44:36 +07:00
Para Dox
d78f41e521 more features 2025-05-28 23:35:14 +07:00
Para Dox
23bf18eb83 more features 2025-05-28 23:27:18 +07:00
Para Dox
b22dfd6d10 more features 2025-05-28 23:21:57 +07:00
Para Dox
46c320ef52 more features 2025-05-28 23:10:40 +07:00
Para Dox
2daa7560de more features 2025-05-28 23:00:57 +07:00
Para Dox
90e8af4644 more features 2025-05-28 22:40:55 +07:00
Para Dox
f6161e575d more features 2025-05-28 22:30:28 +07:00
Para Dox
1c14c8a861 more features 2025-05-28 22:15:19 +07:00
Para Dox
5a557f574f more features 2025-05-28 22:03:07 +07:00
Para Dox
36cc615903 more features 2025-05-28 21:55:32 +07:00
Para Dox
6055d17a4d more features 2025-05-28 21:45:49 +07:00
Para Dox
f18cd800dc more features 2025-05-28 21:39:01 +07:00
Para Dox
e136521e26 bench bsc 2025-05-28 16:30:58 +07:00
Para Dox
04fdcaccfe updates 2025-05-28 16:23:23 +07:00
Para Dox
0ab7a45d64 done 2025-05-26 14:58:54 +07:00
Para Dox
07871d8efc done 2025-05-26 02:28:27 +07:00
Para Dox
f2b91ac1c0 done 2025-05-26 01:58:33 +07:00
Para Dox
e2218f499c done 2025-05-26 01:01:56 +07:00
Para Dox
a048e4c292 eriogn is a kind 2025-05-26 00:25:50 +07:00
Para Dox
8a74e4e149 reth of a kind 2025-05-26 00:21:46 +07:00
Para Dox
19b079da16 reth is a kind 2025-05-26 00:17:46 +07:00
Para Dox
355f538d8b done 2025-05-25 22:42:49 +07:00
Para Dox
1856f17848 done 2025-05-25 22:38:52 +07:00
Para Dox
5688b0b8c4 done 2025-05-25 22:29:01 +07:00
Para Dox
1ceeae7bea done 2025-05-25 21:30:12 +07:00
Para Dox
ce41dc7fb3 done 2025-05-25 21:27:51 +07:00
Para Dox
cc8c1a3c81 done 2025-05-25 21:26:06 +07:00
Para Dox
a70284eec1 done 2025-05-25 21:19:23 +07:00
Para Dox
edb6e7518b done 2025-05-25 20:50:31 +07:00
Para Dox
b71851d43f done 2025-05-25 20:36:38 +07:00
Para Dox
39323d19eb done 2025-05-25 20:21:23 +07:00
Para Dox
1feac516b4 done 2025-05-25 15:49:51 +07:00
Para Dox
3d10d7b32b done 2025-05-25 15:45:35 +07:00
Para Dox
2706436d16 done 2025-05-25 15:42:36 +07:00
Para Dox
03735aeed6 done 2025-05-25 15:40:30 +07:00
Para Dox
5aeb68cfb6 done 2025-05-25 15:34:58 +07:00
Para Dox
78b0f0669d done 2025-05-25 15:32:29 +07:00
Para Dox
0cfacad7e6 done 2025-05-25 15:26:41 +07:00
Para Dox
a7be4c90d6 done 2025-05-25 15:20:57 +07:00
Para Dox
15a2cd6ac0 done 2025-05-25 15:19:19 +07:00
Para Dox
1575dadaca done 2025-05-25 15:14:53 +07:00
Para Dox
af80ad8d74 done 2025-05-25 14:53:40 +07:00
Para Dox
e29ea29547 now we build reth 2025-05-25 14:14:32 +07:00
Para Dox
25b2310699 claude code did this 2025-05-24 22:32:46 +07:00
Para Dox
d246befaea hail the anchors 2025-05-24 19:07:19 +07:00
Para Dox
b82fb47f00 beta 2025-05-24 18:33:45 +07:00
Para Dox
6d0a5d9d6d node is refactored into it's own template 2025-05-24 18:04:16 +07:00
Para Dox
b0fc62b701 configurable 2025-05-24 02:24:42 +07:00
Para Dox
87e9ba6b9e fuse is back 2025-05-23 19:34:52 +07:00
Para Dox
5cc07f93a7 remove the health check 2025-05-23 18:41:33 +07:00
Para Dox
edf8d469fb naive health check 2025-05-23 17:12:52 +07:00
Para Dox
9e634508c7 Merge branch 'vibe' of github.com:StakeSquid/ethereum-rpc-docker into vibe 2025-05-23 12:48:36 +07:00
Para Dox
1495a49144 fix 2025-05-23 12:47:26 +07:00
Brain🧠
dd662dfdd0 home 2025-05-22 20:33:57 +03:00
Para Dox
c8ae2e4f91 fix 2025-05-22 22:02:50 +07:00
Para Dox
61d95baef9 links 2025-05-22 21:49:45 +07:00
Para Dox
958432e343 fuse not none 2025-05-22 21:48:30 +07:00
Para Dox
0f5d7e9651 lisk sepolia can not build 2025-05-22 19:33:37 +07:00
Para Dox
2a987f64a2 update 2025-05-22 19:25:14 +07:00
Para Dox
2d8c58f13e fix 2025-05-22 17:04:25 +07:00
Para Dox
5978ad47da rocksdb 2025-05-22 16:20:21 +07:00
Para Dox
af15054bf5 new fuse 2025-05-22 16:04:55 +07:00
Para Dox
58a3233ffd update 2025-05-22 15:02:47 +07:00
Para Dox
ff729a2f44 indexer for bitcoin 2025-05-22 14:53:28 +07:00
Para Dox
a600aabfab indexer for bitcoin 2025-05-22 14:48:16 +07:00
Para Dox
16e30d2b48 indexer for bitcoin 2025-05-22 14:31:35 +07:00
Para Dox
84bf18d6e4 indexer for bitcoin 2025-05-22 14:23:58 +07:00
Para Dox
e57fdad3fe indexer for bitcoin 2025-05-22 14:21:39 +07:00
Para Dox
860921b712 indexer for bitcoin 2025-05-22 13:59:40 +07:00
Para Dox
6c7ceaab90 now with ripple 2025-05-22 01:57:40 +07:00
Para Dox
5135a30790 links 2025-05-22 00:28:59 +07:00
Para Dox
5492883a2c new chain 2025-05-22 00:26:56 +07:00
Para Dox
4f7fe9a256 brrr 2025-05-22 00:15:39 +07:00
Para Dox
bbd93c0ffb brrr 2025-05-22 00:14:07 +07:00
Para Dox
295135dc8d brrr 2025-05-22 00:02:02 +07:00
Para Dox
6173215cdc brrr 2025-05-21 23:58:19 +07:00
Para Dox
0edd43dc56 deletre the wrong files 2025-05-21 23:29:26 +07:00
Para Dox
aa79b3128d brrr 2025-05-21 23:28:01 +07:00
Para Dox
a8354bebbe god entered the room 2025-05-21 23:12:36 +07:00
Para Dox
a1eedd6d01 bob pebblehash 2025-05-21 13:58:48 +07:00
Para Dox
72876114c3 unify 2025-05-21 13:56:22 +07:00
Para Dox
7ee7456752 update 2025-05-21 12:21:33 +07:00
Para Dox
27576f3cb4 update 2025-05-21 12:10:49 +07:00
Para Dox
b9fb679727 update 2025-05-21 12:03:43 +07:00
Para Dox
e97864a989 update 2025-05-21 11:58:00 +07:00
Para Dox
f51ad3db00 update 2025-05-21 11:51:00 +07:00
Para Dox
f073412959 one-blocks 2025-05-20 23:05:01 +07:00
Para Dox
bc12f3d42c no http 2025-05-20 22:28:39 +07:00
Para Dox
1dc50366cb port now gud 2025-05-20 22:26:02 +07:00
Para Dox
86ae0c4545 fix firehose 2025-05-20 22:20:20 +07:00
Para Dox
29a55e7e6f fix firehose default path 2025-05-20 22:16:22 +07:00
Para Dox
7661d8c7db fix firehose default path 2025-05-20 22:15:26 +07:00
Para Dox
a6bd111ce6 teh flag 2025-05-20 22:08:01 +07:00
Para Dox
16090503d4 few configs 2025-05-20 22:02:49 +07:00
Para Dox
2a13b59aae version fix 2025-05-20 21:54:12 +07:00
Para Dox
85a5862f90 renaming 2025-05-20 21:20:28 +07:00
Para Dox
8b6133287e fix 2025-05-19 21:44:36 +07:00
Para Dox
c648d1304a force it 2025-05-19 21:41:09 +07:00
Para Dox
62fde28752 force it 2025-05-19 21:37:19 +07:00
Para Dox
c5671b50c3 force it 2025-05-19 21:33:29 +07:00
Para Dox
5a04784b1c fix 2025-05-19 21:08:40 +07:00
Para Dox
39e3b4a9d6 fix upstreams 2025-05-19 20:00:01 +07:00
Para Dox
fb964d86a1 too much information 2025-05-19 19:38:00 +07:00
Para Dox
2eda458a23 fix 2025-05-19 19:13:42 +07:00
Para Dox
747e8077a0 pebble 2025-05-19 19:09:35 +07:00
Para Dox
4baf955ce2 do the index 2025-05-19 19:03:04 +07:00
Para Dox
9c32719987 do the index 2025-05-19 19:01:08 +07:00
Para Dox
59e1608b67 fix 2025-05-19 18:18:38 +07:00
Para Dox
b13e48e897 fix 2025-05-19 18:15:55 +07:00
Para Dox
5a38f4960b fix 2025-05-19 17:54:42 +07:00
Para Dox
646def7d2b update 2025-05-19 17:47:57 +07:00
Para Dox
84e66e2697 update 2025-05-19 17:09:18 +07:00
Para Dox
b21f82fc17 fix 2025-05-19 17:02:51 +07:00
Para Dox
017f5e9bec fix 2025-05-19 16:58:07 +07:00
Para Dox
228fb63f02 fix 2025-05-19 16:55:59 +07:00
Para Dox
1818ead761 leave for good 2025-05-19 16:52:38 +07:00
Para Dox
6a9ab07495 home 2025-05-19 16:33:27 +07:00
Para Dox
cd6ad78d11 fix 2025-05-19 16:15:16 +07:00
Para Dox
b2f6d8ef45 fix 2025-05-19 15:32:37 +07:00
Para Dox
22a7b3ea1b geth is not default 2025-05-19 15:20:38 +07:00
Para Dox
88140231ce classic names 2025-05-19 15:16:47 +07:00
Para Dox
ca84033a14 fix version 2025-05-19 14:51:25 +07:00
Para Dox
9afe08dff2 better classic 2025-05-19 13:23:57 +07:00
Para Dox
6e477bd5f0 renaming stuff 2025-05-19 13:05:42 +07:00
Para Dox
5a1c0bf495 nitro metrics path 2025-05-18 14:28:56 +07:00
Para Dox
70d727d7ad nitro metrics 2025-05-18 14:27:02 +07:00
Para Dox
6f0e5ac218 besu metrics 2025-05-18 14:19:12 +07:00
Para Dox
aa2b598b32 metrics for prysm 2025-05-18 14:15:34 +07:00
Para Dox
3587d82931 metric for sonic 2025-05-18 13:46:07 +07:00
Para Dox
0d902eb69b nimbus metrics 2025-05-18 13:36:33 +07:00
Para Dox
af0c8cd7e2 fix reth metric 2025-05-18 13:21:40 +07:00
Para Dox
98abb33fc8 now with prometheus. hopefully 2025-05-18 13:11:00 +07:00
Para Dox
f2855d3a2c strong progress 2025-05-16 15:16:39 +07:00
Para Dox
44461bffcd strong progress 2025-05-16 15:12:39 +07:00
Para Dox
d5598f5446 more visibility 2025-05-16 14:04:58 +07:00
Para Dox
07bbfb22cc more visibility 2025-05-16 14:04:09 +07:00
Para Dox
ddf85ee8f9 remove unused files 2025-05-16 12:03:23 +07:00
Para Dox
22ce16f065 do not store logs 2025-05-16 12:01:50 +07:00
Para Dox
bfccfdbadc test 2025-05-16 11:49:54 +07:00
Para Dox
8c654c70d1 test 2025-05-16 11:42:19 +07:00
Para Dox
f43859c298 delete stuff 2025-05-16 11:41:59 +07:00
Para Dox
0396014baf release the firehose 2025-05-15 16:25:39 +07:00
Para Dox
cd0adc475b update 2025-05-15 16:24:58 +07:00
Para Dox
eb5197aca3 no hardfork yet 2025-05-13 22:22:04 +07:00
Para Dox
b69db72c80 still secrets for zircuit 2025-05-13 13:40:41 +07:00
Para Dox
34c1c62a7d no more gensis and rollup.json for zircuit 2025-05-13 13:25:38 +07:00
Para Dox
4a4dfbda1d no more gensis and rollup.json for zircuit 2025-05-13 13:16:16 +07:00
Para Dox
648db38954 new params 2025-05-13 12:58:24 +07:00
Para Dox
1b489f5a94 new params 2025-05-13 12:40:46 +07:00
Para Dox
c9188fa6c7 fix 2025-05-12 13:44:21 +07:00
Para Dox
f92b61c228 update zircuit 2025-05-12 12:49:20 +07:00
Para Dox
3d98079a2a fix the logging issue 2025-05-12 10:12:47 +07:00
Para Dox
c83ed6d7d3 updates 2025-05-12 09:11:22 +07:00
Para Dox
0b139fed3d few tweaks 2025-05-10 19:40:07 +07:00
Para Dox
1ba136a4f8 few tweaks 2025-05-10 19:09:56 +07:00
Para Dox
8e2243616b add new parameter 2025-05-09 17:53:45 +07:00
Para Dox
3fbde2d791 add new parameter 2025-05-09 17:50:36 +07:00
Para Dox
781e6535b6 update 2025-05-09 17:23:10 +07:00
Para Dox
85ee5afd87 no more walkback and updates 2025-05-06 13:15:27 +07:00
Para Dox
be54827b7d fix 2025-05-03 18:53:40 +07:00
Para Dox
30ae1caff4 now with node profiles 2025-05-03 18:50:37 +07:00
Para Dox
4eedfa81e0 classic where classic belongs 2025-05-03 18:02:00 +07:00
Para Dox
12c72587e6 a minimal base 2025-05-02 14:17:43 +07:00
Para Dox
88c9eb7a2c format output 2025-05-01 16:10:52 +07:00
Para Dox
b3893280b8 benchmark prices 2025-05-01 16:01:53 +07:00
Para Dox
4718d739a8 benchmark prices 2025-05-01 15:56:09 +07:00
Para Dox
211bd2684b benchmark prices 2025-05-01 15:52:50 +07:00
Para Dox
b56880f5a2 benchmark prices 2025-05-01 15:44:39 +07:00
Para Dox
d6350bfde1 bench base 2025-05-01 14:33:27 +07:00
Para Dox
ea255de1cd now with websockets 2025-05-01 14:25:29 +07:00
Para Dox
f2b642c20d now with websockets 2025-05-01 14:20:31 +07:00
Para Dox
25502ae1a1 now with websockets 2025-05-01 14:17:45 +07:00
Para Dox
ae1cbd0493 now with websockets 2025-05-01 14:14:59 +07:00
Para Dox
83ce10ec3d naming convention introduced 2025-05-01 14:06:38 +07:00
Para Dox
232748632a first try 2025-05-01 14:05:47 +07:00
Para Dox
f171abb333 fancy testing 2025-05-01 13:33:13 +07:00
Para Dox
4722460091 fix dns tree url 2025-04-29 23:32:57 +07:00
Para Dox
f15132a8d1 fix dns tree url 2025-04-29 23:21:53 +07:00
Para Dox
4d199f81bb smol is gud 2025-04-29 23:11:06 +07:00
Para Dox
104f2d8e1c minified testnets 2025-04-29 22:17:44 +07:00
Para Dox
b930803d21 minified testnets 2025-04-29 22:17:08 +07:00
Para Dox
362f6bd577 fix backups 2025-04-29 18:00:53 +07:00
Para Dox
fca23f8205 pruning is now only starting manually 2025-04-29 16:31:39 +07:00
Para Dox
1241776349 add is 2025-04-29 16:11:54 +07:00
Para Dox
a6d574c9a9 fix classic shackle connection 2025-04-29 12:58:52 +07:00
Para Dox
d28181236c fix hoodie name 2025-04-29 12:10:50 +07:00
Para Dox
94bc176586 new methods and hoodie 2025-04-29 12:08:23 +07:00
Para Dox
002b2fa99c more thought 2025-04-28 22:14:28 +07:00
Para Dox
1f13edad78 more thought 2025-04-28 22:05:06 +07:00
Para Dox
fd2abeb162 jwtsecret 2025-04-28 21:48:56 +07:00
Para Dox
079aefb865 usage 2025-04-28 21:36:02 +07:00
Para Dox
639d8b60c8 sequencer 2025-04-28 20:51:05 +07:00
Para Dox
8c104531b4 it's the real one 2025-04-28 20:33:01 +07:00
Para Dox
8ff330d38b excellent 2025-04-28 20:31:33 +07:00
Para Dox
06faa1d9f4 moved 2025-04-28 20:28:22 +07:00
Para Dox
740dbd59de no genesis for bsc erigon 2025-04-28 17:28:23 +07:00
Para Dox
e8b5cce384 try 2025-04-28 17:21:29 +07:00
Para Dox
4ffde02488 try 2025-04-28 17:01:22 +07:00
Para Dox
e61b97ff4e try 2025-04-28 16:57:55 +07:00
Para Dox
9d22af0d73 try 2025-04-28 16:55:31 +07:00
Para Dox
7c66e91f2c nat 2025-04-28 16:46:40 +07:00
Para Dox
f23b099659 fix 2025-04-28 16:42:21 +07:00
Para Dox
15f9691b37 fix 2025-04-28 16:40:24 +07:00
Para Dox
3b90a9bbde fix 2025-04-28 16:38:52 +07:00
Para Dox
8b1cb957d1 fix 2025-04-28 16:34:10 +07:00
Para Dox
fe505dd239 no bootnodes for eth-classic 2025-04-28 16:09:45 +07:00
Para Dox
7086278d67 new life 2025-04-28 16:08:38 +07:00
Para Dox
ba1f06506d fix 2025-04-28 16:08:13 +07:00
Para Dox
c7d8a6e534 shortcuts 2025-04-28 16:05:42 +07:00
Para Dox
e618db0aea besu is back 2025-04-28 16:04:08 +07:00
Para Dox
9650e2f978 better naming 2025-04-28 15:02:51 +07:00
Para Dox
41027d8aa3 sync me a faster linea 2025-04-28 14:59:30 +07:00
Para Dox
071eff21ee simple look 2025-04-27 23:56:32 +07:00
Para Dox
e109dc0e35 simple look 2025-04-27 23:31:46 +07:00
Para Dox
ef725c1768 simple look 2025-04-27 23:29:43 +07:00
Para Dox
2684dec118 simple look 2025-04-27 23:26:36 +07:00
Para Dox
191fd11d93 simple look 2025-04-27 23:20:19 +07:00
Para Dox
0a6c5e9fb5 simple look 2025-04-27 23:18:07 +07:00
Para Dox
7c28a5d536 simple look 2025-04-27 23:14:52 +07:00
Para Dox
7a82a4dbf4 simple look 2025-04-27 23:06:16 +07:00
Para Dox
5af24a34af simple look 2025-04-27 23:02:25 +07:00
Para Dox
9ba491ab8b simple look 2025-04-27 22:56:10 +07:00
Para Dox
e7657a133c simple look 2025-04-27 22:48:49 +07:00
Para Dox
088f42cb11 simple look 2025-04-27 22:40:57 +07:00
Para Dox
125452c7d4 simple look 2025-04-27 22:34:46 +07:00
Para Dox
153abf5805 simple look 2025-04-27 22:33:30 +07:00
Para Dox
5c63c62859 simple look 2025-04-27 22:30:57 +07:00
Para Dox
cbfe770a1a optimize for calls 2025-04-27 21:18:45 +07:00
Para Dox
12579f612f optimize for calls 2025-04-27 21:12:47 +07:00
Para Dox
1d16a45496 optimize for calls 2025-04-27 21:10:20 +07:00
Para Dox
fa05b914a6 optimize for calls 2025-04-27 21:05:16 +07:00
Para Dox
ab10340dca optimize for calls 2025-04-27 21:03:37 +07:00
Para Dox
d18aa9ca72 optimize for calls 2025-04-27 21:00:46 +07:00
Para Dox
dca2a0ef60 optimize for calls 2025-04-27 20:39:38 +07:00
Para Dox
d69a8cb0de optimize for calls 2025-04-27 20:27:03 +07:00
Para Dox
a44ea07293 optimize for 0 2025-04-27 20:04:40 +07:00
Para Dox
d935624364 no node datadir yaj 2025-04-27 20:03:43 +07:00
Para Dox
5a080c522c new era 2025-04-27 19:30:25 +07:00
Para Dox
1eb8a58fb2 new era 2025-04-27 19:28:32 +07:00
Para Dox
4331a5ac4a new era 2025-04-27 19:26:38 +07:00
Para Dox
bfd8392e41 new era 2025-04-27 17:57:00 +07:00
Para Dox
ca59cb50eb new era 2025-04-27 17:53:05 +07:00
Para Dox
c3d5534520 new era 2025-04-27 17:51:24 +07:00
Para Dox
ba2103d7c0 new era 2025-04-27 17:50:03 +07:00
Para Dox
9f3710ddaa new era 2025-04-27 17:47:25 +07:00
Para Dox
4b17d54280 new era 2025-04-27 17:42:54 +07:00
Para Dox
b12409b2f6 new era 2025-04-27 17:40:33 +07:00
Para Dox
90e26f3523 exec 2025-04-27 17:32:37 +07:00
Para Dox
33d0bfe2c3 exec 2025-04-27 17:29:49 +07:00
Para Dox
1d85c22974 plz 2025-04-27 17:29:18 +07:00
Para Dox
ed2cf87989 new era 2025-04-27 17:24:06 +07:00
Para Dox
cca3d8e7e1 new era 2025-04-27 17:22:44 +07:00
Para Dox
037a01b7c2 new era 2025-04-27 17:17:12 +07:00
Para Dox
08b27a4a3b new era 2025-04-27 17:14:21 +07:00
Para Dox
e1985a117f new era 2025-04-27 17:10:32 +07:00
Para Dox
a1fc35b87f new era 2025-04-27 17:06:15 +07:00
Para Dox
938bf33053 new era 2025-04-27 16:54:04 +07:00
Para Dox
9c40ab7451 new era 2025-04-27 16:52:17 +07:00
Para Dox
339af89892 new era 2025-04-27 16:49:09 +07:00
Para Dox
b6f5c17a9f new era 2025-04-27 16:45:17 +07:00
Para Dox
5524999d55 new era 2025-04-27 16:43:52 +07:00
Para Dox
053b1aa97b new era 2025-04-27 16:41:29 +07:00
Para Dox
4ed74e9c75 new era 2025-04-27 16:18:00 +07:00
Para Dox
f7b7934dd1 next 2025-04-27 16:17:02 +07:00
Para Dox
8a74589cfb beacon node path 2025-04-27 13:47:58 +07:00
Para Dox
04c5e41ae9 nimbus 2025-04-26 21:21:21 +07:00
Para Dox
762011a809 nimbus 2025-04-26 21:17:15 +07:00
Para Dox
93baaace61 beef up reth 2025-04-26 20:58:41 +07:00
Para Dox
c7259dad75 first nimbus 2025-04-26 20:24:45 +07:00
Para Dox
1ded879d2d some dangerous path changes for reth datadir 2025-04-26 20:22:45 +07:00
Para Dox
a379057318 env var clash 2025-04-26 01:24:50 +07:00
Para Dox
e6529fcb7c sequncer url fix 2025-04-25 20:08:41 +07:00
Para Dox
d960ca7acd hardfork 2025-04-23 08:07:45 +07:00
Para Dox
e44909c46d seeds and peer issues tackle3d 2025-04-22 12:46:59 +07:00
Para Dox
12fd383dd8 fix 2025-04-21 11:40:31 +07:00
Para Dox
2cbe0c3986 fix 2025-04-21 11:16:22 +07:00
Para Dox
88deee2125 fix 2025-04-21 11:12:48 +07:00
Para Dox
fa1876de72 downgrade 2025-04-21 11:09:12 +07:00
Para Dox
056e59248d fix 2025-04-21 10:50:45 +07:00
Para Dox
0dd9af2905 more peers 2025-04-21 10:20:18 +07:00
Para Dox
eae44c31e7 try 2025-04-21 10:05:06 +07:00
Para Dox
ef5ce7fa1d do the thing 2025-04-21 09:52:07 +07:00
Para Dox
8f598d464c erigon ftw 2025-04-20 17:24:15 +07:00
Para Dox
2da52e423b erigon ftw 2025-04-20 17:14:29 +07:00
Para Dox
37c16701f2 upstreams without testing 2025-04-20 16:26:24 +07:00
Para Dox
54d3e75710 fix 2025-04-19 15:47:16 +07:00
Para Dox
c4b032577e nuclear rollup 2025-04-19 12:44:42 +07:00
Para Dox
a1f3fd7109 dangerous script 2025-04-19 12:39:54 +07:00
Para Dox
221a0629e0 no open client ports when no discovery 2025-04-19 12:22:28 +07:00
Para Dox
e4d3bfb019 update 2025-04-19 11:33:54 +07:00
Para Dox
04145934f2 update 2025-04-19 11:31:38 +07:00
Para Dox
0fe081bbce node depends on client 2025-04-19 02:20:58 +07:00
Para Dox
d38e7b675e node depends on client 2025-04-19 02:19:00 +07:00
Para Dox
821d75d3ea superchain 2025-04-19 00:55:27 +07:00
Para Dox
87bc69108c hopefully nothing breaks 2025-04-19 00:28:36 +07:00
Para Dox
4f12b8d0ed hopefully nothing breaks 2025-04-19 00:24:44 +07:00
Para Dox
efa0a7c56d good fix 2025-04-18 15:45:00 +07:00
Para Dox
98eb25f7b5 fix 2025-04-18 12:43:22 +07:00
Para Dox
464ba456b8 fix 2025-04-17 21:21:52 +07:00
Para Dox
a3bacf5347 fix 2025-04-17 21:21:34 +07:00
Para Dox
3f06f3f1b1 fix 2025-04-17 21:21:03 +07:00
Para Dox
6a0823a64d fix 2025-04-17 21:16:07 +07:00
Para Dox
4e8f6dbe58 fix 2025-04-17 21:13:26 +07:00
Para Dox
81d3d0bf41 fix 2025-04-17 21:12:02 +07:00
Para Dox
f7acaa9c85 fix 2025-04-17 21:08:44 +07:00
Para Dox
3303bd1d6d fix 2025-04-17 21:07:57 +07:00
Para Dox
8ff2f5a2ba fix 2025-04-17 21:06:43 +07:00
Para Dox
066af74c86 fix 2025-04-17 21:04:25 +07:00
Para Dox
15baa3183a fix 2025-04-17 21:03:34 +07:00
Para Dox
0605685825 fix 2025-04-17 21:00:09 +07:00
Para Dox
2116f29f2b old celo 2025-04-17 20:58:25 +07:00
Para Dox
78148783c8 fix 2025-04-17 17:03:45 +07:00
Para Dox
45ea204eea fix 2025-04-17 17:01:12 +07:00
Para Dox
3fbd7432e6 fix 2025-04-17 16:58:12 +07:00
Para Dox
40ecc82a94 fix 2025-04-17 16:22:00 +07:00
Para Dox
3167e1e80d fix 2025-04-17 15:40:28 +07:00
Para Dox
e6c7869c83 try 2025-04-16 17:38:28 +07:00
Para Dox
8ade443259 fix 2025-04-16 16:49:55 +07:00
Para Dox
064199f312 test 2025-04-16 16:45:10 +07:00
Para Dox
809d2857a6 fix 2025-04-16 16:43:11 +07:00
Para Dox
9193cf91ab new try 2025-04-16 16:30:15 +07:00
Para Dox
d4ddf5bd52 try 2025-04-16 16:26:51 +07:00
Para Dox
9f1450597f fix 2025-04-16 16:11:24 +07:00
Para Dox
3d12f51a75 new 2025-04-16 15:11:25 +07:00
Para Dox
bc6f08e6d9 fix 2025-04-16 15:06:50 +07:00
Para Dox
58e992ba1d fix 2025-04-16 14:55:17 +07:00
Para Dox
35840e9cce fix 2025-04-16 13:59:35 +07:00
Para Dox
1813c7f354 fix 2025-04-16 13:59:05 +07:00
Para Dox
cc47c8a543 new 2025-04-16 13:54:29 +07:00
Para Dox
b65b02ad75 fix 2025-04-16 11:35:04 +07:00
Para Dox
21fabe3e68 fix 2025-04-16 07:37:57 +07:00
Para Dox
55659ed6f0 fix 2025-04-15 17:07:19 +07:00
Para Dox
4206a48d04 fix 2025-04-15 17:06:54 +07:00
Para Dox
92caabd3a9 all 2025-04-15 17:04:55 +07:00
Para Dox
9d016c8c9e try 2025-04-15 16:49:28 +07:00
Para Dox
355f1d32ed fix 2025-04-15 16:48:34 +07:00
Para Dox
9bd49dd1d2 fix 2025-04-15 16:25:23 +07:00
Para Dox
73e144a92a fix 2025-04-15 16:10:30 +07:00
Para Dox
08ae2bc826 fix 2025-04-15 16:09:45 +07:00
Para Dox
fdca7f7762 fix 2025-04-15 16:07:33 +07:00
Para Dox
1b7a44e4d5 fix 2025-04-15 16:06:08 +07:00
Para Dox
333a21b528 fix 2025-04-15 16:00:46 +07:00
Para Dox
e5ef332e0e fix 2025-04-15 15:58:58 +07:00
Para Dox
d6c1eb0211 fix 2025-04-15 15:49:25 +07:00
Para Dox
2c4a031c2b fix 2025-04-15 15:40:26 +07:00
Para Dox
2e2842bd83 fix 2025-04-15 14:59:22 +07:00
Para Dox
77963a6021 fix 2025-04-15 14:44:19 +07:00
Para Dox
40cd48af0d fix 2025-04-15 14:40:18 +07:00
Para Dox
1b93862b79 fix 2025-04-15 14:23:36 +07:00
Para Dox
c898f72e71 fix 2025-04-15 14:22:27 +07:00
Para Dox
4206e6ee66 fix 2025-04-15 14:15:47 +07:00
Para Dox
e1b256de74 fix 2025-04-15 14:13:29 +07:00
Para Dox
3177bc5a0b fix 2025-04-15 14:04:36 +07:00
Para Dox
55f544fc92 fix 2025-04-15 14:03:03 +07:00
Para Dox
ce7f425339 fix 2025-04-15 14:02:34 +07:00
Para Dox
3d3b59e13a fix 2025-04-15 13:54:24 +07:00
Para Dox
7102146c1d news 2025-04-15 13:43:05 +07:00
Para Dox
9334c6c9e9 news 2025-04-15 13:41:58 +07:00
Para Dox
47eb68d99a no local urls anymore 2025-04-15 12:59:41 +07:00
Para Dox
6e6390f8be fix 2025-04-15 12:47:35 +07:00
Para Dox
62d44aa62c fix 2025-04-15 12:46:54 +07:00
Para Dox
c2230b8f33 fix 2025-04-15 11:26:19 +07:00
Para Dox
8ea3f5b7d5 fix 2025-04-15 11:15:12 +07:00
Para Dox
22180bcf2d fix 2025-04-15 11:11:00 +07:00
Para Dox
81b368c9ed fix 2025-04-15 11:05:18 +07:00
Para Dox
03fa670d1a fix 2025-04-15 10:55:13 +07:00
Para Dox
18c410d6ac fix 2025-04-15 10:49:51 +07:00
Para Dox
10367e06fb fix 2025-04-15 05:42:52 +07:00
Para Dox
0359d06d65 fix 2025-04-15 05:21:32 +07:00
Para Dox
c0379e2994 fix 2025-04-15 04:55:36 +07:00
Para Dox
19cc4caa89 fix 2025-04-15 04:14:21 +07:00
Para Dox
d6fa540b64 fix 2025-04-15 03:58:36 +07:00
Para Dox
31c48eaf3c fix 2025-04-15 03:39:19 +07:00
Para Dox
697265c2b0 fix 2025-04-15 03:35:50 +07:00
Para Dox
a248b01e9e de-11 2025-04-15 03:32:52 +07:00
Para Dox
e9b473bddc jo 2025-04-15 03:18:08 +07:00
Para Dox
7cf48f7061 fix 2025-04-15 03:12:51 +07:00
Para Dox
eef099ef47 fix 2025-04-15 03:09:06 +07:00
Para Dox
7029b7392d fix 2025-04-15 02:46:58 +07:00
Para Dox
0a69a6bb44 fix 2025-04-15 02:46:24 +07:00
Para Dox
77e71635c6 fix 2025-04-15 02:42:14 +07:00
Para Dox
6e321ca221 fix 2025-04-15 02:40:44 +07:00
Para Dox
e439ade5ab fix 2025-04-15 02:38:19 +07:00
Para Dox
1948c22eba fix 2025-04-15 02:35:44 +07:00
Para Dox
95d64d3d9c this 2025-04-15 02:27:41 +07:00
Para Dox
b3b73b2cad ne 2025-04-15 02:26:12 +07:00
Para Dox
73c76e28b3 new 2025-04-15 02:19:41 +07:00
Para Dox
45da1d7bda fix 2025-04-14 21:17:09 +07:00
Para Dox
3aeefd6fa5 fix 2025-04-14 21:05:06 +07:00
Para Dox
86641804a7 neede 2025-04-14 21:04:32 +07:00
Para Dox
7bfcc128c4 fix 2025-04-14 20:59:30 +07:00
Para Dox
e26ff24d4a needed 2025-04-14 20:59:13 +07:00
Para Dox
bf1c587253 fix 2025-04-14 20:55:56 +07:00
Para Dox
c36b14229e fix 2025-04-14 20:52:57 +07:00
Para Dox
2a3f74bd0d fix 2025-04-14 20:49:12 +07:00
Para Dox
f5de048804 fix 2025-04-14 20:48:36 +07:00
Para Dox
058517a20d fix 2025-04-14 20:46:24 +07:00
Para Dox
80d2faa98f fix 2025-04-14 20:38:32 +07:00
Para Dox
96d9081230 fix 2025-04-14 20:36:05 +07:00
Para Dox
f99b715f77 new 2025-04-14 20:33:41 +07:00
Para Dox
61a583441a get going 2025-04-14 20:31:25 +07:00
Para Dox
4a71b12a2c fix 2025-04-14 20:03:57 +07:00
Para Dox
e1649ab259 fix 2025-04-14 20:02:07 +07:00
Para Dox
f9245415fd fix 2025-04-14 19:58:40 +07:00
Para Dox
9719ceb420 fix 2025-04-14 19:51:46 +07:00
Para Dox
dcc2cb9b65 fix 2025-04-14 19:46:27 +07:00
Para Dox
8f1ecf48ae fix 2025-04-14 19:44:42 +07:00
Para Dox
3decde68d7 fix 2025-04-14 19:41:22 +07:00
Para Dox
a90397b2e9 fix 2025-04-14 19:39:06 +07:00
Para Dox
8fb39d33fb fix 2025-04-14 19:27:17 +07:00
Para Dox
ab919e76ef fix 2025-04-14 19:24:28 +07:00
Para Dox
b7c7080167 fix 2025-04-14 19:23:40 +07:00
Para Dox
1f974f0b85 fix 2025-04-14 19:19:23 +07:00
Para Dox
b2e3fe185f fix 2025-04-14 19:18:57 +07:00
Para Dox
165aeb6866 doit 2025-04-14 19:15:57 +07:00
Para Dox
d1b3f02248 fix 2025-04-14 19:14:27 +07:00
Para Dox
91bb9d55da fix 2025-04-14 19:11:58 +07:00
Para Dox
7358814ebd fix 2025-04-14 19:08:28 +07:00
Para Dox
3a1406bc55 fix 2025-04-14 19:04:52 +07:00
Para Dox
2aa0a2d5a8 fix 2025-04-14 19:02:40 +07:00
Para Dox
8bd6cde31f fix 2025-04-14 18:58:12 +07:00
Para Dox
869b14fc38 fix 2025-04-14 18:57:10 +07:00
Para Dox
d2ad1f1428 fix 2025-04-14 18:52:30 +07:00
Para Dox
40f6648de1 fix 2025-04-14 18:48:19 +07:00
Para Dox
4db3ef3562 fix 2025-04-14 18:43:08 +07:00
Para Dox
144d3c3650 t 2025-04-14 18:09:17 +07:00
Para Dox
6b9d512003 new 2025-04-14 18:08:32 +07:00
Para Dox
a2db3cefdb fix 2025-04-14 18:01:40 +07:00
Para Dox
73a2a804da remove rollup.json for bob 2025-04-14 18:01:25 +07:00
Para Dox
02caf42380 fix 2025-04-14 17:59:09 +07:00
Para Dox
876de679ae fix 2025-04-14 17:50:46 +07:00
Para Dox
5790c21c32 update genesis for lisk and remove rollup.json 2025-04-14 17:49:36 +07:00
Para Dox
6c54fdb6bd de-13 2025-04-14 17:39:56 +07:00
Para Dox
ef812230f3 fix 2025-04-14 17:32:49 +07:00
Para Dox
7d55b39c3a fix 2025-04-14 15:09:08 +07:00
Para Dox
f9cdd75cf8 fix 2025-04-14 15:04:31 +07:00
Para Dox
61764ef61e fix 2025-04-14 15:02:17 +07:00
Para Dox
2659865064 fix 2025-04-14 14:59:58 +07:00
Para Dox
52a49b45c4 fix 2025-04-14 14:54:51 +07:00
Para Dox
1351a225b6 fix 2025-04-14 14:52:30 +07:00
Para Dox
dd4725930e fix 2025-04-14 14:24:09 +07:00
Para Dox
24191ba4f0 fix 2025-04-14 14:22:10 +07:00
Para Dox
6080bb144b fix 2025-04-14 13:59:51 +07:00
Para Dox
02cf2b32b1 fix 2025-04-14 13:56:46 +07:00
Para Dox
e98fc0072a fix 2025-04-14 13:55:15 +07:00
Para Dox
1c5b0c2f0f fix 2025-04-14 13:52:46 +07:00
Para Dox
f5c4c0507a fix 2025-04-14 13:45:14 +07:00
Para Dox
3568b58816 cleanup 2025-04-14 13:40:36 +07:00
Para Dox
a907c000d3 scheme 2025-04-14 13:39:33 +07:00
Para Dox
dc758a8c7d fix 2025-04-14 13:37:21 +07:00
Para Dox
537d7406a3 fix 2025-04-14 13:28:51 +07:00
Para Dox
81c04e3f23 fix 2025-04-14 13:24:44 +07:00
Para Dox
21c50072c3 fix 2025-04-14 13:16:11 +07:00
Para Dox
49a2d601c5 hope 2025-04-14 13:07:45 +07:00
Para Dox
a524b0b7cd swoosh 2025-04-14 13:04:03 +07:00
Para Dox
56e165ebfb era? 2025-04-14 12:46:40 +07:00
Para Dox
f1bcd0aef2 era? 2025-04-14 12:45:39 +07:00
Para Dox
a2679d1d0b era? 2025-04-14 12:44:00 +07:00
Para Dox
aa7ebb8454 era? 2025-04-14 12:41:35 +07:00
Para Dox
7bc2c6857e era? 2025-04-14 12:39:47 +07:00
Para Dox
23fa663cdd era? 2025-04-14 12:34:42 +07:00
Para Dox
7d6ceb74d8 fix 2025-04-14 11:59:50 +07:00
Para Dox
1829f2394e fix 2025-04-14 11:37:28 +07:00
Para Dox
fa2c706daa fix 2025-04-14 11:36:53 +07:00
Para Dox
cd75b35ce7 fix 2025-04-14 11:30:50 +07:00
Para Dox
38ba497057 fix 2025-04-14 11:26:57 +07:00
Para Dox
7e2ea8baaf fix 2025-04-14 11:24:46 +07:00
Para Dox
de873b0a60 era? 2025-04-14 11:20:03 +07:00
Para Dox
310b48b1fb fix 2025-04-14 11:13:12 +07:00
Para Dox
2f046b49dd fix 2025-04-14 11:12:43 +07:00
Para Dox
b653ad153c fix 2025-04-14 11:09:30 +07:00
Para Dox
6db2d5df50 fix 2025-04-14 11:02:10 +07:00
Para Dox
fda35b3792 fix 2025-04-14 11:00:47 +07:00
Para Dox
8d0cbfc483 fix 2025-04-14 10:08:11 +07:00
Para Dox
96f33135b1 fix 2025-04-13 20:44:50 +07:00
Para Dox
8ba07ac2cf fix 2025-04-13 20:42:08 +07:00
Para Dox
a919bcdbbc fix 2025-04-13 20:37:01 +07:00
Para Dox
ca7dceae80 fix 2025-04-13 20:35:30 +07:00
Para Dox
85e2ef4c3f fix 2025-04-13 20:35:06 +07:00
Para Dox
a74d493743 fix 2025-04-13 20:29:13 +07:00
Para Dox
42c9ebc55f fix 2025-04-13 20:26:01 +07:00
Para Dox
64619a5037 fix 2025-04-13 20:22:43 +07:00
Para Dox
3354f15bef fix 2025-04-13 20:17:42 +07:00
Para Dox
cfd1031962 fix 2025-04-13 20:17:13 +07:00
Para Dox
e259c428bd fix 2025-04-13 20:14:05 +07:00
Para Dox
ad3f353c8b fix 2025-04-13 20:08:27 +07:00
Para Dox
2a86ab6270 try 2025-04-13 20:04:55 +07:00
Para Dox
76618c9c31 fix 2025-04-13 19:59:42 +07:00
Para Dox
d71084ce32 fix 2025-04-13 19:57:04 +07:00
Para Dox
8240f4ca74 fix 2025-04-13 19:54:49 +07:00
Para Dox
e94d9f80e6 fix 2025-04-13 19:40:48 +07:00
Para Dox
ddec133f2a fix 2025-04-13 19:35:32 +07:00
Para Dox
d842d0101d nice 2025-04-13 18:23:03 +07:00
Para Dox
3517530076 update 2025-04-13 18:19:04 +07:00
Para Dox
43500c2405 fix 2025-04-13 18:15:17 +07:00
Para Dox
bb687505de fiux opera 2025-04-13 18:07:43 +07:00
Para Dox
23499ad176 fix 2025-04-13 18:02:12 +07:00
Para Dox
dff30fed8b fix 2025-04-13 18:01:53 +07:00
Para Dox
6df1ca59d5 fix 2025-04-13 17:52:13 +07:00
Para Dox
4b14f4b589 new fun 2025-04-13 17:46:20 +07:00
Para Dox
a3739d0d6b fix 2025-04-13 17:21:41 +07:00
Para Dox
65ab9eec06 fix 2025-04-13 17:17:59 +07:00
Para Dox
ccbb06dc05 fix 2025-04-13 17:13:55 +07:00
Para Dox
790d6807f5 go 2025-04-13 17:05:06 +07:00
Para Dox
0b673534c5 fix 2025-04-13 16:59:25 +07:00
Para Dox
358e014ad7 fix 2025-04-13 16:45:10 +07:00
Para Dox
a6279455cd fix 2025-04-13 16:24:26 +07:00
Para Dox
e422b415d0 shortcut 2025-04-13 16:10:52 +07:00
Para Dox
5dc086ddeb new 2025-04-13 16:10:21 +07:00
Para Dox
0533c3c905 fix 2025-04-13 16:09:12 +07:00
Para Dox
09d8656e33 fix 2025-04-13 16:02:09 +07:00
Para Dox
912cc9388b fix 2025-04-13 15:49:36 +07:00
Para Dox
6b0036ef5d fix 2025-04-13 15:46:13 +07:00
Para Dox
592e1ba957 fix 2025-04-13 15:43:10 +07:00
Para Dox
5a9f310e13 fix 2025-04-13 14:23:05 +07:00
Para Dox
4384e527c7 fix 2025-04-13 14:14:50 +07:00
Para Dox
d782c70c75 new 2025-04-13 13:56:34 +07:00
Para Dox
3df500c911 dudeluu 2025-04-13 13:55:04 +07:00
Para Dox
e04c199ca4 fix 2025-04-13 13:53:35 +07:00
Para Dox
c7eef48e07 fix 2025-04-13 13:46:32 +07:00
Para Dox
7c4220f2b5 better nginx proxy 2025-04-13 13:19:08 +07:00
Para Dox
c4ae3e8850 better nginx proxy 2025-04-13 13:11:47 +07:00
Para Dox
2e45eade58 better nginx proxy 2025-04-13 13:09:44 +07:00
Para Dox
fba5ba7a1d better nginx proxy 2025-04-13 13:07:54 +07:00
Para Dox
b12c5fd113 fix 2025-04-13 12:43:14 +07:00
Para Dox
a665373b50 fix 2025-04-13 12:35:00 +07:00
Para Dox
5bbe1a5565 do it 2025-04-13 12:26:18 +07:00
Para Dox
412e130d46 try 2025-04-13 12:21:34 +07:00
Para Dox
5bcd9f5469 fix 2025-04-13 12:19:49 +07:00
Para Dox
c78a336884 renanme 2025-04-13 12:19:28 +07:00
Para Dox
d54ae62ab1 fix 2025-04-13 12:19:04 +07:00
Para Dox
e3781d0b36 new way 2025-04-13 12:18:16 +07:00
Para Dox
447882395e fix 2025-04-13 12:16:38 +07:00
Para Dox
39bbbd6494 fix 2025-04-13 12:16:19 +07:00
Para Dox
9f62aa2d2f fix 2025-04-13 12:08:12 +07:00
Para Dox
59b2f0f24d fix 2025-04-13 12:03:40 +07:00
Para Dox
1a2e99ddc3 fix 2025-04-13 11:51:20 +07:00
Para Dox
b6c60eea6b fix 2025-04-12 14:23:44 +07:00
Para Dox
6c21f202fa fix 2025-04-12 14:18:55 +07:00
Para Dox
18b1ddcfc3 fix 2025-04-12 14:15:52 +07:00
Para Dox
11641209d0 try 2025-04-12 14:12:25 +07:00
Para Dox
6a5815bc5b fix 2025-04-12 14:10:49 +07:00
Para Dox
cb77ca59de fix 2025-04-12 14:09:14 +07:00
Para Dox
9f02efc4a3 fix 2025-04-12 14:06:57 +07:00
Para Dox
b6418a495a fix 2025-04-12 13:54:59 +07:00
Para Dox
fccfbd6515 fix 2025-04-12 13:53:54 +07:00
Para Dox
9930a53f6d fix 2025-04-12 13:33:44 +07:00
Para Dox
fbf8d20f62 try 2025-04-12 13:29:27 +07:00
Para Dox
596324a554 fix 2025-04-12 13:26:30 +07:00
Para Dox
160761f632 fix 2025-04-12 13:24:52 +07:00
Para Dox
889d64c1c0 fix 2025-04-12 13:23:17 +07:00
Para Dox
09d6593e85 fix 2025-04-12 13:16:53 +07:00
Para Dox
1b40e92de2 fix 2025-04-12 13:15:38 +07:00
Para Dox
360b4effb8 fix 2025-04-12 13:13:40 +07:00
Para Dox
a549aadb5b fix 2025-04-12 13:11:53 +07:00
Para Dox
4001b9ab79 fix 2025-04-12 13:07:13 +07:00
Para Dox
aa86d6fcd2 fix 2025-04-12 13:04:50 +07:00
Para Dox
beed4d2637 fix 2025-04-12 13:02:51 +07:00
Para Dox
7e2206ae18 fix 2025-04-12 13:00:08 +07:00
Para Dox
c241595dfd fix 2025-04-12 12:57:50 +07:00
Para Dox
b3c69ea511 fix 2025-04-12 12:39:51 +07:00
Para Dox
d837b89d4a fix 2025-04-12 12:38:23 +07:00
Para Dox
14625b13a7 fix 2025-04-12 12:31:17 +07:00
Para Dox
360a37e673 fix 2025-04-12 12:24:37 +07:00
Para Dox
52b2227df1 fix 2025-04-12 12:19:58 +07:00
Para Dox
6220b2bc24 fix 2025-04-12 12:15:38 +07:00
Para Dox
8a0e84a90f fix 2025-04-12 12:10:45 +07:00
Para Dox
ce28c61601 fix 2025-04-12 12:09:18 +07:00
Para Dox
bb1b5105cd fix 2025-04-12 12:07:19 +07:00
Para Dox
a300c5be58 fix 2025-04-12 11:57:43 +07:00
Para Dox
2f3319cd11 fix 2025-04-12 11:55:40 +07:00
Para Dox
738a34f786 fix 2025-04-12 11:48:48 +07:00
Para Dox
e8f8f8b0b2 fix 2025-04-12 11:45:19 +07:00
Para Dox
4cd47f1d04 fix 2025-04-12 11:25:58 +07:00
Para Dox
81a6e765d4 fix 2025-04-12 11:22:29 +07:00
Para Dox
75cf385648 fix 2025-04-11 18:18:34 +07:00
Para Dox
c812ac1dea fix 2025-04-11 18:12:27 +07:00
Para Dox
b857dd7068 bartio2 2025-04-11 18:09:34 +07:00
Para Dox
d315bef269 bartio 2025-04-11 18:08:58 +07:00
Para Dox
a4560ca392 fix 2025-04-11 18:05:42 +07:00
Para Dox
613a536432 fix 2025-04-11 17:57:52 +07:00
Para Dox
9afff9d756 fix 2025-04-11 17:55:34 +07:00
Para Dox
8c3647c887 fix 2025-04-11 17:52:13 +07:00
Para Dox
edda4f89f4 fix 2025-04-11 17:50:48 +07:00
Para Dox
fa8e38da57 fix 2025-04-11 17:45:57 +07:00
Para Dox
fdd9ae8bca fix 2025-04-11 17:40:56 +07:00
Para Dox
e92198da88 fix 2025-04-11 17:38:09 +07:00
Para Dox
187144f72d fix 2025-04-11 17:32:01 +07:00
Para Dox
d2f95ab832 fix 2025-04-11 17:25:58 +07:00
Para Dox
2c4a591d38 fix 2025-04-11 17:22:41 +07:00
Para Dox
d5cc45bb24 fix 2025-04-11 17:17:19 +07:00
Para Dox
428938f5b2 fix 2025-04-11 15:52:41 +07:00
Para Dox
785983ee7e fix 2025-04-11 15:51:13 +07:00
Para Dox
a5d3b5d213 fix 2025-04-11 15:45:45 +07:00
Para Dox
3fa5cc4900 fix 2025-04-11 15:36:40 +07:00
Para Dox
6b8108b470 fix 2025-04-11 15:30:23 +07:00
Para Dox
5b87c35356 fix 2025-04-11 15:25:55 +07:00
Para Dox
1529bf3bfc better readme 2025-04-11 15:07:28 +07:00
Para Dox
2869855f04 better readme 2025-04-11 15:03:22 +07:00
Para Dox
4c0dda7425 better readme 2025-04-11 14:58:28 +07:00
Para Dox
5eacd839e8 better readme 2025-04-11 14:53:25 +07:00
Para Dox
cb350ce446 better readme 2025-04-11 14:50:55 +07:00
Para Dox
4616a4b0bf better readme 2025-04-11 14:45:24 +07:00
Para Dox
31b33d533a a readme that can be called like one 2025-04-11 14:33:31 +07:00
Para Dox
6070b6ec4c polish 2025-04-11 14:10:04 +07:00
Para Dox
6abcf9a126 this is my church 2025-04-11 13:25:13 +07:00
Para Dox
e4296a117c chainsaw massacre 2025-04-11 13:21:07 +07:00
Para Dox
00997f8bc0 cleanup 2025-04-10 16:28:45 +07:00
Para Dox
0f89b5ddf6 fix 2025-04-10 15:18:43 +07:00
Para Dox
20e9d26d44 some progress 2025-04-10 14:43:11 +07:00
Para Dox
5ce47e35dd new beginning 2025-04-10 13:36:00 +07:00
squidbear
e2f9b2f634 fix 2025-04-01 12:56:44 +02:00
squidbear
57434f06a6 fix 2025-04-01 12:42:36 +02:00
squidbear
d2ec1b1d23 fix 2025-04-01 12:35:54 +02:00
squidbear
673e4cd0a5 fix 2025-04-01 12:31:20 +02:00
squidbear
9fa87ecc59 fix 2025-04-01 12:25:48 +02:00
squidbear
b8e9114c8a fix 2025-04-01 11:40:52 +02:00
squidbear
2bd682d0d6 fix 2025-04-01 11:35:27 +02:00
squidbear
3bd4679c52 fix 2025-04-01 08:18:13 +02:00
squidbear
1d937a07ab fix 2025-04-01 08:16:47 +02:00
squidbear
fcdb322d2f fix 2025-04-01 07:54:24 +02:00
squidbear
abd303aba8 fix 2025-04-01 07:38:54 +02:00
squidbear
71df3fee70 fix 2025-04-01 07:30:40 +02:00
squidbear
eeb94ff99a fix 2025-04-01 07:29:38 +02:00
squidbear
4e7f1ce6ca fix 2025-04-01 07:28:18 +02:00
squidbear
c8e765d134 fix 2025-04-01 07:18:50 +02:00
squidbear
ba6eb35830 bring it back 2025-04-01 07:16:51 +02:00
squidbear
dd8b92ddc6 fix 2025-04-01 07:13:48 +02:00
squidbear
42a8ed5761 fix 2025-04-01 07:12:05 +02:00
squidbear
6283e3fa1a fix 2025-04-01 07:06:58 +02:00
squidbear
6ec304e4d6 fix 2025-04-01 07:00:15 +02:00
squidbear
15b5008c6c fix 2025-04-01 06:39:38 +02:00
squidbear
71382469cb fix 2025-03-31 19:06:06 +02:00
squidbear
bf76ae4efe new 2025-03-31 18:58:56 +02:00
squidbear
57a2dae63e fix 2025-03-31 18:47:32 +02:00
squidbear
0122689aaf move the static peers to env file 2025-03-31 18:17:52 +02:00
squidbear
5c89c5ab65 fix 2025-03-31 18:04:58 +02:00
squidbear
48729847a4 create a existing config 2025-03-31 17:44:23 +02:00
squidbear
acafbe4406 new op 2025-03-31 17:03:12 +02:00
squidbear
32ebc7ca2b soneium 2025-03-31 17:01:16 +02:00
squidbear
e4c9778db7 fix 2025-03-31 16:37:02 +02:00
squidbear
21acc360b4 naming 2025-03-31 14:21:25 +02:00
squidbear
fec5a956cd more culr need to follow redirects 2025-03-31 13:55:49 +02:00
squidbear
24f12d7b94 fix foir tron 2025-03-31 12:59:11 +02:00
squidbear
0a1c490e87 fix for tron 2025-03-31 12:58:11 +02:00
squidbear
86e08f54a9 fix 2025-03-31 12:43:41 +02:00
squidbear
f7502178eb update 2025-03-31 12:37:51 +02:00
squidbear
7b187639b1 update 2025-03-31 12:34:12 +02:00
squidbear
cd673e89e2 fix 2025-03-31 12:06:56 +02:00
squidbear
1a68c43f79 executable 2025-03-31 11:22:11 +02:00
squidbear
a8ac15094c can I haz init 2025-03-31 11:19:10 +02:00
squidbear
74161fdf89 maybe 2025-03-31 11:18:21 +02:00
squidbear
2a5def2334 fix 2025-03-31 10:40:43 +02:00
squidbear
5da176d58a fix 2025-03-31 10:38:43 +02:00
squidbear
7c34e72fe1 fix 2025-03-31 10:35:44 +02:00
squidbear
021f8bddf6 fix 2025-03-31 10:30:54 +02:00
squidbear
8f4feae8ef do the snap thing 2025-03-31 10:30:43 +02:00
squidbear
22ca91a61e fix 2025-03-31 10:23:49 +02:00
squidbear
ecc9a7f709 fix 2025-03-31 10:13:53 +02:00
squidbear
5947402c38 fix 2025-03-31 10:12:56 +02:00
squidbear
8a04e82516 fix 2025-03-31 10:11:40 +02:00
squidbear
846385627c fix 2025-03-31 10:09:02 +02:00
squidbear
fbf0fef203 fix 2025-03-31 10:07:46 +02:00
squidbear
150e721d9f fix 2025-03-31 10:03:11 +02:00
squidbear
1f913f7505 demo 2025-03-31 10:02:21 +02:00
squidbear
021a6ebc83 fix 2025-03-31 10:01:39 +02:00
squidbear
661b6c1315 do it 2025-03-31 09:57:37 +02:00
squidbear
5a0915d68c fix 2025-03-31 09:54:52 +02:00
squidbear
6ba821f721 fix 2025-03-31 09:15:08 +02:00
squidbear
afdadf8f8b fix 2025-03-31 09:12:03 +02:00
squidbear
1600e04fef fix 2025-03-31 09:07:53 +02:00
squidbear
4c7560053d make mulitcurl follow redirects 2025-03-31 08:56:41 +02:00
squidbear
69454c7e27 new try 2025-03-31 08:53:14 +02:00
squidbear
9804628c8b fix 2025-03-31 08:49:21 +02:00
squidbear
d69d91e46f fix 2025-03-31 08:48:12 +02:00
squidbear
fd6991a6ab fix 2025-03-31 08:44:48 +02:00
squidbear
04f23897b4 fix 2025-03-31 08:42:16 +02:00
squidbear
95c74c917d fix 2025-03-31 08:40:11 +02:00
squidbear
098991a688 this is victory 2025-03-31 08:28:40 +02:00
squidbear
55ce0f9653 welcome to the team 2025-03-31 07:32:05 +02:00
squidbear
04d13fa4ea correct network names 2025-03-31 07:30:53 +02:00
squidbear
bf867d6b1a fix 2025-03-31 07:28:54 +02:00
squidbear
0dad48e9bd fix 2025-03-31 07:16:21 +02:00
squidbear
6d1f2ff816 try 2025-03-31 06:07:20 +02:00
squidbear
8b16461f83 make it more similar 2025-03-31 06:01:58 +02:00
squidbear
bede49b624 move ws port to http port 2025-03-31 05:50:56 +02:00
squidbear
91f4c21573 more 2025-03-30 15:38:52 +02:00
squidbear
c33a1acd5f fix 2025-03-30 15:25:49 +02:00
squidbear
8da8e5af3c fix shackle 2025-03-30 15:16:34 +02:00
squidbear
6751e15468 fix 2025-03-30 14:32:40 +02:00
squidbear
ddfb8ad99d fix 2025-03-30 14:13:55 +02:00
squidbear
ffc62a3130 fix 2025-03-30 14:11:28 +02:00
squidbear
052125a4bf sync pebble fullnodes 2025-03-30 14:02:23 +02:00
squidbear
a6dc64aff8 no path 2025-03-30 14:00:30 +02:00
squidbear
de39ebe07f fix 2025-03-30 13:47:23 +02:00
squidbear
f33a72b6ff fix 2025-03-30 13:45:56 +02:00
squidbear
1306b400cf test 2025-03-30 13:44:50 +02:00
squidbear
ca692fbdd9 new ways 2025-03-30 13:43:40 +02:00
squidbear
674e559d5a fix 2025-03-30 13:43:17 +02:00
squidbear
398a9a916f fix fuse 2025-03-30 11:49:28 +02:00
squidbear
0f4bb6a7ea fix 2025-03-30 11:20:15 +02:00
squidbear
37a484af18 naming is shaming 2025-03-30 11:16:15 +02:00
squidbear
81dbfa8a1c make fuse aligned 2025-03-30 11:10:08 +02:00
squidbear
c1ae806ef0 new worldchain 2025-03-30 11:02:43 +02:00
squidbear
d6fc0261ab nicer 2025-03-30 10:56:31 +02:00
squidbear
9829b7427b fix the nodekey 2025-03-30 10:51:10 +02:00
squidbear
a862f5fc10 fix the nodekey 2025-03-30 10:50:06 +02:00
squidbear
c6e2708689 fix 2025-03-30 10:41:30 +02:00
squidbear
fa0b08a6cb fix 2025-03-30 10:36:14 +02:00
squidbear
e0ea45a5e5 try 2025-03-30 10:34:09 +02:00
squidbear
da8fcd51bd try custom genesis blocks 2025-03-30 10:32:45 +02:00
squidbear
fc3808716b fix 2025-03-30 10:32:11 +02:00
squidbear
d6e7ac9ab8 fix 2025-03-30 10:30:43 +02:00
squidbear
298e6b9636 try 2025-03-30 10:23:57 +02:00
squidbear
afcefe4c6f fast op 2025-03-30 10:21:50 +02:00
squidbear
f533d6bba6 fix 2025-03-30 09:38:24 +02:00
squidbear
1e25c4f52a try op-erigon 2025-03-30 09:32:55 +02:00
squidbear
67e5aeb77d new client 2025-03-30 09:31:50 +02:00
squidbear
4cc066551a fix 2025-03-30 09:31:37 +02:00
squidbear
d3517085fa haqq is final 2025-03-30 08:50:34 +02:00
squidbear
c64a90d92e cleanup the zircuit mess 2025-03-30 08:50:14 +02:00
squidbear
f39a5763d6 fix 2025-03-30 08:29:22 +02:00
squidbear
5038b0f721 fix 2025-03-30 08:23:56 +02:00
squidbear
177455c2ed fix 2025-03-30 08:20:04 +02:00
squidbear
fe6048e1aa fix 2025-03-30 08:18:26 +02:00
squidbear
7e37f1d012 add some legacy 2025-03-30 07:55:40 +02:00
squidbear
77fe6e0c60 new 2025-03-30 07:46:56 +02:00
squidbear
4d431ad75d do it 2025-03-30 07:44:00 +02:00
squidbear
bf98fd35ff more 2025-03-30 07:33:19 +02:00
squidbear
86e4ed49f2 proper testnet naming 2025-03-30 07:31:30 +02:00
squidbear
13b3db2327 fix 2025-03-30 07:30:50 +02:00
squidbear
acde267881 try 2025-03-30 07:25:11 +02:00
squidbear
eb2e27ca54 new 2025-03-30 07:24:17 +02:00
squidbear
6eb96a4531 fix 2025-03-30 07:09:19 +02:00
squidbear
a4ba04b1d1 this is a bsc fork 2025-03-30 06:59:23 +02:00
squidbear
d8e634a725 fix 2025-03-30 06:58:15 +02:00
squidbear
df4b44b838 more core 2025-03-30 06:58:00 +02:00
squidbear
42feb2166a fix 2025-03-30 06:35:04 +02:00
squidbear
f1c791685c new 2025-03-29 12:08:28 +01:00
squidbear
f4745d7484 fix 2025-03-29 11:46:51 +01:00
squidbear
231009dade done 2025-03-29 11:45:42 +01:00
squidbear
85ed047d60 fix 2025-03-29 11:29:23 +01:00
squidbear
f1c2ee776d fix 2025-03-29 11:22:02 +01:00
squidbear
1ff6e59982 fix 2025-03-29 11:19:39 +01:00
squidbear
549390fe06 fix 2025-03-29 11:13:25 +01:00
squidbear
b95cd28983 fix 2025-03-29 11:12:11 +01:00
squidbear
20081d0d53 making sense 2025-03-29 11:11:25 +01:00
squidbear
5bcfcdab1a moved 2025-03-29 11:10:26 +01:00
squidbear
5c49ea6fca new envs 2025-03-29 11:08:48 +01:00
squidbear
108e37fd6d fix 2025-03-29 11:05:41 +01:00
squidbear
339a09c1b8 fix 2025-03-29 11:04:17 +01:00
squidbear
60aaf6b194 new arrivals 2025-03-29 10:58:23 +01:00
squidbear
19d631fdc6 update 2025-03-29 10:57:12 +01:00
squidbear
74db467697 new 2025-03-29 10:46:52 +01:00
squidbear
50fd75c33e fix 2025-03-29 10:22:47 +01:00
squidbear
e80a076f71 new 2025-03-29 10:17:41 +01:00
squidbear
0c52bbd9d0 new 2025-03-29 09:51:14 +01:00
squidbear
c3e4e91c1b fix 2025-03-29 09:47:56 +01:00
squidbear
feacfbfc29 move 2025-03-29 09:43:35 +01:00
squidbear
75b3720a17 fix 2025-03-29 09:41:23 +01:00
squidbear
6895fae601 new 2025-03-29 09:36:42 +01:00
squidbear
04e1fc542c fix 2025-03-29 09:32:47 +01:00
squidbear
436d82f759 fix 2025-03-29 09:31:41 +01:00
squidbear
6326cca752 fix 2025-03-29 09:24:40 +01:00
squidbear
1476e2032f util 2025-03-29 08:58:01 +01:00
squidbear
2b82a11bcf fix 2025-03-29 08:27:45 +01:00
squidbear
3ca021b6c8 this is so retraded 2025-03-29 08:21:12 +01:00
squidbear
ab83982bf3 this is so retraded 2025-03-29 08:15:42 +01:00
squidbear
5aa26694b2 this is so retraded 2025-03-29 08:08:08 +01:00
squidbear
47e3ddad12 fix 2025-03-29 08:01:30 +01:00
squidbear
48ad1fa0d5 try again 2025-03-29 07:53:49 +01:00
squidbear
97384d28db fix 2025-03-29 07:52:15 +01:00
squidbear
d767ea905d try again 2025-03-29 07:50:29 +01:00
squidbear
934cbbd16d fix 2025-03-29 07:38:53 +01:00
squidbear
1661c82394 Dfix 2025-03-29 07:10:52 +01:00
squidbear
a077068316 Dfix 2025-03-29 07:09:30 +01:00
squidbear
c991c5a333 test 2025-03-29 06:59:37 +01:00
squidbear
62c7af241a fix 2025-03-29 06:51:20 +01:00
squidbear
3f9e0ff860 fix 2025-03-29 06:43:44 +01:00
squidbear
286941154d fix 2025-03-29 06:38:43 +01:00
squidbear
54b50584f4 fix 2025-03-29 06:36:44 +01:00
squidbear
8fcedfeddf fix 2025-03-29 06:22:27 +01:00
squidbear
e00ef5b0c3 fix 2025-03-29 06:20:00 +01:00
squidbear
8f62df644f fix 2025-03-29 06:16:43 +01:00
squidbear
9699235a63 fix 2025-03-29 06:13:25 +01:00
squidbear
16c89b550b fix 2025-03-29 06:10:07 +01:00
squidbear
d7755a1519 fix 2025-03-29 06:08:33 +01:00
squidbear
117ecfd7c1 fix 2025-03-29 06:07:24 +01:00
squidbear
877edb5bb1 fix 2025-03-29 06:05:12 +01:00
squidbear
8175d9e1de new 2025-03-29 06:01:39 +01:00
squidbear
f1e4953ace new sync 2025-03-29 02:42:19 +01:00
squidbear
7aaa67d886 fix 2025-03-29 02:31:27 +01:00
squidbear
82a3c4b5e1 fix 2025-03-29 02:29:18 +01:00
squidbear
2d5989d4fc fix 2025-03-29 02:23:41 +01:00
squidbear
7459c22470 fix 2025-03-29 02:01:15 +01:00
squidbear
a2dab10c4f fix 2025-03-29 01:55:16 +01:00
squidbear
34f6c45146 update 2025-03-28 15:30:11 +01:00
squidbear
f7795d916c fix 2025-03-28 15:23:02 +01:00
squidbear
b10e94a8d6 fix 2025-03-28 15:20:34 +01:00
squidbear
b955f5f1bf update 2025-03-28 15:09:42 +01:00
squidbear
072beff0e4 new try 2025-03-28 14:52:29 +01:00
squidbear
475fdc5bb6 fix 2025-03-28 14:33:51 +01:00
squidbear
1a8bd4aeb5 fix 2025-03-28 14:27:41 +01:00
squidbear
681c8390b3 fix 2025-03-28 14:17:56 +01:00
squidbear
c44b27e4ea fix 2025-03-28 14:16:14 +01:00
squidbear
67445fffe2 new life 2025-03-28 14:01:44 +01:00
squidbear
e05d094ea0 stupi 2025-03-28 14:00:16 +01:00
squidbear
a1c80e521c squeeze it 2025-03-28 13:48:46 +01:00
squidbear
9e4c34922b fix 2025-03-28 13:46:39 +01:00
squidbear
7e168203d3 fix 2025-03-28 13:42:38 +01:00
squidbear
a5f1736211 fix 2025-03-28 13:41:56 +01:00
squidbear
e9c866d85b fix 2025-03-28 13:35:31 +01:00
squidbear
e8ce9793e2 new flavor 2025-03-28 13:31:53 +01:00
squidbear
13d7cf8fd3 fix 2025-03-28 12:39:33 +01:00
squidbear
5ab6790148 fix 2025-03-28 12:34:07 +01:00
squidbear
ed6ed26de4 new gnosis 2025-03-28 11:39:31 +01:00
squidbear
2f2e0547f5 fix 2025-03-28 11:36:19 +01:00
squidbear
b20a8c119a new hekla 2025-03-28 11:23:06 +01:00
squidbear
ce1db6ccec fix 2025-03-28 11:18:55 +01:00
squidbear
fb850fc53d fix 2025-03-28 11:17:13 +01:00
squidbear
0dc6cb2088 fix 2025-03-28 11:16:50 +01:00
squidbear
16811b0629 fix 2025-03-28 11:13:34 +01:00
squidbear
03dba4352e fix 2025-03-28 11:04:14 +01:00
squidbear
6b61a3ed9b fix 2025-03-28 10:55:16 +01:00
squidbear
5c8aad323e fix 2025-03-28 10:53:16 +01:00
squidbear
e28442f146 fix 2025-03-28 10:51:44 +01:00
squidbear
7cb7a8c04e fix 2025-03-28 10:49:47 +01:00
squidbear
c9b9598f1c fix 2025-03-28 10:48:59 +01:00
squidbear
778a41982c fix 2025-03-28 10:48:06 +01:00
squidbear
78ef14f92f fix 2025-03-28 10:46:15 +01:00
squidbear
22b5237d49 fix 2025-03-28 10:43:35 +01:00
squidbear
d714869c57 fix 2025-03-28 10:39:49 +01:00
squidbear
3f74d2c880 fix 2025-03-28 10:38:48 +01:00
squidbear
1ae26e3b39 fix 2025-03-28 10:31:08 +01:00
squidbear
7dbadfcee8 fix 2025-03-28 10:30:19 +01:00
squidbear
27065430bc fix 2025-03-28 10:26:09 +01:00
squidbear
a52bf9ff7b try 2025-03-28 09:15:51 +01:00
squidbear
0459b4a0c5 add taiko 2025-03-28 09:14:44 +01:00
squidbear
71a8d9b66b fix 2025-03-28 07:14:47 +01:00
squidbear
daa5b43c12 fix 2025-03-28 07:10:06 +01:00
squidbear
cdc7bd301f fix 2025-03-28 07:07:59 +01:00
squidbear
93c074c216 moar 2025-03-28 07:02:23 +01:00
squidbear
b6c1823ccf fix 2025-03-28 06:28:59 +01:00
squidbear
3dc980afd9 fix 2025-03-28 05:23:48 +01:00
squidbear
370f73e2fc fix 2025-03-28 05:18:42 +01:00
squidbear
5fc967bc14 beast 2025-03-28 05:08:41 +01:00
squidbear
bf21b8d301 fix 2025-03-28 04:40:47 +01:00
squidbear
972d31f892 fix 2025-03-28 04:30:51 +01:00
squidbear
3b51820e95 fix 2025-03-28 04:29:19 +01:00
squidbear
526879337f try linea erigon 2025-03-28 04:20:26 +01:00
squidbear
4b54a6b074 hopefully well 2025-03-28 04:18:47 +01:00
squidbear
8ba371176d erigon and reth 2025-03-27 12:05:09 +01:00
squidbear
9dfff441e2 link 2025-03-27 10:56:00 +01:00
squidbear
e86636a52f prune smarter 2025-03-27 10:53:56 +01:00
squidbear
c1565e2adf fix 2025-03-27 10:44:40 +01:00
squidbear
538c48e61d fix 2025-03-27 10:42:52 +01:00
squidbear
ce86a91f21 fix? 2025-03-27 10:30:04 +01:00
squidbear
9b0f1dfc77 make a test link 2025-03-27 10:23:29 +01:00
squidbear
de5c886808 now bsc it unified 2025-03-27 10:21:54 +01:00
squidbear
f59838c36d bring back heimdall 2025-03-27 09:54:32 +01:00
squidbear
ecd591ada3 fix 2025-03-27 09:50:38 +01:00
squidbear
7183070ed4 node sync url 2025-03-27 09:42:17 +01:00
squidbear
425e7266f2 try bor 2025-03-27 09:33:40 +01:00
squidbear
5af4c7c904 fix 2025-03-27 08:49:38 +01:00
squidbear
67e12023a0 teh beacon 2025-03-27 07:36:04 +01:00
squidbear
575a33f6a9 add teh colon 2025-03-27 07:33:07 +01:00
squidbear
b2319be6cc better 2025-03-27 07:17:22 +01:00
squidbear
eb1c955705 try a pebble path arbitrum 2025-03-27 07:15:44 +01:00
squidbear
a6c94940df wroom 2025-03-27 07:13:13 +01:00
squidbear
870d099f0d wathc out for genesis blocks 2025-03-27 05:07:52 +01:00
squidbear
873c11f09d every tiume the templae flips a but 2025-03-26 18:17:15 +01:00
squidbear
352dd07e26 new norma 2025-03-26 18:16:59 +01:00
squidbear
8fffa07a64 port fix 2025-03-26 16:34:05 +01:00
squidbear
7f6a7d2030 link 2025-03-26 16:27:47 +01:00
squidbear
b83f8a9780 Merge branch 'main' of github.com:StakeSquid/ethereum-rpc-docker into main 2025-03-26 11:22:50 +01:00
squidbear
e31302fc4c fix the link 2025-03-26 11:22:13 +01:00
Brain🧠
c73acf342a home 2025-03-26 12:04:00 +02:00
Brain🧠
4704817f65 revert 2025-03-26 11:58:25 +02:00
Brain🧠
868a84443f merged 2025-03-26 11:45:28 +02:00
Brain🧠
4e9b032d42 deprecated 2025-03-26 11:43:39 +02:00
squidbear
36c7178bb6 do things 2025-03-26 10:40:21 +01:00
squidbear
7748ce0cd8 new 2025-03-26 08:57:34 +01:00
squidbear
a9a293ce7b dummy 2025-03-26 08:55:42 +01:00
squidbear
d0b7e1edd7 more chains 2025-03-26 08:53:38 +01:00
squidbear
01d4c85fdc fix 2025-03-26 06:50:27 +01:00
squidbear
6323da5d0e new ttry 2025-03-26 06:49:14 +01:00
squidbear
26e70a4238 fix 2025-03-26 06:32:37 +01:00
squidbear
bcd7b26ef4 fix 2025-03-26 06:30:47 +01:00
squidbear
8ef9d90959 variables 2025-03-26 06:25:40 +01:00
squidbear
a58f520072 fix 2025-03-26 05:48:23 +01:00
squidbear
54ae0a397b new chain config 2025-03-26 05:45:41 +01:00
squidbear
3f4f6256ba lalalalaaa 2025-03-26 04:37:41 +01:00
squidbear
68ed55b321 make it deprecated 2025-03-26 04:31:14 +01:00
squidbear
8db2c30d72 jo 2025-03-26 04:26:27 +01:00
squidbear
29b522ab3a ittle fixes 2025-03-26 04:08:08 +01:00
squidbear
217ea08c4b renamed 2025-03-26 03:55:20 +01:00
squidbear
656bef202d new network 2025-03-26 03:53:54 +01:00
squidbear
3c3b84f729 jo 2025-03-26 03:51:23 +01:00
squidbear
482bc07e7e update 2025-03-26 03:39:48 +01:00
squidbear
3b064da000 give prysm a data volume dammit 2025-03-25 11:24:14 +01:00
squidbear
211b4526d9 deepseek hallucinations removed 2025-03-25 11:16:54 +01:00
squidbear
f0032665e3 test it 2025-03-25 10:57:19 +01:00
squidbear
d189b8d1d0 all the geth configs shoud go here 2025-03-25 10:57:08 +01:00
squidbear
118b6a5a0e regnerate nodekeys for datadir transportability 2025-03-25 09:41:59 +01:00
squidbear
8468327236 bootnodes 2025-03-25 09:38:21 +01:00
squidbear
b70ed9b791 try 2025-03-25 09:33:26 +01:00
squidbear
5f76152f33 bigger family 2025-03-25 09:30:32 +01:00
squidbear
b5083c71d9 do fantom with opera smarter 2025-03-25 09:23:58 +01:00
squidbear
1056feab7e do fantom with sonic as well 2025-03-25 09:22:58 +01:00
squidbear
b6d3e39df6 new build process 2025-03-25 09:09:39 +01:00
squidbear
3265446bd9 change the current sonic build slightly 2025-03-25 09:08:30 +01:00
squidbear
1b9860885b small fixes 2025-03-25 09:03:56 +01:00
squidbear
2eee8e9c47 fix 2025-03-24 17:25:05 +01:00
squidbear
f2af5b3254 fix 2025-03-24 17:21:38 +01:00
squidbear
939d22ecb0 naming scheme 2025-03-24 17:10:43 +01:00
squidbear
5970d0f953 next move 2025-03-24 17:05:23 +01:00
squidbear
9e41f705a1 sharade 2025-03-24 17:04:26 +01:00
squidbear
a5983f9cb9 one step 2025-03-24 17:02:20 +01:00
squidbear
a5d3afe086 better 2025-03-24 16:59:20 +01:00
squidbear
3b8e27149d migrate one 2025-03-24 16:28:23 +01:00
squidbear
37729e1110 update bootnodes 2025-03-24 12:57:54 +01:00
squidbear
c0df43119d same same 2025-03-24 12:55:01 +01:00
squidbear
eb23591f5b more apis 2025-03-24 12:53:37 +01:00
squidbear
1f941ca26c try 2025-03-24 12:44:02 +01:00
squidbear
68a3179e6c fix 2025-03-24 10:55:56 +01:00
squidbear
de597f9985 maybe no initialisation needed? 2025-03-24 09:16:24 +01:00
squidbear
b8ce5f1319 maybe not genesis needed? 2025-03-24 09:14:13 +01:00
squidbear
cdb36e8b82 fix for old curl 2025-03-24 09:04:23 +01:00
squidbear
a6ea97e2d1 update 2025-03-24 08:25:10 +01:00
squidbear
95ae8c109d fix ronin saigon 2025-03-24 07:47:32 +01:00
squidbear
8954702640 fix 2025-03-24 05:10:04 +01:00
squidbear
97604b92b8 fix 2025-03-24 05:09:12 +01:00
squidbear
57eecf611f fix 2025-03-24 05:04:22 +01:00
squidbear
71f1930023 try 2025-03-24 05:02:35 +01:00
squidbear
9049a88672 convenient 2025-03-24 05:00:15 +01:00
squidbear
b4cd75587e update 2025-03-24 04:44:28 +01:00
squidbear
81888e7f1d allow upstream rpc to need 3 seconds befor erroring 2025-03-24 04:28:41 +01:00
squidbear
38f0db07b5 only accept 200 2025-03-23 12:08:14 +01:00
squidbear
df80758e55 only accept 200 2025-03-23 12:05:38 +01:00
squidbear
77e24c9401 fix 2025-03-23 11:48:20 +01:00
squidbear
3b7a425edd less wait 2025-03-23 11:34:54 +01:00
squidbear
0e85af9526 use mulitple reference rpc 2025-03-23 11:27:14 +01:00
squidbear
271d9041ce fix 2025-03-23 11:24:40 +01:00
squidbear
e5824ffcea update the rpc references 2025-03-23 05:52:52 +01:00
squidbear
33c57189f4 cleanup 2025-03-23 04:31:14 +01:00
squidbear
7acdb81fff move 2025-03-23 04:26:00 +01:00
squidbear
c72b36e523 human readable 2025-03-23 04:25:08 +01:00
squidbear
d91779dd3c fix 2025-03-22 12:58:33 +01:00
squidbear
eb8d6f2acf fix 2025-03-22 12:43:38 +01:00
squidbear
dbf8f690bf update 2025-03-22 12:32:36 +01:00
squidbear
a22e11f355 fix 2025-03-22 12:02:20 +01:00
squidbear
fbd4f822f1 fix 2025-03-22 12:00:21 +01:00
squidbear
3dd35a500e fix 2025-03-22 11:57:24 +01:00
squidbear
663f90b6d1 no validsatoe peers 2025-03-22 11:52:11 +01:00
squidbear
769525d3f5 fix the iplist label 2025-03-22 11:41:57 +01:00
squidbear
8ea5fac889 fix connecttivity 2025-03-22 11:35:29 +01:00
squidbear
1b9cca5a7c don't validate peers 2025-03-22 11:17:31 +01:00
squidbear
2165a688b9 pebblepath 2025-03-22 10:58:49 +01:00
squidbear
557337206b add the ronin 2025-03-22 10:14:12 +01:00
squidbear
15c7d5daa9 add the ronin 2025-03-22 10:04:55 +01:00
squidbear
552ad8c83c add the ronin 2025-03-22 10:00:16 +01:00
squidbear
2d3452c97e replace old with new 2025-03-22 09:25:01 +01:00
squidbear
03347cea73 dunno 2025-03-22 09:23:32 +01:00
squidbear
8c0119ee99 fix 2025-03-22 08:51:26 +01:00
squidbear
ab684cdf14 experiment with symlinks 2025-03-22 08:41:03 +01:00
squidbear
8d5afcb191 fix dbformat 2025-03-22 08:39:14 +01:00
squidbear
7a90ef6427 fix volumes 2025-03-22 08:36:17 +01:00
squidbear
6b08ee0ae8 moar op 2025-03-22 08:22:00 +01:00
squidbear
6699a7bd92 do the right network 2025-03-21 16:03:34 +01:00
squidbear
a736583d3c new eth mainnet 2025-03-21 10:02:14 +01:00
squidbear
d60630178e new eth mainnet 2025-03-21 10:00:54 +01:00
squidbear
37b3e49b15 update 2025-03-21 09:44:17 +01:00
squidbear
64dd0ab387 update 2025-03-21 09:43:32 +01:00
squidbear
8fc0e6ab9b donno 2025-03-21 09:30:48 +01:00
squidbear
3f329001ff better use the right network idf 2025-03-21 09:27:44 +01:00
squidbear
09f2cc01d3 transition 2025-03-21 09:23:03 +01:00
squidbear
cfdc71f001 add ronin 2025-03-21 09:20:40 +01:00
Sebastian
6b56d96ba5 more neat 2025-03-21 06:41:03 +01:00
Sebastian
8afc132b68 more neat 2025-03-21 06:40:19 +01:00
Sebastian
8b9e26a8c6 weird zsh bug with no blank line at the top allowed 2025-03-21 06:34:23 +01:00
Sebastian
003bcc7e7b speedup with less errors 2025-03-21 06:32:25 +01:00
Sebastian
d9c65cb01a speedup 2025-03-21 06:10:27 +01:00
Sebastian
907c90297d revert 2025-03-21 06:01:18 +01:00
Sebastian
a412965c27 more precision 2025-03-21 05:54:16 +01:00
Sebastian
42835e3e2c more precision 2025-03-21 05:53:45 +01:00
Sebastian
abdfab5d95 more precision 2025-03-21 05:52:11 +01:00
Sebastian
aca2301a2c better? 2025-03-21 05:34:32 +01:00
Sebastian
0c4c456610 fix sepolia 2025-03-20 15:55:28 +01:00
Sebastian
3b4f46da64 do the thing 2025-03-20 08:56:40 +01:00
Sebastian
fbd00a2314 why does OP never give a useful error message 2025-03-19 17:55:11 +01:00
Sebastian
a3c6df3a7b why does OP never give a useful error message 2025-03-19 17:52:28 +01:00
Sebastian
75005e0c2c meh 2025-03-19 17:34:48 +01:00
Sebastian
a1df3cd1bc fix 2025-03-19 17:27:37 +01:00
Sebastian
0188c40405 order 2025-03-19 14:24:33 +01:00
Sebastian
5e66c3f215 that would fail 2025-03-19 14:05:36 +01:00
Sebastian
28d7e8894c fix 2025-03-19 14:04:41 +01:00
Sebastian
9de26aab82 fix 2025-03-19 13:47:27 +01:00
Sebastian
c64022bd8b fix 2025-03-19 13:44:43 +01:00
Sebastian
ba4e65c06a fix 2025-03-19 13:40:32 +01:00
Sebastian
035706fa12 fix 2025-03-19 13:39:33 +01:00
Sebastian
aa5e92a5df fix 2025-03-19 13:36:37 +01:00
Sebastian
56d8cba406 fix 2025-03-19 13:27:42 +01:00
Sebastian
cc4d8fafe8 fix 2025-03-19 13:25:33 +01:00
Sebastian
5c864d34bd fix 2025-03-19 13:25:01 +01:00
Sebastian
1f085da99f fix 2025-03-19 13:24:12 +01:00
Sebastian
ba97af0a0f fix 2025-03-19 13:17:08 +01:00
Sebastian
9c7c057300 fix 2025-03-19 13:13:28 +01:00
Sebastian
fcfb960215 new chain 2025-03-19 13:05:57 +01:00
Sebastian
975442bcc0 new chain 2025-03-19 13:04:11 +01:00
Sebastian
453c1bc9c2 Merge branch 'main' of github.com:StakeSquid/ethereum-rpc-docker into main 2025-03-19 12:59:54 +01:00
Sebastian
9960f6144a new chain 2025-03-19 12:59:43 +01:00
Sebastian
1ff954ca5e trying to patch it 2025-03-19 11:29:53 +01:00
Brain🧠
6b32697954 Merge branch 'main' of https://github.com/StakeSquid/ethereum-rpc-docker 2025-03-19 11:39:02 +02:00
Brain🧠
9c0e2ad482 home commits 2025-03-19 11:36:56 +02:00
Sebastian
52b4629f1a fix 2025-03-19 07:46:47 +01:00
Sebastian
c46767b0a6 update 2025-03-19 07:40:35 +01:00
Sebastian
46d3918a6b update 2025-03-19 07:23:06 +01:00
Sebastian
4c56c42bb0 update 2025-03-19 07:21:26 +01:00
Sebastian
b9fd88cdcb update 2025-03-19 07:14:06 +01:00
Sebastian
92730d49a7 update 2025-03-19 07:03:53 +01:00
Sebastian
8a3ba3d1b3 save more money 2025-03-19 06:21:43 +01:00
Sebastian
68ba9d07fd save money 2025-03-19 06:20:08 +01:00
Sebastian
eb696ccd39 fix arb one sep ref rpc 2025-03-18 13:27:43 +01:00
Sebastian
2d4e870b53 fix occ ref rpc 2025-03-18 13:23:32 +01:00
Sebastian
11efd1bc38 fix fantom testnet reference 2025-03-18 13:18:12 +01:00
Sebastian
6bbba05710 timeout everywhere 2025-03-18 13:13:52 +01:00
Sebastian
48efc4fc65 timeout everywhere 2025-03-18 13:12:31 +01:00
Sebastian
e4484b1712 timeout everywhere 2025-03-18 13:11:55 +01:00
Sebastian
97f0148e42 timeout everywhere 2025-03-18 13:10:35 +01:00
Sebastian
2832ca186a timeout everywhere 2025-03-18 13:09:29 +01:00
Sebastian
1ef4598ca0 timeout everywhere 2025-03-18 13:06:51 +01:00
Sebastian
e50dc2deee timeout everywhere 2025-03-18 13:04:25 +01:00
Sebastian
c139630997 timeout everywhere 2025-03-18 13:02:42 +01:00
Sebastian
d887840138 timeout everywhere 2025-03-18 12:58:58 +01:00
Sebastian
70036ec47b stupid fixes 2025-03-18 12:53:22 +01:00
Sebastian
97c3396af0 we need to be gracious here 2025-03-18 12:35:57 +01:00
Sebastian
525879052a fix the monitoring 2025-03-18 12:22:58 +01:00
Sebastian
693959699c fix the monitoring 2025-03-18 12:22:09 +01:00
Sebastian
5e20ed40ca fix the monitoring 2025-03-18 12:20:18 +01:00
Sebastian
65e17d8009 fix the monitoring 2025-03-18 12:16:06 +01:00
Sebastian
9a07aafabb fix the monitoring 2025-03-18 12:15:07 +01:00
Sebastian
a98d858591 fix the monitoring 2025-03-18 12:13:42 +01:00
Sebastian
2f731d6828 fix the monitoring 2025-03-18 12:11:37 +01:00
Sebastian
5449adc8f8 fix the monitoring 2025-03-18 12:08:19 +01:00
Sebastian
2c3afa42cd fix the monitoring 2025-03-18 12:06:34 +01:00
Sebastian
0e6287cd95 better test endpoint 2025-03-18 11:46:03 +01:00
Sebastian
979e0d3a8d add chainspec 2025-03-18 11:41:40 +01:00
Sebastian
87bbb1f003 add chainspec 2025-03-18 11:40:56 +01:00
Sebastian
7790dc1daa fix 2025-03-18 11:34:26 +01:00
Sebastian
74d5fb09af fix 2025-03-18 11:33:54 +01:00
Sebastian
8af40dba2b fix tron check 2025-03-18 11:27:51 +01:00
Sebastian
4807aa4a0f fix tron check 2025-03-18 11:23:28 +01:00
Sebastian
e8a5eb4bbf more reliable 2025-03-18 11:21:32 +01:00
Sebastian
1cfcd14b20 update 2025-03-18 11:12:44 +01:00
Sebastian
48558da40a disable logging 2025-03-18 11:08:07 +01:00
Sebastian
78fb3dc32f more timeout for ref urls 2025-03-18 11:01:42 +01:00
Sebastian
86ec93ce53 don't look back the whole day 2025-03-18 10:30:49 +01:00
Sebastian
dadca852fd only the last hours 2025-03-18 10:24:38 +01:00
Sebastian
527a8760cb fix 2025-03-18 10:13:00 +01:00
Sebastian
f07a57f4b3 need something more powerful for erigon 2025-03-18 10:02:59 +01:00
Sebastian
b3bb447baa no open port needed 2025-03-18 09:52:56 +01:00
Sebastian
5f52d93478 ask ai if the node is progressing 2025-03-18 09:52:09 +01:00
Sebastian
5159490e75 fix 2025-03-18 06:49:51 +01:00
Sebastian
1ac9afbd19 fix nossl 2025-03-18 06:48:49 +01:00
Sebastian
22f9372a83 better 2025-03-18 06:32:20 +01:00
root
86b1007e05 Merge branch 'main' of https://github.com/StakeSquid/ethereum-rpc-docker 2025-03-18 07:21:12 +02:00
root
2eaa8f99e4 homecoming 2025-03-18 07:18:19 +02:00
Sebastian
228d527af3 make the show-status script fail on errors 2025-03-18 06:02:34 +01:00
Sebastian
57f5091dcd try ai 2025-03-18 05:48:47 +01:00
Sebastian
dd8c5f6aa5 fix 2025-03-18 05:29:21 +01:00
Sebastian
fa4a9a43bd fix 2025-03-17 16:55:14 +01:00
Sebastian
a1b3d48f1f open port 2025-03-17 16:47:02 +01:00
Sebastian
4e17308d86 open port 2025-03-17 16:44:25 +01:00
Sebastian
910d32c05d ? 2025-03-17 04:39:57 +01:00
Sebastian
a0c850fb3b ? 2025-03-17 04:38:37 +01:00
Sebastian
0f52619c36 ? 2025-03-17 04:37:24 +01:00
Sebastian
62b049b2e1 ? 2025-03-17 04:36:11 +01:00
Sebastian
247d64bed5 fix 2025-03-15 14:07:41 +01:00
Sebastian
bb94e499e5 fix? 2025-03-15 13:59:17 +01:00
Sebastian
dc1f1183ab fix? 2025-03-15 13:58:11 +01:00
Sebastian
6072a158d2 fix? 2025-03-15 13:53:41 +01:00
Sebastian
429993aee7 fix? 2025-03-15 13:25:26 +01:00
Sebastian
b8e56c770e fix? 2025-03-15 13:22:52 +01:00
Sebastian
13cd30d24b fix? 2025-03-15 13:22:24 +01:00
Sebastian
242e2804b8 fix? 2025-03-15 13:17:42 +01:00
Sebastian
bc8ef71083 fix? 2025-03-15 12:53:41 +01:00
Sebastian
e7cf69dd9d fix? 2025-03-15 12:50:48 +01:00
Sebastian
c025dac861 fix? 2025-03-15 12:45:54 +01:00
Sebastian
d9521aac6b jo 2025-03-15 11:26:55 +01:00
Sebastian
4ae0790ce6 jo 2025-03-15 11:20:54 +01:00
Sebastian
7d0f71ef48 jo 2025-03-15 11:16:08 +01:00
Sebastian
33b50a34b2 jo 2025-03-15 11:10:07 +01:00
Sebastian
6278827e52 jo 2025-03-15 11:07:11 +01:00
Sebastian
8cda78738e jo 2025-03-15 11:02:33 +01:00
Sebastian
38997477ca jo 2025-03-15 11:01:02 +01:00
Sebastian
55edc8efcf jo 2025-03-15 10:59:50 +01:00
Sebastian
7e905ba31e jo 2025-03-15 10:54:07 +01:00
Sebastian
fe910a6bbe update to specific image 2025-03-15 10:40:50 +01:00
Sebastian
52238722a3 update to specific image 2025-03-15 10:35:32 +01:00
Sebastian
442d3a463e update to specific image 2025-03-15 10:34:01 +01:00
Sebastian
f7c36e18c9 update to specific image 2025-03-15 10:17:39 +01:00
Sebastian
24252922ee update to specific image 2025-03-15 10:16:52 +01:00
Sebastian
bf437081df update to specific image 2025-03-15 10:16:11 +01:00
Sebastian
7117a1102b new history 2025-03-15 09:51:23 +01:00
Sebastian
7dcebb7a8e fix? 2025-03-15 09:43:19 +01:00
Sebastian
232dd8698e fix? 2025-03-15 09:37:27 +01:00
Sebastian
31dcc6fdd0 no more wheel 2025-03-15 09:26:26 +01:00
Sebastian
6f64867a27 dencun 2025-03-15 07:10:26 +01:00
Sebastian
9427ddf511 save me 2025-03-14 14:24:24 +01:00
Sebastian
767129bb16 add the wheel 2025-03-14 13:24:36 +01:00
Sebastian
c9ac737987 add the wheel 2025-03-14 13:21:50 +01:00
Sebastian
5226085db4 add the wheel 2025-03-14 13:18:16 +01:00
Sebastian
5433c9a8cd pinning version 2025-03-14 12:55:09 +01:00
Sebastian
27b2fb4b44 fix 2025-03-14 12:39:50 +01:00
Sebastian
297eaf6860 do it 2025-03-14 11:53:09 +01:00
Sebastian
0f08614981 update 2025-03-14 11:37:41 +01:00
Sebastian
15f59e61c4 update 2025-03-14 11:35:37 +01:00
Sebastian
a71e239cf8 add method 2025-03-13 16:24:30 +01:00
Sebastian
08da99d3c7 for trigger 2025-03-12 08:25:59 +01:00
Sebastian
b5b375e983 update 2025-03-12 08:01:46 +01:00
Sebastian
209af925bd update 2025-03-10 14:24:45 +01:00
Sebastian
347795ea5c pectra time 2025-03-10 14:20:31 +01:00
Sebastian
fff0810a36 update 2025-03-10 14:09:07 +01:00
Sebastian
45c1bdf475 update 2025-03-10 14:07:16 +01:00
Sebastian
e564e13631 do not stop the rpc 2025-03-10 13:51:35 +01:00
Sebastian
517f44d8e1 update 2025-03-09 12:00:38 +01:00
Sebastian
33975e7a6e blobs 2025-03-08 14:47:33 +01:00
Sebastian
ce91a0e350 blobs 2025-03-08 14:45:46 +01:00
Sebastian
bfee6a7653 blobs 2025-03-08 14:36:45 +01:00
Sebastian
45bf4723f8 blobs 2025-03-08 14:32:23 +01:00
Sebastian
2badd3463b fix 2025-03-08 10:44:59 +01:00
Sebastian
36047f8b70 fix 2025-03-08 10:43:33 +01:00
Sebastian
ded4785699 fix 2025-03-08 10:40:15 +01:00
Sebastian
2397fbcb78 fix 2025-03-08 10:32:34 +01:00
Sebastian
12f5895284 things 2025-03-07 10:24:20 +01:00
Sebastian
cc5a644d7e fix 2025-03-07 09:28:30 +01:00
Sebastian
519d11a607 rework names 2025-03-07 09:27:37 +01:00
Sebastian
a05561d01f home 2025-03-06 10:56:30 +01:00
Sebastian
161a14934c fix the entrypoint to not crash the node 2025-03-06 10:39:33 +01:00
Sebastian
02dff8aa69 home 2025-03-06 10:37:08 +01:00
Sebastian
76d4382462 home 2025-03-06 10:31:35 +01:00
Sebastian
217f139f38 home 2025-03-06 10:14:30 +01:00
Sebastian
e3ed2c6f09 home 2025-03-06 10:13:53 +01:00
Sebastian
1ccb674599 home 2025-03-06 10:04:01 +01:00
Sebastian
e1744657a4 home 2025-03-06 09:59:09 +01:00
Sebastian
0cbe9e4656 home 2025-03-06 09:43:48 +01:00
Sebastian
9ceece41c5 home 2025-03-06 09:33:46 +01:00
Sebastian
d6801912e7 delete the defunct file 2025-03-06 08:55:55 +01:00
Sebastian
ff61dd22dc fix 2025-03-06 08:31:53 +01:00
Sebastian
c5c6942151 fix 2025-03-06 08:30:08 +01:00
Sebastian
f79919776b switch to go 2025-03-06 08:25:03 +01:00
Sebastian
6277c1ad1c fix gzip 2025-03-06 08:01:30 +01:00
Sebastian
4fb24d8d0d fix gzip 2025-03-06 07:55:40 +01:00
Sebastian
7bcabb9ffd fix name 2025-03-06 07:47:04 +01:00
Sebastian
110a31e52b debug logs 2025-03-06 07:45:52 +01:00
Sebastian
4c6bf353ae initial commit 2025-03-06 07:33:29 +01:00
Sebastian
8656b63553 fix 2025-03-06 05:27:22 +01:00
Sebastian
487aca2851 make sure the process runs as pid 0 to allow docker to stop the container 2025-03-06 05:11:43 +01:00
Sebastian
f708f65cfc make sure the process id is 0 by using exec 2025-03-06 05:06:26 +01:00
Sebastian
05f6f36eb4 fix 2025-03-05 14:38:32 +01:00
Sebastian
4d23b18a16 init 2025-03-05 14:34:48 +01:00
Sebastian
1c373bc8bc fix 2025-03-05 14:22:48 +01:00
Sebastian
6173017749 init 2025-03-05 14:20:53 +01:00
Sebastian
2af51f57d5 nossl 2025-03-05 14:16:21 +01:00
Sebastian
a95bbd1345 make a new one for home 2025-03-05 14:03:48 +01:00
Sebastian
21f06357b6 jo 2025-03-05 13:54:13 +01:00
Sebastian
d39739f8ad renamed 2025-03-05 13:46:58 +01:00
Sebastian
8e0a995b36 fit for the future 2025-03-05 13:45:54 +01:00
Sebastian
ca4423065a initial 2025-03-05 13:25:51 +01:00
Sebastian
62188450c1 nossl 2025-03-05 10:54:05 +01:00
Sebastian
70c765ae34 initial 2025-03-05 10:52:10 +01:00
Sebastian
ada3984c2f make heimdall configurable 2025-03-05 10:48:11 +01:00
Sebastian
066c10713e bit of tweaks no idea if they work 2025-03-05 10:31:00 +01:00
Sebastian
2c7311c970 better peers 2025-03-05 10:28:25 +01:00
Sebastian
b07e7499c7 update 2025-03-05 09:46:53 +01:00
Sebastian
c7cad346c8 initial 2025-03-05 09:41:30 +01:00
Sebastian
7c648002fe initial 2025-03-05 09:01:57 +01:00
Sebastian
d2a49a88c6 home 2025-03-05 08:56:38 +01:00
Sebastian
2a838c99fe update 2025-03-04 10:28:50 +01:00
Sebastian
507cf0370e version configurable and update 2025-03-03 08:49:09 +01:00
Sebastian
4f268fe59a version configurable 2025-03-03 08:43:24 +01:00
Sebastian
227c1d83cd fix 2025-03-03 08:32:07 +01:00
Sebastian
6c4da93d15 update 2025-03-03 08:24:47 +01:00
Sebastian
2a9d504fa9 update 2025-03-03 08:20:21 +01:00
Sebastian
fc809a95c7 update 2025-03-03 08:11:35 +01:00
Sebastian
363b2a444f update 2025-03-03 07:59:17 +01:00
Sebastian
35a7169a72 update 2025-03-03 07:58:19 +01:00
Sebastian
2782ab84e0 update 2025-03-03 07:56:03 +01:00
Sebastian
e7cd7ef208 update version tags 2025-03-03 07:49:38 +01:00
Sebastian
2be3932f3d update 2025-03-03 07:42:42 +01:00
Sebastian
75a73652df more bootnodes 2025-03-02 10:31:21 +01:00
Sebastian
e4b652c13b fix 2025-03-02 09:56:09 +01:00
Sebastian
a62971623f open ports 2025-03-02 09:48:35 +01:00
Sebastian
b1cd72c194 open ports 2025-03-02 09:48:24 +01:00
Sebastian
5a445fb562 add bootnodes 2025-03-02 09:42:39 +01:00
Sebastian
3b78a06496 fix holesky 2025-03-02 09:25:35 +01:00
Sebastian
16c2da133d pectra update 2025-03-02 09:18:55 +01:00
Sebastian
845c4c85d8 bigger subnet 2025-02-27 11:49:46 +01:00
Sebastian
fe404c0bcb pectra fix 2025-02-27 11:37:41 +01:00
Sebastian
2434eb63f1 pectra fix 2025-02-27 11:36:23 +01:00
Sebastian
b8be36373b pectra fix 2025-02-27 11:35:20 +01:00
Sebastian
ee263139a0 pectra fix 2025-02-27 11:33:20 +01:00
Sebastian
c3c306cb6e update 2025-02-27 05:06:24 +01:00
Sebastian
c0aebe0d86 make HTTPS configurable 2025-02-25 09:45:44 +01:00
Sebastian
966cc2a620 make HTTPS configurable 2025-02-25 09:42:46 +01:00
Sebastian
ea8aed4d96 make HTTPS configurable 2025-02-25 09:34:55 +01:00
Sebastian
6d0d874b69 update 2025-02-25 08:30:00 +01:00
Sebastian
208db1b64f fix archive 2025-02-25 08:29:46 +01:00
Sebastian
8d273928c9 DATA as variable 2025-02-25 08:19:09 +01:00
Sebastian
c5bf946201 volume env variable 2025-02-25 08:17:27 +01:00
Sebastian
e8e3476b10 initial commit 2025-02-25 08:13:57 +01:00
Sebastian
6b6f3482f1 initial 2025-02-25 08:01:03 +01:00
Sebastian
5de61a1082 connectivity 2025-02-14 14:19:03 +01:00
Sebastian
2f826806c2 connectivity 2025-02-14 14:17:49 +01:00
Sebastian
7c8ffd3347 fix 2025-02-14 14:16:26 +01:00
Sebastian
6526a10ea0 fix 2025-02-14 14:15:02 +01:00
Sebastian
75aae4a826 fix config 2025-02-14 14:09:48 +01:00
Sebastian
be024a7625 updated default configs 2025-02-14 14:04:30 +01:00
Sebastian
c0f077d531 update 2025-02-14 13:54:01 +01:00
Sebastian
a782ec3d7e naming fix 2025-02-12 14:11:35 +01:00
Sebastian
c40a0b2b4a try a name refactoring 2025-02-12 14:08:31 +01:00
Sebastian
dece12e555 make it invincible 2025-02-12 14:01:23 +01:00
Sebastian
54a0d4e5f6 add a status 2025-02-12 13:08:53 +01:00
Sebastian
d0cf554463 add jq again 2025-02-12 12:59:34 +01:00
Sebastian
67dcdbe57f fix 2025-02-12 12:56:52 +01:00
Sebastian
6f1c6c5bce fix 2025-02-12 12:47:34 +01:00
Sebastian
78554cb066 try to build a health check 2025-02-12 12:47:00 +01:00
Sebastian
e3fdb3fab6 try to build a health check 2025-02-12 12:46:13 +01:00
Sebastian
0cebb54c00 refactor 2025-02-12 12:32:46 +01:00
Sebastian
b9a6178238 more ports 2025-02-11 08:41:36 +01:00
Sebastian
35e241e0ce more ports 2025-02-11 08:38:10 +01:00
Sebastian
81d57c75e4 more ports 2025-02-11 08:34:25 +01:00
Sebastian
059ec0fee5 more ports 2025-02-11 08:28:16 +01:00
Sebastian
0659f70f5d fix 2025-02-10 08:26:38 +01:00
Sebastian
890610a510 fix it 2025-02-10 08:24:13 +01:00
Sebastian
ce77d4202b fix 2025-02-06 10:00:21 +01:00
Sebastian
7040a86f30 update 2025-02-06 09:18:34 +01:00
Sebastian
0461aa4943 update 2025-02-06 09:16:07 +01:00
Sebastian
a543cdf5f3 update 2025-02-06 09:15:02 +01:00
Sebastian
8232798521 update 2025-02-06 09:09:53 +01:00
Sebastian
8f7fd7f0a4 update 2025-02-06 09:05:48 +01:00
Sebastian
186d855bc1 one number 2025-02-06 07:51:47 +01:00
Sebastian
18c879b224 one number 2025-02-06 07:51:18 +01:00
Sebastian
abf39a84c4 one number 2025-02-06 07:50:51 +01:00
Sebastian
546acbbc35 one number 2025-02-06 07:49:57 +01:00
Sebastian
39a3dc4c0e consensus ports 2025-02-05 10:19:13 +01:00
Sebastian
9d069638ff follow symlinks 2025-02-04 10:28:01 +01:00
Sebastian
034a53f563 update 2025-02-04 10:27:13 +01:00
Sebastian
fd50ee4b30 try to squeeze sepolia 2025-02-04 09:37:35 +01:00
Sebastian
9d35619362 discovery via dns 2025-02-03 06:26:41 +01:00
Sebastian
4c075d6749 support ancient split 2025-02-03 06:09:43 +01:00
Sebastian
0a87672269 update 2025-02-03 06:09:03 +01:00
Sebastian
4a11ba6bf4 update 2025-02-02 08:57:27 +01:00
Sebastian
9af2fbfb14 fix 2025-02-02 08:14:20 +01:00
Sebastian
fb469e00b1 hand over the public ip 2025-02-02 08:13:11 +01:00
Sebastian
041bc7760c just delete the nodekey 2025-02-02 08:11:43 +01:00
Sebastian
42466c6e91 generate the nodekey 2025-02-02 07:58:57 +01:00
Sebastian
f20bc90b24 try putting the nodekey somewhere else 2025-02-02 07:55:17 +01:00
Sebastian
e496c64f6a get public ip 2025-02-02 07:52:02 +01:00
Sebastian
342a390704 install dependencies into the right image 2025-02-02 07:46:34 +01:00
Sebastian
86f870fe91 better nodekey 2025-02-02 07:44:17 +01:00
Sebastian
73e72904d3 generate the nodekey 2025-02-02 07:41:07 +01:00
Sebastian
1b0f87d565 nodekey is now external to the volume 2025-02-02 07:31:44 +01:00
Sebastian
04062f675a public ip 2025-02-02 07:27:47 +01:00
Sebastian
1b7ddcbb50 big fix 2025-01-30 12:27:40 +01:00
Sebastian
9d491cbc9e unclog 2025-01-30 12:23:56 +01:00
Sebastian
cf5ef9b5ca unclog 2025-01-30 12:22:40 +01:00
Sebastian
144644fc26 unclog 2025-01-30 12:16:39 +01:00
Sebastian
c24c890227 cleanup 2025-01-30 12:14:30 +01:00
Sebastian
116e0c5584 fix the reth peer id 2025-01-30 11:52:52 +01:00
Sebastian
4922d40c4a maybe 2025-01-30 11:29:36 +01:00
Sebastian
e656142601 it's needed afterall 2025-01-30 11:05:17 +01:00
Sebastian
5a12c6a61d fix the initialization 2025-01-30 06:09:35 +01:00
Sebastian
1f0b59a538 fix the initialization 2025-01-30 06:09:25 +01:00
Sebastian
4744ad38d3 new random port 2025-01-28 08:03:19 +01:00
Sebastian
8c69893c8a follow symlinks 2025-01-28 07:47:59 +01:00
Sebastian
58fe579555 respect existing symlinks 2025-01-28 07:41:42 +01:00
Sebastian
f621a0c152 slowdisk 2025-01-27 12:20:07 +01:00
Sebastian
d192edfe50 update 2025-01-27 11:13:27 +01:00
Sebastian
8a13d4abb1 update 2025-01-27 06:01:14 +01:00
Sebastian
f99b0bf172 update 2025-01-26 12:02:18 +01:00
Sebastian
99028e4c42 update 2025-01-25 08:26:38 +01:00
Sebastian
ebec3de353 finish snaxchain 2025-01-24 10:11:20 +01:00
Sebastian
12b2732ddf disable execution layer sync 2025-01-24 10:08:47 +01:00
Sebastian
c82b5819c3 disable execution layer sync 2025-01-24 10:06:09 +01:00
Sebastian
694abee654 disable execution layer sync 2025-01-24 10:03:20 +01:00
Sebastian
3324ae5d7f disable execution layer sync 2025-01-24 10:02:07 +01:00
Sebastian
498705a5a8 disable execution layer sync 2025-01-24 09:59:42 +01:00
Sebastian
07c2a00a37 initial snaxchain 2025-01-24 09:54:13 +01:00
Sebastian
740311896c update 2025-01-22 07:30:52 +01:00
Sebastian
c8aafee5f1 update 2025-01-22 07:22:53 +01:00
Sebastian
d8a10e6c82 update 2025-01-16 15:46:31 +01:00
Sebastian
74e85c0698 fix for the selfmade yaml2json 2025-01-15 05:44:35 +01:00
Sebastian
78bab70a05 holocene explicit 2025-01-14 13:54:00 +01:00
Sebastian
a4b01ba358 override holocene 2025-01-14 13:52:01 +01:00
Sebastian
95c48c3d6f override holocene 2025-01-14 13:48:44 +01:00
Sebastian
2b6b6fd107 faster 2025-01-14 13:32:29 +01:00
Sebastian
1af7f87567 fix external rpcs 2025-01-14 05:32:17 +01:00
Sebastian
b8db21b3d4 cleanup 2025-01-14 04:40:34 +01:00
Sebastian
37d5a5caea fix 2025-01-14 04:39:55 +01:00
Sebastian
4033f1c09d fix 2025-01-14 04:39:23 +01:00
Sebastian
b937a8a8ad update 2025-01-14 04:38:18 +01:00
Sebastian
e5d1027763 fix 2025-01-14 04:38:02 +01:00
Sebastian
68460dfe4f fix 2025-01-14 04:37:36 +01:00
Sebastian
18754a0c8f update 2025-01-14 04:37:00 +01:00
Sebastian
b2a86290c6 fix 2025-01-14 04:36:30 +01:00
Sebastian
8d04a393f4 update 2025-01-14 04:35:36 +01:00
Sebastian
ec00eb49c2 update 2025-01-14 04:34:48 +01:00
Sebastian
6117123c4a update 2025-01-14 04:34:07 +01:00
Sebastian
f3ed6f2a1c update 2025-01-14 04:16:27 +01:00
Sebastian
737a271e26 update 2025-01-14 04:15:40 +01:00
Sebastian
7f6af4cd5e update 2025-01-14 04:14:48 +01:00
Sebastian
bc74fd1414 update 2025-01-14 04:14:01 +01:00
Sebastian
3c5c76382f update 2025-01-14 04:12:11 +01:00
Sebastian
dc4710ef18 update 2025-01-14 04:06:10 +01:00
Sebastian
3c7d537898 update 2025-01-14 04:03:13 +01:00
Sebastian
603349518a update 2025-01-14 04:02:20 +01:00
Sebastian
166575953e external ip 2025-01-12 10:16:02 +01:00
Sebastian
3aa725102a I like unhealthy 2025-01-10 10:09:48 +01:00
Sebastian
249cffdee7 I like unhealthy 2025-01-10 10:08:58 +01:00
Sebastian
bb95f8adce I like unhealthy 2025-01-10 10:07:30 +01:00
Sebastian
2ee9222251 add bob archive 2025-01-06 15:53:52 +01:00
Sebastian
8776b382e7 update granite and holocene 2025-01-06 15:49:27 +01:00
Sebastian
421e8e6052 fix 2025-01-05 14:26:11 +01:00
Sebastian
449606eb1d fix 2025-01-05 09:23:45 +01:00
Sebastian
915ae99711 merge down and up 2025-01-05 06:48:53 +01:00
Sebastian
27bea18931 fix 2025-01-05 06:22:55 +01:00
Sebastian
ae23308523 fix 2025-01-05 06:17:27 +01:00
Sebastian
ba36be78d8 fix 2025-01-05 05:48:38 +01:00
Sebastian
c2c0cb5a2f fix 2025-01-04 17:20:58 +01:00
Sebastian
e38d778899 fix 2025-01-04 17:14:34 +01:00
Sebastian
10c0063295 no dav in status checks 2025-01-04 16:58:10 +01:00
Sebastian
ba91e8c024 hopefully 2025-01-04 16:56:04 +01:00
Sebastian
fa2153dd4a dav me 2025-01-04 16:49:37 +01:00
Sebastian
39ad0d25b5 use the weird chinese container in production 2025-01-04 16:48:00 +01:00
Sebastian
c3198d4b3c dav 2025-01-04 16:40:01 +01:00
Sebastian
eaea6796b7 lets try webdav 2025-01-04 16:38:30 +01:00
Sebastian
eaf29a931a try to write 2025-01-04 16:17:31 +01:00
Sebastian
699a454bbd open admin 2025-01-04 12:50:11 +01:00
Sebastian
1420117854 open admin 2025-01-04 12:40:29 +01:00
Sebastian
cd490b43ee open admin 2025-01-04 09:55:23 +01:00
Sebastian
9dd87cc73c update 2025-01-04 02:41:21 +01:00
Sebastian
4a07c61cfa update 2025-01-04 02:28:19 +01:00
Sebastian
5ac00a77b3 fix for ubuntu 24.04 2025-01-04 01:49:46 +01:00
Sebastian
5169fe0751 fix for ubuntu 24.04 2025-01-04 01:30:24 +01:00
Sebastian
d3ccc28d2c fix for ubuntu 24.04 2025-01-04 01:16:22 +01:00
Sebastian
92fb5d3c2a fix 2025-01-02 16:25:16 +01:00
Sebastian
f7451b6d2c experiment 2025-01-02 16:23:35 +01:00
Sebastian
0c82ef1f83 update 2025-01-02 04:50:44 +01:00
Sebastian
ca2c3861a1 update 2025-01-02 02:40:47 +01:00
Sebastian
e7bc0a56f0 get back polygon archive 2024-12-31 11:07:28 +01:00
Sebastian
d78e615869 no cap on tx fee 2024-12-28 08:17:36 +01:00
Sebastian
8a0657669d temporarily remove whitelist 2024-12-24 15:37:39 +01:00
Sebastian
4f3d904594 fix 2024-12-24 02:08:36 +01:00
Sebastian
2d97c54741 update 2024-12-24 01:53:32 +01:00
Sebastian
90a4d3ce55 update 2024-12-24 01:52:02 +01:00
Sebastian
658e64a67b update 2024-12-24 01:47:51 +01:00
Sebastian
3ad79dced3 executable 2024-12-22 08:26:05 +01:00
Sebastian
df7c4b12af own port 2024-12-22 08:23:14 +01:00
Sebastian
f788e75947 new net 2024-12-22 08:21:12 +01:00
Sebastian
31941a7446 experiment 2024-12-22 07:25:58 +01:00
Sebastian
05c87006ac remove unnecessary default monitoring tools due to weird load characteristics on small machines 2024-12-22 07:04:20 +01:00
Sebastian
7291c047ad remove unnecessary default monitoring tools due to weird load characteristics on small machines 2024-12-22 07:03:25 +01:00
Sebastian
e0887aff53 streamline 2024-12-22 06:34:37 +01:00
Sebastian
592e317e82 add archive to gnosis 2024-12-22 06:30:08 +01:00
Sebastian
bb0fb37fd7 a fresh start 2024-12-22 06:29:07 +01:00
Sebastian
a81c05cd5d update to extract without storing the archive when /backup doesn't exist 2024-12-21 07:05:07 +01:00
Sebastian
0c955dbeb1 disable walking back 2024-12-20 06:18:12 +01:00
Sebastian
d85e5bdc96 fix 2024-12-19 07:10:07 +01:00
Sebastian
7db695a94b update 2024-12-18 15:50:53 +01:00
Sebastian
bbaeff524e update 2024-12-18 15:49:18 +01:00
Sebastian
15453f264c add sonic for real 2024-12-18 09:14:46 +01:00
Sebastian
e77335f84f fix 2024-12-18 09:01:22 +01:00
Sebastian
72cf47d0f1 fix 2024-12-18 08:59:50 +01:00
Sebastian
4b1012c0e0 fix 2024-12-18 08:59:13 +01:00
Sebastian
0dfe49484f fix 2024-12-18 08:57:17 +01:00
Sebastian
2d7b84544b fix 2024-12-18 08:54:34 +01:00
Sebastian
00dbee6696 fixes 2024-12-18 08:52:37 +01:00
Sebastian
08b1ef0df2 fix 2024-12-18 08:41:48 +01:00
Sebastian
925ccbadb9 first try 2024-12-18 08:39:52 +01:00
Sebastian
f9cde96de0 hopefully 2024-12-18 07:14:32 +01:00
Sebastian
d308602cb6 hopefully 2024-12-18 07:13:32 +01:00
Sebastian
a3547fd432 hopefully 2024-12-18 07:11:29 +01:00
Sebastian
759045035c hopefully 2024-12-18 07:10:12 +01:00
Sebastian
72ed5de513 hopefully 2024-12-18 07:05:24 +01:00
Sebastian
c415429b31 holocene update 2024-12-18 05:36:29 +01:00
Sebastian
99ad467449 maybe fix 2024-12-18 05:34:28 +01:00
Sebastian
7881123910 maybe fix 2024-12-18 05:33:42 +01:00
Sebastian
27ce017de2 maybe fix 2024-12-18 05:31:13 +01:00
Sebastian
27faab2b9b rename volumes 2024-12-18 04:28:53 +01:00
Sebastian
eaf7ed9b4f initial sync 2024-12-18 04:27:58 +01:00
Sebastian
94ccec871e enable extragas 2024-12-18 03:46:02 +01:00
Sebastian
e469731eeb extragas 2024-12-18 03:33:52 +01:00
Sebastian
1027470287 fix 2024-12-17 16:56:31 +01:00
Sebastian
b3ead4ab76 fix 2024-12-17 16:54:53 +01:00
Sebastian
22c7ed8d56 fix 2024-12-17 16:53:14 +01:00
Sebastian
9ae6963441 fix 2024-12-17 16:51:24 +01:00
Sebastian
168f3bae0d fix 2024-12-17 16:50:26 +01:00
Sebastian
182b36114a a new start 2024-12-17 16:44:19 +01:00
Sebastian
9e962018ca lets go 2024-12-17 16:18:26 +01:00
Sebastian
620d1ab696 fix 2024-12-17 16:04:07 +01:00
Sebastian
168d0d04a1 fix 2024-12-17 15:59:33 +01:00
Sebastian
f0989bff07 genesis 2024-12-17 15:50:14 +01:00
Sebastian
e55802af54 genesis 2024-12-17 15:47:30 +01:00
Sebastian
b091448b98 init 2024-12-17 15:42:52 +01:00
Sebastian
bad75e211b init 2024-12-17 15:42:27 +01:00
Sebastian
56a90cd688 update 2024-12-17 09:49:25 +01:00
Sebastian
5e01eb445e do not validate peers 2024-12-17 06:34:18 +01:00
Sebastian
c2dfaf6192 quick help 2024-12-16 07:17:30 +01:00
Sebastian
881d2e3c6c update 2024-12-16 06:34:40 +01:00
Sebastian
c6da52d9d6 update 2024-12-16 06:25:50 +01:00
Sebastian
aea68dc422 update 2024-12-16 02:59:51 +01:00
Sebastian
12858a4388 make it yaml 2024-12-15 09:32:04 +01:00
Sebastian
e75b544788 open debug methods 2024-12-15 09:30:39 +01:00
Sebastian
95819e9884 release the thunder 2024-12-15 09:22:26 +01:00
Sebastian
09c94f55e6 smallish fix for better compatibility maybe 2024-12-15 08:42:35 +01:00
Sebastian
c61b1964af do allow debug 2024-12-15 08:38:42 +01:00
Sebastian
2a16f5b3a1 fix 2024-12-15 08:33:14 +01:00
Sebastian
c299275992 get going with tron 2024-12-15 08:27:07 +01:00
Sebastian
422a4d0f5a can't do snap :( 2024-12-15 08:24:14 +01:00
Sebastian
c4ad96701f onboard viction 2024-12-15 08:18:18 +01:00
Sebastian
e69ca8805c slurp db 2024-12-14 12:18:06 +01:00
Sebastian
3a3845decf make it perfect 2024-12-14 09:14:46 +01:00
Sebastian
3dca497c52 do it 2024-12-14 09:07:00 +01:00
Sebastian
039f902065 fix 2024-12-14 09:03:40 +01:00
Sebastian
7a2c5699aa fix 2024-12-14 08:57:49 +01:00
Sebastian
455b633f87 fix 2024-12-14 08:49:33 +01:00
Sebastian
cad4819cc5 enable jsonrpc 2024-12-14 08:43:30 +01:00
Sebastian
aaae21a747 fixes 2024-12-14 08:35:42 +01:00
Sebastian
86203e4c74 first try 2024-12-14 08:31:18 +01:00
Sebastian
a2431070a5 open websockets 2024-12-14 06:45:04 +01:00
Sebastian
4352e5e37b update 2024-12-13 10:27:51 +01:00
Sebastian
d41f231705 update 2024-12-13 10:27:25 +01:00
Sebastian
1fdde3bc8c even better 2024-12-13 10:07:15 +01:00
Sebastian
e62e204cac even better 2024-12-13 10:05:21 +01:00
Sebastian
381159a0d7 even better 2024-12-13 10:02:29 +01:00
Sebastian
7793a32a0d even better 2024-12-13 10:00:16 +01:00
Sebastian
00cd9aafb8 neat one liner 2024-12-13 09:56:57 +01:00
Sebastian
55ba3ebcaa add ip 2024-12-13 09:28:41 +01:00
Sebastian
16a8d82ea8 fix 2024-12-13 09:27:01 +01:00
Sebastian
8ac87b7c28 fix the config 2024-12-13 09:20:46 +01:00
Sebastian
ef94c32e50 fix the config 2024-12-13 09:19:05 +01:00
Sebastian
98a7faa0b5 fix the config 2024-12-13 09:16:13 +01:00
Sebastian
3238af6086 fix 2024-12-12 12:05:43 +01:00
Sebastian
e4055a6ffa fix 2024-12-12 11:56:42 +01:00
Sebastian
bf91065d8c update 2024-12-12 10:49:27 +01:00
Sebastian
bb7e4eb9b3 fix 2024-12-12 10:38:56 +01:00
Sebastian
f5860f9b3d fix 2024-12-12 10:35:14 +01:00
Sebastian
68572a6285 fix 2024-12-12 10:29:22 +01:00
Sebastian
58bd2bd587 update 2024-12-12 10:26:30 +01:00
Sebastian
a51df66b4b public port 2024-12-12 09:40:26 +01:00
Sebastian
6a16a0e4fc discover 2024-12-12 09:36:00 +01:00
Sebastian
c65f279074 snap sync 2024-12-12 09:31:44 +01:00
Sebastian
777956191b resync as pbss 2024-12-12 09:13:36 +01:00
Sebastian
89f199bcd2 update 2024-12-12 09:03:39 +01:00
Sebastian
68f0f98cc6 do not walk back 2024-12-12 08:33:54 +01:00
Sebastian
c5f3952c42 update stats server 2024-12-11 08:12:55 +01:00
Sebastian
14df6ea00a enable debug 2024-12-11 08:01:26 +01:00
Sebastian
c36667850b update 2024-12-11 03:04:13 +01:00
Sebastian
e1a2dec38a fix 2024-12-09 12:45:20 +01:00
Sebastian
94a4547b0e fix 2024-12-09 12:42:56 +01:00
Sebastian
4dbfb26767 fix 2024-12-09 12:40:13 +01:00
Sebastian
1f45610cbb fix 2024-12-09 08:38:38 +01:00
Sebastian
e0bad01cf0 downgrade 2024-12-09 08:34:33 +01:00
Sebastian
65788673d8 update to pebble 2024-12-09 08:25:54 +01:00
Sebastian
670b8bce16 fix? 2024-12-09 06:15:37 +01:00
Sebastian
42ec54e512 arb-sep-peb 2024-12-09 06:07:43 +01:00
Sebastian
9dd151af58 add nova pebble 2024-12-09 06:03:08 +01:00
Sebastian
d5aab68175 hash not path 2024-12-09 05:53:41 +01:00
Sebastian
3586424217 init pbss 2024-12-08 08:12:20 +01:00
Sebastian
764a987395 fix 2024-12-08 07:44:02 +01:00
Sebastian
84e1236582 give debug a new chance to shine 2024-12-08 07:41:07 +01:00
Sebastian
9b09e08daf update 2024-12-08 07:39:15 +01:00
Sebastian
e7b3d2b998 update 2024-12-08 07:32:07 +01:00
Sebastian
3c56a0b8cb update 2024-12-08 07:30:51 +01:00
Sebastian
488b729004 update 2024-12-08 07:27:33 +01:00
Sebastian
bfcbe68c35 update 2024-12-08 07:20:17 +01:00
Sebastian
d9d96fc1ad update 2024-12-08 07:19:24 +01:00
Sebastian
9fc832022e update 2024-12-08 07:14:37 +01:00
Sebastian
6c2e2e1b02 update 2024-12-07 08:01:01 +01:00
Sebastian
0b85297d76 update 2024-12-06 11:49:39 +01:00
Sebastian
03b8ce32d9 update 2024-12-06 11:49:06 +01:00
Sebastian
ddcebe4744 more efficient 2024-12-04 08:12:06 +01:00
Sebastian
a827cab203 more efficient 2024-12-04 08:09:39 +01:00
Sebastian
573da345ce advertise public ip 2024-12-04 08:04:43 +01:00
Sebastian
1bbfe1ee10 fix 2024-12-04 08:01:18 +01:00
Sebastian
121acebe3b update 2024-12-04 07:59:38 +01:00
Sebastian
ff0ef06f08 fix 2024-12-03 16:29:52 +01:00
Sebastian
c74423ce03 fix 2024-12-03 16:21:31 +01:00
Sebastian
81e7f6e50d quick n dirty 2024-12-03 15:57:27 +01:00
Sebastian
26dd8f118b convenience 2024-12-02 13:21:32 +01:00
Sebastian
fb7ff43a61 disable faulty method 2024-12-02 13:10:02 +01:00
Sebastian
9a7bfac022 update 2024-11-30 16:53:26 +01:00
Sebastian
2e34b1734b fix 2024-11-30 15:26:18 +01:00
Sebastian
512594a4c2 meh 2024-11-30 14:34:04 +01:00
Sebastian
36bae6707d not attach the node part of OP to shackle 2024-11-30 13:47:36 +01:00
Sebastian
635c5407a8 yeay 2024-11-30 13:37:21 +01:00
Sebastian
4abbf293f8 fix the patch thingy 2024-11-30 13:16:30 +01:00
Sebastian
dcbd9d2e81 fix 2024-11-28 08:32:50 +01:00
Sebastian
4449dc0444 fix 2024-11-28 08:30:43 +01:00
Sebastian
4abfa497be fix 2024-11-28 06:58:05 +01:00
Sebastian
5005597969 update 2024-11-28 06:57:30 +01:00
Sebastian
38cd0baf87 update 2024-11-28 04:09:54 +01:00
Sebastian
d952e5c0f4 update 2024-11-27 12:18:24 +01:00
Sebastian
221aa34077 update 2024-11-27 08:44:31 +01:00
Sebastian
09718ce61e update 2024-11-27 08:35:14 +01:00
Sebastian
9abacb22cc update 2024-11-26 16:26:40 +01:00
Sebastian
54c2da9a3c update 2024-11-26 13:48:23 +01:00
Sebastian
b809bcab8a update 2024-11-26 13:28:16 +01:00
Sebastian
3468d97747 swoosh 2024-11-26 11:51:05 +01:00
Sebastian
97827e00c2 fix 2024-11-26 11:41:30 +01:00
Sebastian
6e63a06707 fix 2024-11-26 11:40:12 +01:00
Sebastian
92c205d896 update and patch 2024-11-26 11:38:49 +01:00
Sebastian
006f34d40c external ip 2024-11-24 10:53:12 +01:00
Sebastian
fb4b769814 add public ip 2024-11-24 10:51:37 +01:00
Sebastian
bd2ef46f9b change chainid 2024-11-23 09:05:38 +01:00
Sebastian
bc2f40994f change reference rpc 2024-11-23 09:04:55 +01:00
Sebastian
d34f0d2c2e remove peer on old network 2024-11-23 09:01:29 +01:00
Sebastian
ac50b97a5a make executable 2024-11-23 08:58:34 +01:00
Sebastian
2c2a09878c update 2024-11-23 08:53:42 +01:00
Sebastian
4ae5c77f2d update static peers 2024-11-21 10:32:17 +01:00
Sebastian
97439ad751 update 2024-11-21 05:26:26 +01:00
Sebastian
12cdf6a9cb update 2024-11-21 05:24:39 +01:00
Sebastian
acea00d0f4 update 2024-11-21 05:23:42 +01:00
Sebastian
3cb23ff2b1 unstuck it 2024-11-19 10:23:00 +01:00
Sebastian
6f5709463c update 2024-11-19 10:20:10 +01:00
Sebastian
ecc3f97892 update 2024-11-19 10:18:18 +01:00
Sebastian
d3130f0ea2 downgrade 2024-11-19 09:37:27 +01:00
Sebastian
8aa9af34e7 downgrade 2024-11-19 09:36:31 +01:00
Sebastian
5435c59eec graceful 2024-11-19 09:28:08 +01:00
Sebastian
af46f41578 update 2024-11-19 09:19:22 +01:00
Sebastian
53e287ce79 add extra gas 2024-11-09 05:09:49 +01:00
Sebastian
fb0665c141 do not drop websocket connections 2024-11-09 05:06:45 +01:00
Sebastian
94c443dbac do not drop websocket connections 2024-11-09 05:05:44 +01:00
Sebastian
41ad2761fc do not drop websocket connections 2024-11-09 05:01:36 +01:00
Sebastian
e97d947294 do not drop websocket connections 2024-11-09 04:58:58 +01:00
Sebastian
a756d45cb2 do not drop websocket connections 2024-11-09 04:56:53 +01:00
Sebastian
ddeba389c8 do not drop websocket connections 2024-11-09 04:53:10 +01:00
Sebastian
26eb9f430f update 2024-11-09 03:08:42 +01:00
Sebastian
89b7b1607f update 2024-11-09 03:08:28 +01:00
Sebastian
46ff876a04 update 2024-11-09 03:07:18 +01:00
Sebastian
d0887fd183 update 2024-11-09 03:05:44 +01:00
Sebastian
0284e260e2 update 2024-11-09 03:00:19 +01:00
Sebastian
b4cac40a24 update 2024-11-09 02:57:06 +01:00
Sebastian
273abcea31 update 2024-11-09 02:51:23 +01:00
Sebastian
4a6c4b7cc6 fix 2024-11-06 14:27:56 +01:00
Sebastian
610a152446 slight fix 2024-11-06 13:55:11 +01:00
Sebastian
108f126c1f fix rsk with fixed host header in nginx proxy 2024-11-06 13:32:01 +01:00
Sebastian
283040ace0 fix 2024-11-06 13:10:57 +01:00
Sebastian
375856bd13 fix 2024-11-06 12:40:46 +01:00
Sebastian
8ed7698702 drop the rsk 2024-11-06 12:35:30 +01:00
Sebastian
202555f320 add gascap 2024-11-06 04:45:02 +01:00
Sebastian
4a84f6e149 update 2024-11-05 12:04:50 +01:00
Sebastian
2c14893d16 fix goat testnet chainid 2024-11-05 11:54:40 +01:00
Sebastian
28e89bcf16 fixes 2024-11-05 11:53:33 +01:00
Sebastian
b165482ed9 fix goat 2024-11-05 11:51:47 +01:00
Sebastian
ba35ff8a6d add another static path 2024-11-03 10:51:28 +01:00
Sebastian
cc3538a407 add another static path 2024-11-03 10:41:52 +01:00
Sebastian
3d2026421c try to figure 2024-11-03 10:13:32 +01:00
Sebastian
51f400fe59 find me the gateway ips 2024-11-03 08:55:57 +01:00
Sebastian
664cc8ccb2 update 2024-11-03 04:35:26 +01:00
Sebastian
05e7216a09 fix 2024-11-01 13:57:27 +01:00
Sebastian
24cdf0b4e6 initial 2024-11-01 13:31:31 +01:00
Sebastian
0592b7b9b6 fix 2024-10-28 10:52:26 +01:00
Sebastian
6752469c8d fix 2024-10-28 10:45:56 +01:00
Sebastian
63a47db15c fix 2024-10-26 07:52:47 +02:00
Sebastian
86642de9ca maybe fix 2024-10-26 07:38:22 +02:00
Sebastian
ebb804e9b8 fix 2024-10-26 07:23:10 +02:00
Sebastian
2e40528e33 fix 2024-10-26 07:19:24 +02:00
Sebastian
b7fbf030ae fix2 2024-10-26 07:16:49 +02:00
Sebastian
1882de3f16 fix 2024-10-26 07:14:11 +02:00
Sebastian
808082a7c2 make a debug build 2024-10-26 07:09:08 +02:00
Sebastian
2375550c7c update 2024-10-24 15:12:52 +02:00
Sebastian
10cbf262c2 update 2024-10-24 15:00:51 +02:00
Sebastian
7c7cc48fba fix 2024-10-24 09:19:48 +02:00
Sebastian
9a802e1252 revert 2024-10-24 08:55:07 +02:00
Sebastian
5db8749420 fix 2024-10-24 08:53:13 +02:00
Sebastian
ac697688f0 fix 2024-10-24 08:52:51 +02:00
Sebastian
a5c4cc0aca update 2024-10-24 08:52:25 +02:00
Sebastian
d08713822e fix 2024-10-24 08:21:13 +02:00
Sebastian
61ff75b36e fix 2024-10-24 08:17:18 +02:00
Sebastian
1ddebffc49 fix 2024-10-24 08:16:28 +02:00
Sebastian
2bc376f8e8 update 2024-10-24 06:16:42 +02:00
Sebastian
ad4887659a update 2024-10-24 06:15:50 +02:00
Sebastian
2531c6eab7 send it 2024-10-24 06:09:50 +02:00
Sebastian
bb1b563d76 update 2024-10-24 05:56:45 +02:00
Sebastian
f8b6adb95e update 2024-10-24 05:54:16 +02:00
Sebastian
eefa603b38 update 2024-10-24 05:52:56 +02:00
Sebastian
9bf6a91048 update 2024-10-24 05:51:51 +02:00
Sebastian
0c36a6488c update 2024-10-24 05:49:32 +02:00
Sebastian
e7e25aeb43 update 2024-10-24 05:48:37 +02:00
Sebastian
e2e2d2704d update 2024-10-24 05:37:15 +02:00
Sebastian
325abbed2c update 2024-10-24 05:35:47 +02:00
Sebastian
e081e1d220 update open port 2024-10-23 05:42:58 +02:00
Sebastian
24ade40556 fix 2024-10-23 05:38:45 +02:00
Sebastian
e37c67bbb9 fix 2024-10-23 05:37:23 +02:00
Sebastian
7ffba3114f add testnet 2024-10-23 04:13:38 +02:00
Sebastian
0c56968419 op arch 2024-10-23 03:51:05 +02:00
Sebastian
6f9bb930a5 update 2024-10-22 10:06:55 +02:00
Sebastian
cda4598863 update 2024-10-20 16:19:55 +02:00
Sebastian
95bd317d56 make it accessible 2024-10-20 15:20:12 +02:00
Sebastian
f29ef5a236 remove catchup from status 2024-10-20 14:55:58 +02:00
Sebastian
18aa6600e7 leave a hint 2024-10-20 04:58:25 +02:00
Sebastian
d4287dd205 fix 2024-10-20 04:48:03 +02:00
Sebastian
ea140229ae fix 2024-10-20 04:44:26 +02:00
Sebastian
5bd28a4878 fix 2024-10-20 04:42:24 +02:00
Sebastian
933c5174fd fix 2024-10-20 04:41:03 +02:00
Sebastian
0f31d7fd46 update 2024-10-20 04:30:07 +02:00
Sebastian
05bc981714 update 2024-10-20 04:27:26 +02:00
Sebastian
2c1622b52b update 2024-10-20 04:24:10 +02:00
Sebastian
407b7ffd0a update 2024-10-20 04:19:53 +02:00
Sebastian
8e80fd6c8a update 2024-10-20 04:17:54 +02:00
Sebastian
62fca8931f update 2024-10-20 04:16:14 +02:00
Sebastian
4af8d3a65e initial 2024-10-20 04:07:18 +02:00
Sebastian
fb6ebad7bf fix 2024-10-20 02:31:40 +02:00
Sebastian
d52ce39184 aleph pbss 2024-10-20 02:09:19 +02:00
Sebastian
6636b47344 fix 2024-10-20 02:01:40 +02:00
Sebastian
107cdd9983 fix 2024-10-20 01:55:22 +02:00
Sebastian
e69b6e6fc2 fix 2024-10-20 01:41:03 +02:00
Sebastian
aa8c4a93e6 fix 2024-10-19 18:54:15 +02:00
Sebastian
7643078aa2 fix 2024-10-19 05:38:12 +02:00
Sebastian
51129f590f path 2024-10-17 13:00:58 +02:00
Sebastian
82cffd4953 do optimism reth 2024-10-17 08:19:57 +02:00
Sebastian
3a8bc7ec2f fix 2024-10-17 07:02:52 +02:00
Sebastian
6163377682 fix 2024-10-17 06:57:15 +02:00
Sebastian
82f08a87d1 sync a fullnode to save some space 2024-10-17 06:53:17 +02:00
Sebastian
1700537df1 add slowdisk mount 2024-10-17 06:36:28 +02:00
Sebastian
d23e06e321 init fullnode 2024-10-17 06:21:13 +02:00
Sebastian
1e4623a4f0 fix 2024-10-17 06:14:33 +02:00
Sebastian
18532b349b fix 2024-10-17 06:11:36 +02:00
Sebastian
0efdc2e71a add reth for base 2024-10-17 06:07:01 +02:00
Sebastian
f5b25b0da5 update 2024-10-06 15:19:24 +02:00
Sebastian
33c98457ca fix 2024-10-03 17:05:42 +02:00
Sebastian
e0478f5774 add hardforks 2024-10-03 16:56:54 +02:00
Sebastian
34d3fb4925 add hardforks 2024-10-03 16:54:50 +02:00
Sebastian
01883fca09 fix 2024-10-03 16:19:46 +02:00
Sebastian
a5e0200878 trying 2024-10-03 16:17:21 +02:00
Sebastian
014cc3fc51 init fuji archive 2024-10-03 16:13:08 +02:00
Sebastian
6a79b839df fresh start with playnance 2024-10-03 15:32:25 +02:00
Sebastian
8242f85f4c downgrade 2024-10-03 15:15:21 +02:00
Sebastian
901a96f511 fix 2024-10-03 09:59:04 +02:00
Sebastian
803786263c fix 2024-10-03 09:57:04 +02:00
Sebastian
9ac56cc9a7 init linea sepolia 2024-10-03 09:56:19 +02:00
Sebastian
43bd3903d6 fix 2024-10-03 09:25:39 +02:00
Sebastian
bf49142a59 init zora sepolia 2024-10-03 09:24:36 +02:00
Sebastian
33de7762fb fix 2024-10-03 08:22:25 +02:00
Sebastian
9547a26193 fix 2024-10-03 08:15:10 +02:00
Sebastian
75d39e10e5 add that thang 2024-10-03 07:39:58 +02:00
Sebastian
9bf038343e fix 2024-10-03 07:33:21 +02:00
Sebastian
21963b6b3d initial commit 2024-10-03 07:28:25 +02:00
Sebastian
b8dc48866c add admin 2024-09-30 17:24:16 +02:00
Sebastian
3e1135c458 update 2024-09-30 15:35:58 +02:00
Sebastian
216e33af86 downgrade 2024-09-29 07:12:41 +02:00
Sebastian
4a8a350710 update 2024-09-29 06:57:36 +02:00
Sebastian
7994cf7d0e keep the old for a bit 2024-09-28 10:57:58 +02:00
Sebastian
1ce52e8e57 fix 2024-09-28 10:57:03 +02:00
Sebastian
d2fa8cb535 fix 2024-09-28 10:28:22 +02:00
Sebastian
c9421b6947 fix 2024-09-28 10:26:58 +02:00
Sebastian
926728a29d fix 2024-09-28 10:25:04 +02:00
Sebastian
3ac96e6121 fix 2024-09-28 10:24:23 +02:00
Sebastian
3d9f645a3c fix 2024-09-28 10:23:17 +02:00
Sebastian
d7da405614 fix 2024-09-28 09:58:14 +02:00
Sebastian
fd2519a0fc initial commit celo migration 2024-09-28 09:56:48 +02:00
Sebastian
ea8912fff3 update ffs 2024-09-28 08:11:55 +02:00
Sebastian
f5a73b7ff0 update 2024-09-27 13:38:58 +02:00
Sebastian
6220c2eb2c update 2024-09-27 13:36:34 +02:00
Sebastian
f44a091ec4 update 2024-09-27 13:35:45 +02:00
Sebastian
4de425066e update 2024-09-27 13:28:32 +02:00
Sebastian
956a8f0b01 fix 2024-09-25 17:15:58 +02:00
Sebastian
d69f2ce122 fix 2024-09-25 03:42:30 +02:00
Sebastian
0ca6a9e52d rename 2024-09-25 03:39:34 +02:00
Sebastian
115ec3cd06 init reth 2024-09-25 03:37:09 +02:00
Sebastian
7b0df6cbf8 update 2024-09-24 09:15:32 +02:00
Sebastian
75d06ef967 init 2024-09-24 08:28:11 +02:00
Sebastian
7f9d5200ef initial 2024-09-24 07:26:48 +02:00
Sebastian
e7d371cd2f fix 2024-09-24 07:14:55 +02:00
Sebastian
57fce1908b parameterize 2024-09-24 07:10:43 +02:00
Sebastian
9e91f100d9 parameterize 2024-09-24 07:08:03 +02:00
Sebastian
8777dd736b fix 2024-09-23 12:19:01 +02:00
Sebastian
3b16fb7243 moar bootnodes 2024-09-23 12:06:55 +02:00
Sebastian
40ebc8f7c5 fix 2024-09-23 06:47:57 +02:00
Sebastian
9654c88357 fix 2024-09-23 06:43:23 +02:00
Sebastian
f9aaa67a51 update 2024-09-23 06:13:05 +02:00
Sebastian
94ca54ff0d update 2024-09-23 06:08:02 +02:00
Sebastian
ff35adf9ae update 2024-09-22 12:32:33 +02:00
Sebastian
4a1dde629e rename 2024-09-21 18:00:03 +02:00
Sebastian
328ac28244 fix 2024-09-21 17:54:08 +02:00
Sebastian
908c5bc93e enable debug 2024-09-20 04:34:23 +02:00
Sebastian
d945358d25 fix 2024-09-19 10:46:43 +02:00
Sebastian
3e77cde88c update 2024-09-19 10:45:21 +02:00
Sebastian
90075c746b must be archive :( 2024-09-19 10:00:32 +02:00
Sebastian
e15c0bf9c0 prune 2024-09-19 09:58:42 +02:00
Sebastian
ecd51a45ee update 2024-09-19 09:55:06 +02:00
Sebastian
b352b58afb add berachain 2024-09-18 16:03:57 +02:00
Sebastian
5f098bf14f add berachain 2024-09-18 15:45:23 +02:00
Sebastian
a7c1bac157 other bootnodes 2024-09-18 09:59:43 +02:00
Sebastian
474cd78e03 fix 2024-09-18 09:45:52 +02:00
Sebastian
5bd851283c fix 2024-09-18 09:42:39 +02:00
Sebastian
a6a4811230 fix 2024-09-18 09:35:32 +02:00
Sebastian
b4d826b247 fix 2024-09-18 09:33:42 +02:00
Sebastian
b37b71e4ec fix 2024-09-18 09:32:50 +02:00
Sebastian
7158dfa838 half fix 2024-09-18 09:29:10 +02:00
Sebastian
1bfcb8c512 moar linea 2024-09-18 09:24:16 +02:00
Sebastian
e2131027d5 update 2024-09-18 08:47:46 +02:00
Sebastian
e26290121c update 2024-09-18 08:46:53 +02:00
Sebastian
832a4f09fa update 2024-09-18 08:40:24 +02:00
Sebastian
56fed20b4e update 2024-09-18 08:39:07 +02:00
Sebastian
42ad013dd1 update 2024-09-18 08:37:14 +02:00
Sebastian
5b9a94c808 update 2024-09-18 08:26:12 +02:00
Sebastian
33294cbea0 fix 2024-09-18 08:23:56 +02:00
Sebastian
6af917da86 update 2024-09-18 08:19:19 +02:00
Sebastian
fbe02e26a2 fix 2024-09-17 21:50:53 +02:00
Sebastian
3f0f433faa fix 2024-09-17 21:30:53 +02:00
Sebastian
c799568aa6 fix 2024-09-17 21:24:47 +02:00
Sebastian
dcd09ea5aa update 2024-09-17 21:17:01 +02:00
Sebastian
acf47cc2e1 fix 2024-09-17 21:15:37 +02:00
Sebastian
837c35d3ba zircuit testnet 2024-09-17 21:14:33 +02:00
Sebastian
52838f1cf5 zircuit testnet 2024-09-17 21:12:23 +02:00
Sebastian
ae7045b607 fix 2024-09-17 10:52:46 +02:00
Sebastian
a3a428b0e5 update 2024-09-17 10:45:14 +02:00
Sebastian
9257179d2d everclear mainnet 2024-09-16 14:44:03 +02:00
Sebastian
3e1e0ff9ab fix 2024-09-16 13:27:01 +02:00
Sebastian
f4a663db24 this is faster 2024-09-16 13:22:15 +02:00
Sebastian
a91ae0ddd0 fix 2024-09-16 13:10:00 +02:00
Sebastian
9fcd4f9a6b fix 2024-09-16 13:03:53 +02:00
Sebastian
88ec7cbd31 fix 2024-09-16 12:54:43 +02:00
Sebastian
4603a04c3b new feature 2024-09-16 12:16:04 +02:00
Sebastian
afa19bb887 do the thing 2024-09-16 12:04:27 +02:00
Sebastian
9134e3d21f fix 2024-09-15 16:18:29 +02:00
Sebastian
11f737c75f fix 2024-09-15 16:15:53 +02:00
Sebastian
b0f12276c8 init zksync 2024-09-15 15:42:33 +02:00
Sebastian
68b2e612ba fix 2024-09-15 11:56:06 +02:00
Sebastian
3f9ab7adf3 fix 2024-09-15 11:51:49 +02:00
Sebastian
c00cf4486e neat tool 2024-09-15 11:51:02 +02:00
Sebastian
bac273fa23 fix 2024-09-15 11:49:12 +02:00
Sebastian
2435ce388b add that thing 2024-09-15 11:43:03 +02:00
Sebastian
a299f72036 trying reth to rule 2024-09-15 11:42:19 +02:00
Sebastian
6b9c423c32 fix 2024-09-12 13:45:44 +02:00
Sebastian
d930c640ba fix 2024-09-12 13:35:23 +02:00
Sebastian
a66e72528f update 2024-09-12 13:30:06 +02:00
Sebastian
0d792d5088 make external rpcs available 2024-09-12 13:20:41 +02:00
Sebastian
59b132ee62 helper 2024-09-11 15:00:23 +02:00
Sebastian
a12c17bada update 2024-09-11 14:51:37 +02:00
Sebastian
d601f3aa33 update 2024-09-11 13:54:31 +02:00
Sebastian
271a39d176 fix 2024-09-11 13:35:14 +02:00
Sebastian
9e8036186d flexible 2024-09-11 13:32:48 +02:00
Sebastian
5ad15b9823 add estimate 2024-09-11 13:24:34 +02:00
Sebastian
70fd7e0346 add estimate 2024-09-11 13:23:28 +02:00
Sebastian
5cb026697a calculate catchup time 2024-09-11 13:20:03 +02:00
Sebastian
6cf158fe36 calculate catchup time 2024-09-11 12:29:56 +02:00
Sebastian
d37ac5dd93 mmh? 2024-09-11 12:16:41 +02:00
Sebastian
03eeb4737f separate 2024-09-11 12:13:14 +02:00
Sebastian
2bb25ffef7 show time diff in chains 2024-09-11 11:58:54 +02:00
Sebastian
7c37f5dcce renaming 2024-09-11 11:48:53 +02:00
Sebastian
fc8b64f82b fix again 2024-09-11 06:01:46 +02:00
Sebastian
a3c9c0e0ac fix 2024-09-11 05:54:10 +02:00
Sebastian
0a935a9bd3 fix 2024-09-11 05:51:11 +02:00
Sebastian
321c3c6f18 fix 2024-09-11 05:44:14 +02:00
Sebastian
088ab82e51 update 2024-09-11 05:26:16 +02:00
Sebastian
9c09a7a4e3 fix 2024-09-10 06:54:22 +02:00
Sebastian
ec90cb9a54 try 2024-09-09 15:32:57 +02:00
Sebastian
b14ed827bb fix 2024-09-09 15:31:09 +02:00
Sebastian
70c72dcdca fix 2024-09-09 15:30:17 +02:00
Sebastian
cf05d1eeb2 fix 2024-09-09 15:16:44 +02:00
Sebastian
f62e1ce178 fix 2024-09-09 15:04:39 +02:00
Sebastian
6038bb8c9c fix 2024-09-09 15:03:11 +02:00
Sebastian
357799e3c7 fix 2024-09-09 14:40:52 +02:00
Sebastian
786bcbb9fd fix 2024-09-09 14:38:57 +02:00
Sebastian
d78ab74a66 first try 2024-09-09 14:37:47 +02:00
Sebastian
2705057a2a add fuji 2024-09-09 13:28:26 +02:00
Sebastian
d54e3f3ac9 initial commit 2024-09-09 13:23:45 +02:00
Sebastian
0e2bf75e05 fix 2024-09-08 19:36:07 +02:00
Sebastian
6e9c1ddcaa mmh 2024-09-08 19:33:43 +02:00
Sebastian
e4ad028a5f fix 2024-09-08 19:24:08 +02:00
Sebastian
34d3488a2c fix 2024-09-08 19:20:18 +02:00
Sebastian
ef1d9f0cfc fix 2024-09-08 19:17:59 +02:00
Sebastian
39dd51c282 fix 2024-09-08 19:09:34 +02:00
Sebastian
13b3ade46b maybe 2024-09-08 19:06:03 +02:00
Sebastian
1640630d4d fix amoy erigon 2024-09-08 18:48:31 +02:00
Sebastian
8a55588c21 useful script 2024-09-08 13:14:10 +02:00
Sebastian
d3526dbac9 add bootnodes 2024-09-07 16:37:54 +02:00
Sebastian
8cfbf4c268 update 2024-09-07 16:07:10 +02:00
Sebastian
3c14f627fe fix 2024-09-06 17:42:41 +02:00
Sebastian
3842460115 fix 2024-09-06 17:41:04 +02:00
Sebastian
590c21013b fix 2024-09-06 17:36:57 +02:00
Sebastian
e1bf532680 update op stack to granite 2024-09-06 16:53:18 +02:00
Sebastian
f55b51d47f update 2024-09-06 16:45:50 +02:00
Sebastian
42cab51487 arbitrum sepolia archive 2024-09-05 12:05:22 +02:00
Sebastian
1bcd168590 stay updated 2024-09-05 05:21:15 +02:00
Sebastian
a1e8474193 downgrade 2024-09-05 05:19:48 +02:00
Sebastian
c1daf236dc update 2024-09-05 05:16:14 +02:00
Sebastian
0df30b5087 fix 2024-09-04 17:45:14 +02:00
Sebastian
7ccb694f79 init 2024-09-04 16:26:08 +02:00
Sebastian
54050b4825 update and enable debug 2024-09-04 16:14:09 +02:00
Sebastian
4f07ef0291 enable debug 2024-09-04 16:12:54 +02:00
Sebastian
e2f9c6b340 enable debug and admin api 2024-09-04 16:10:06 +02:00
Sebastian
c65d2653d1 update 2024-09-04 10:12:07 +02:00
Sebastian
b879e23407 another try 2024-09-03 19:51:11 +02:00
Sebastian
2ab53a937a fix 2024-09-03 15:21:50 +02:00
Sebastian
31ca3b5482 fix 2024-09-03 15:15:03 +02:00
Sebastian
86347fd849 update 2024-09-03 11:36:49 +02:00
Sebastian
90eca50f2b fix 2024-09-03 10:56:39 +02:00
Sebastian
2f2794905e more peers 2024-09-02 05:56:53 +02:00
Sebastian
ca90b62324 everclear is still connext 2024-08-30 10:49:59 +02:00
Sebastian
ddb37f5b54 update 2024-08-28 13:33:04 +02:00
Sebastian
f82245a946 update 2024-08-28 12:18:27 +02:00
Sebastian
3b50b7523e update 2024-08-28 12:12:01 +02:00
Sebastian
ff843759db update 2024-08-28 12:09:30 +02:00
Sebastian
137e776118 update 2024-08-28 12:08:50 +02:00
Sebastian
aa81a19a04 update 2024-08-28 12:07:30 +02:00
Sebastian
e5e16f0dcc update 2024-08-28 12:05:23 +02:00
Sebastian
1ac35d7eeb revert path change 2024-08-28 12:04:55 +02:00
Sebastian
eca977ca2d update 2024-08-28 12:03:45 +02:00
Sebastian
69d097f488 update 2024-08-28 11:59:50 +02:00
Sebastian
739a19f63c update 2024-08-28 11:55:04 +02:00
Sebastian
fdab516df6 update 2024-08-28 11:49:29 +02:00
Sebastian
6559dfec99 update 2024-08-28 11:46:01 +02:00
Sebastian
ba798c659a update 2024-08-28 11:45:09 +02:00
Sebastian
99c19a64c5 update 2024-08-28 11:44:38 +02:00
Sebastian
43f5856b2e update 2024-08-28 11:42:47 +02:00
Sebastian
c575bd727a update 2024-08-28 11:40:47 +02:00
Sebastian
af89b63ef0 update 2024-08-28 10:52:46 +02:00
Sebastian
c6a90a3a43 smart log script 2024-08-28 10:40:46 +02:00
Sebastian
3af587dd70 fix 2024-08-27 03:22:21 +02:00
Sebastian
e1f8c9b4a3 update 2024-08-23 12:01:04 +02:00
Sebastian
5da8fb507d make it executable 2024-08-23 11:23:19 +02:00
Sebastian
87ea6162e8 add a tool 2024-08-23 11:21:42 +02:00
Sebastian
427def01bf rename connex 2024-08-22 00:45:58 +02:00
Sebastian
8e541f55d5 add method 2024-08-22 00:36:58 +02:00
Sebastian
666e70cc66 add method 2024-08-22 00:32:49 +02:00
Sebastian
7dafb9e4e2 update 2024-08-21 18:53:40 +02:00
Sebastian
71a205d8bb update 2024-08-18 10:22:18 +02:00
Sebastian
025bf91980 fix 2024-08-17 17:24:12 +02:00
Sebastian
195bc67947 update 2024-08-17 17:14:41 +02:00
Sebastian
9b600ee651 update 2024-08-17 16:31:32 +02:00
Sebastian
a8f2ae8d56 doesn't work 2024-08-17 16:12:02 +02:00
Sebastian
61aeb6380d restart on error 2024-08-17 15:43:46 +02:00
Sebastian
9d2d6304f9 restart on failure 2024-08-17 15:37:46 +02:00
Sebastian
0b2633add8 fix 2024-08-17 12:07:34 +02:00
Sebastian
4cceb4a9dd update 2024-08-17 11:54:46 +02:00
Sebastian
9f758da034 fix 2024-08-17 11:27:43 +02:00
Sebastian
f51a2d742d support for bob 2024-08-17 11:01:18 +02:00
Sebastian
f4cd56a842 fix the port 2024-08-16 13:04:32 +02:00
Sebastian
d9256870a3 do it 2024-08-16 11:56:50 +02:00
Sebastian
64ac80246f try to enable the debug api 2024-08-16 11:52:30 +02:00
Sebastian
1c765c4fec get ronin rolling 2024-08-16 11:45:25 +02:00
Sebastian
e2d2915dc6 add ronin 2024-08-16 11:26:05 +02:00
Sebastian
55808d71cf fix 2024-08-16 07:54:35 +02:00
Sebastian
0ce8327aec enable zircuit methods 2024-08-16 07:44:57 +02:00
Sebastian
f48b86aff6 update 2024-08-16 07:11:10 +02:00
Sebastian
60f54ac1a2 fight the edge case 2024-08-15 02:37:25 +02:00
Sebastian
1d9b382656 update 2024-08-14 14:24:41 +02:00
Sebastian
c4367ebf83 update 2024-08-14 14:13:38 +02:00
Sebastian
c6478c42f4 add static sequencer 2024-08-14 13:48:40 +02:00
Sebastian
a56168a9f4 add archiver 2024-08-14 13:18:52 +02:00
Sebastian
bd77c974d4 downgrade 2024-08-14 12:55:42 +02:00
Sebastian
0ccd9bbb10 add static sequencer 2024-08-14 12:41:36 +02:00
Sebastian
e91e24e915 add granite time 2024-08-14 12:39:17 +02:00
Sebastian
06e8e94b81 add a reference 2024-08-14 08:23:11 +02:00
Sebastian
2857257cdf update 2024-08-14 03:59:01 +02:00
Sebastian
eed2aebc9f update 2024-08-13 15:13:47 +02:00
Sebastian
b3c7040e85 update 2024-08-13 15:11:42 +02:00
Sebastian
27aac1a258 update 2024-08-13 13:25:10 +02:00
Sebastian
384f60408a update 2024-08-13 13:04:32 +02:00
Sebastian
bdffbd3052 update 2024-08-13 12:30:27 +02:00
Sebastian
9abd904bde update 2024-08-12 17:18:49 +02:00
Sebastian
48f1ecf7fc update 2024-08-12 16:46:41 +02:00
Sebastian
5ee15f3367 update 2024-08-12 08:00:07 +02:00
Sebastian
5598734467 update 2024-08-11 16:32:41 +02:00
Sebastian
622e04314a update 2024-08-11 16:30:57 +02:00
Sebastian
8751079128 update 2024-08-11 16:27:47 +02:00
Sebastian
5d76b86d0c restart on failure 2024-08-05 17:21:19 +02:00
Sebastian
f553c9964d fix 2024-08-01 10:23:12 +02:00
Sebastian
62c652ca23 change path 2024-08-01 10:11:01 +02:00
Sebastian
d88835bce4 fix 2024-08-01 10:09:38 +02:00
Sebastian
41e92b4d70 reduce gas price minimum 2024-07-31 14:54:59 +02:00
Sebastian
a4e4fb6157 support alephzero mainnet 2024-07-31 13:03:33 +02:00
Sebastian
0b54f3568d init 2024-07-30 13:39:57 +02:00
Sebastian
0c22a58ddd change path 2024-07-30 12:49:43 +02:00
Sebastian
07cbefa8a4 change the path 2024-07-30 12:48:21 +02:00
Sebastian
f5c771a36c fix 2024-07-30 08:59:21 +02:00
Sebastian
c02fae6be8 create a fullnode 2024-07-30 03:58:34 +02:00
Sebastian
494fcf53b5 fix ws 2024-07-30 03:53:11 +02:00
Sebastian
c706027a21 fix 2024-07-30 03:49:45 +02:00
Sebastian
d9ff90cb7a fix 2024-07-30 03:34:32 +02:00
Sebastian
47922feefb fix 2024-07-30 03:33:17 +02:00
Sebastian
d0fedcd243 initial support for bobabnb 2024-07-30 03:15:41 +02:00
Sebastian
669b6107f9 update polygon for new eip 2024-07-30 02:54:57 +02:00
Sebastian
9f5f8e8cca fix 2024-07-30 02:48:07 +02:00
Sebastian
b89aa58260 fix 2024-07-30 02:42:44 +02:00
Sebastian
48f3e109d8 fix 2024-07-30 02:42:06 +02:00
Sebastian
e6bf392f25 make it executable 2024-07-30 02:36:05 +02:00
Sebastian
662c8308ad make a maintenance mechanism 2024-07-30 02:35:37 +02:00
Sebastian
4a209279be fix 2024-07-28 14:20:36 +02:00
Sebastian
cf97a3ebf7 fix 2024-07-28 14:17:49 +02:00
Sebastian
852c53bd49 fix 2024-07-28 14:16:51 +02:00
Sebastian
9f34021ba5 fix 2024-07-28 14:08:40 +02:00
Sebastian
cdc3057520 remove ecotone 2024-07-27 05:28:02 +02:00
Sebastian
6756b4b24b do ecotone 2024-07-27 05:27:07 +02:00
Sebastian
cab02417d8 fix 2024-07-27 03:29:07 +02:00
Sebastian
3c56a30c98 fix 2024-07-27 03:27:49 +02:00
Sebastian
5f46dcc097 fix 2024-07-27 03:26:19 +02:00
Sebastian
7c47667d19 fix 2024-07-27 03:24:17 +02:00
Sebastian
6693e07e10 add bobda 2024-07-27 03:21:35 +02:00
Sebastian
0d6453086d fix 2024-07-27 03:16:32 +02:00
Sebastian
e30a1e8795 fix 2024-07-27 03:01:20 +02:00
Sebastian
5dabc44699 add alfajores 2024-07-27 02:50:08 +02:00
Sebastian
4447e49f0c support alfajores 2024-07-27 02:47:18 +02:00
Sebastian
ff4d45482d update 2024-07-26 08:21:55 +02:00
Sebastian
e7803b1d68 update 2024-07-26 08:19:12 +02:00
Sebastian
bb995ccb1a update 2024-07-26 08:16:49 +02:00
Sebastian
f97e28f939 update 2024-07-25 05:03:06 +02:00
Sebastian
3821af347d update 2024-07-25 05:02:17 +02:00
Sebastian
25c16b2c81 update 2024-07-23 12:47:00 +02:00
Sebastian
f546981377 update 2024-07-21 05:35:55 +02:00
Sebastian
7d35dba31f update 2024-07-21 05:34:53 +02:00
Sebastian
5ee332aba5 fix 2024-07-21 03:06:58 +02:00
Sebastian
80a8c1f0a5 chiado 2024-07-21 02:56:42 +02:00
Sebastian
939300f84a fix 2024-07-21 01:47:54 +02:00
Sebastian
cdd85770c5 overrides 2024-07-20 06:21:03 +02:00
Sebastian
4db32f4dff fix 2024-07-20 06:04:47 +02:00
Sebastian
ab3b0631c0 try to rewind 2024-07-20 06:01:33 +02:00
Sebastian
bbcbc7c599 unify base with op 2024-07-18 14:37:04 +02:00
Sebastian
6f02c0453d update 2024-07-18 14:26:40 +02:00
Sebastian
4eb09c6662 update 2024-07-18 14:24:25 +02:00
Sebastian
b0af5c86e3 fix 2024-07-14 11:28:56 +02:00
Sebastian
dc05b1b571 test if config gets applied 2024-07-14 10:42:37 +02:00
Sebastian
60f97b4807 update 2024-07-14 10:31:54 +02:00
Sebastian
0a46467bb8 updates 2024-07-14 03:16:19 +02:00
Sebastian
20c4e8f003 p2p 2024-07-12 18:15:47 +02:00
Sebastian
8c02e70f79 p2p 2024-07-12 18:12:30 +02:00
Sebastian
08a292f3ff fix 2024-07-12 15:22:59 +02:00
Sebastian
965cd0c9f8 fix 2024-07-12 11:48:18 +02:00
Sebastian
fa020e88a5 quicksearch for compare blocks 2024-07-12 11:34:46 +02:00
Sebastian
f71e45b8e5 fix 2024-07-12 11:30:00 +02:00
Sebastian
dfa1e8238c tool 2024-07-12 11:24:10 +02:00
Sebastian
83d54888d8 update 2024-07-12 11:22:08 +02:00
Sebastian
5809ac3670 change identifier 2024-07-12 10:58:23 +02:00
Sebastian
ba3dde63dd fjord 2024-07-12 00:40:40 +02:00
Sebastian
fcccb06b7c update 2024-07-12 00:37:47 +02:00
Sebastian
a1ceec81dc fjord 2024-07-12 00:29:56 +02:00
Sebastian
bfba840f98 update fjord override 2024-07-12 00:21:20 +02:00
Sebastian
7a1563915d comment 2024-07-12 00:19:03 +02:00
Sebastian
ab7f9b9cec fix 2024-07-12 00:02:40 +02:00
Sebastian
270ff15621 fix 2024-07-12 00:00:01 +02:00
Sebastian
ef05004508 fix 2024-07-11 23:59:15 +02:00
Sebastian
57d8d0d582 fix 2024-07-11 23:57:09 +02:00
Sebastian
69a92cab71 add debug 2024-07-11 23:56:12 +02:00
Sebastian
37de16d3ce make a debug build and add FJORD 2024-07-11 23:51:56 +02:00
Sebastian
d3cc2de14d downgrade 2024-07-09 12:41:10 +02:00
Sebastian
2a133511da downgrade 2024-07-09 12:25:51 +02:00
Sebastian
7daef1d46d downgrade 2024-07-09 12:25:10 +02:00
Sebastian
c1edf978d0 update arbitrum L3 2024-07-09 12:06:54 +02:00
Sebastian
773b04eb46 update 2024-07-09 11:53:22 +02:00
Sebastian
6538716288 update 2024-07-09 11:52:11 +02:00
Sebastian
db60b243e5 update 2024-07-09 11:51:11 +02:00
Sebastian
1757657d84 update 2024-07-09 11:47:29 +02:00
Sebastian
915a8e58c4 fix 2024-07-08 13:14:26 +02:00
Sebastian
dd60cd3970 downgrade 2024-07-08 13:05:30 +02:00
Sebastian
c86e0adcf3 beacon 2024-07-08 12:57:11 +02:00
Sebastian
fb0edb9186 fix 2024-07-08 12:46:32 +02:00
Sebastian
42297439a5 open the gates 2024-07-08 12:25:15 +02:00
Sebastian
dcafde3a1c update 2024-07-08 12:21:11 +02:00
Sebastian
49b4c82480 fix 2024-07-08 12:19:28 +02:00
Sebastian
0b8ea35893 add zircuit without rpcs 2024-07-08 12:13:04 +02:00
Sebastian
c057ea791a fix 2024-07-08 12:03:25 +02:00
Sebastian
a66a66490b fix 2024-07-08 11:59:28 +02:00
Sebastian
4395ed71ff fix 2024-07-08 11:59:17 +02:00
Sebastian
7731f31eb0 fix 2024-07-08 11:54:26 +02:00
Sebastian
5496e4a1e5 fix 2024-07-08 11:52:16 +02:00
Sebastian
62f35c5a56 fix 2024-07-08 11:40:13 +02:00
Sebastian
9b23529483 fix? 2024-07-08 11:35:54 +02:00
Sebastian
c1ec1471d4 initial 2024-07-08 11:32:29 +02:00
Sebastian
25e4470c7b update 2024-06-29 09:04:59 +02:00
Sebastian
a0f7896228 update 2024-06-29 09:00:49 +02:00
Sebastian
b1f2f9b250 update 2024-06-29 09:00:33 +02:00
Sebastian
53402c490b arbitrum update 2024-06-29 08:59:19 +02:00
Sebastian
b8f2bfc238 update 2024-06-29 08:49:36 +02:00
Sebastian
e18e6f5fcd update 2024-06-29 08:45:18 +02:00
Sebastian
a3bd1b0b2e update 2024-06-29 08:42:45 +02:00
Sebastian
9cb5d9f95a update 2024-06-29 08:38:47 +02:00
Sebastian
322c62152d update 2024-06-29 08:34:07 +02:00
Sebastian
32de12c179 nimbus > lodestar 2024-06-26 17:58:31 +02:00
Sebastian
34a0ba0647 nimbus > lodestar 2024-06-26 17:57:09 +02:00
Sebastian
6743b05ab3 ports 2024-06-26 17:45:16 +02:00
Sebastian
684152643b add chiado 2024-06-26 17:42:15 +02:00
Sebastian
b00a13498c update and chiado 2024-06-26 17:37:30 +02:00
Sebastian
4d5d488a88 extra gw whitelist 2024-06-26 09:51:45 +02:00
Sebastian
72f5d52852 extra gw whitelist 2024-06-26 09:45:27 +02:00
Sebastian
124b95b5a4 extra gw whitelist 2024-06-26 09:41:31 +02:00
Sebastian
d34580f230 add proxy to dshackle 2024-06-26 09:33:48 +02:00
Sebastian
eb1df3ecaf disable peer validation 2024-06-20 13:05:34 +02:00
Sebastian
3c57c18293 fix ip 2024-06-18 08:54:39 +02:00
Sebastian
1d5e13bc2b fix 2024-06-18 08:52:34 +02:00
Sebastian
c8eac8eb17 update 2024-06-18 08:50:37 +02:00
Sebastian
92d702e050 update 2024-06-18 04:32:35 +02:00
Sebastian
3e2699dd4e add opencampuscodex 2024-06-17 10:09:38 +02:00
Sebastian
d76f1ff61f update version 2024-06-17 09:58:51 +02:00
Sebastian
a34419f14e move it to srb sepolia 2024-06-17 09:53:53 +02:00
Sebastian
33da00c46d initial support 2024-06-17 09:39:17 +02:00
Sebastian
c2c2094e36 change port 2024-06-13 06:52:23 +02:00
Sebastian
e33706abf5 update 2024-06-13 06:15:54 +02:00
Sebastian
c101c360a2 update 2024-06-13 06:15:43 +02:00
Sebastian
1357e85f6d update 2024-06-13 06:13:52 +02:00
Sebastian
a0b7159ebe update 2024-06-13 05:56:14 +02:00
Sebastian
de59e7c233 add scroll sepolia 2024-06-13 05:55:59 +02:00
Sebastian
0034c942a8 fix 2024-06-13 05:14:52 +02:00
Sebastian
30bd1befd4 fix ws 2024-06-13 05:07:31 +02:00
Sebastian
71e8e8987c update 2024-06-12 18:02:23 +02:00
Sebastian
e4f5be87e4 update 2024-06-12 17:56:15 +02:00
Sebastian
ae1728ff9d fix 2024-06-11 07:57:55 +02:00
Sebastian
bd32edc758 update 2024-06-11 07:57:34 +02:00
Sebastian
f27e6827c7 downgrade 2024-06-11 07:49:18 +02:00
Sebastian
b7e5c61ca4 update 2024-06-11 07:41:22 +02:00
Sebastian
6896c3a907 fix for lisk 2024-06-11 06:29:11 +02:00
Sebastian
2b31c4c495 fix 2024-06-11 06:25:49 +02:00
Sebastian
3dbe874a97 add core chain 2024-06-11 06:24:53 +02:00
Sebastian
c77d05968c update 2024-06-09 08:12:08 +02:00
Sebastian
15c06809ed more peers 2024-06-08 07:13:00 +02:00
Sebastian
ff5ee8934b update 2024-06-08 06:54:24 +02:00
Sebastian
0b180f9472 make zora archive 2024-06-08 06:06:00 +02:00
Sebastian
062213a0b1 update 2024-06-06 09:29:37 +02:00
Sebastian
78b1874cc3 update 2024-06-06 09:28:44 +02:00
Sebastian
6afb72c47d update 2024-06-06 09:28:01 +02:00
Sebastian
d7fbdfb28e fix 2024-06-06 08:31:38 +02:00
Sebastian
de11c1c953 fix? 2024-06-06 08:27:31 +02:00
Sebastian
af92f58b74 update 2024-06-06 08:14:42 +02:00
Sebastian
fdc8191914 add the net 2024-06-06 08:05:29 +02:00
Sebastian
4fb0044224 hi blast-sepolia 2024-06-06 08:01:19 +02:00
Sebastian
3f8281e3c0 fix 2024-06-06 07:44:59 +02:00
Sebastian
eab6ef2a9b change ports 2024-06-06 07:43:03 +02:00
Sebastian
04ef2d887f fix 2024-06-06 07:41:45 +02:00
Sebastian
21dcb803fe fix 2024-06-06 07:39:30 +02:00
Sebastian
59b4a85412 fix 2024-06-06 07:38:01 +02:00
Sebastian
b199af7242 hi taiko hekla 2024-06-06 07:32:41 +02:00
Sebastian
8081e7215e auth port 2024-06-06 07:02:58 +02:00
Sebastian
32745d9f18 make it executable 2024-06-06 07:01:56 +02:00
Sebastian
a013a92b55 hi taiko 2024-06-06 06:59:57 +02:00
Sebastian
0535bc8541 update 2024-06-04 13:09:00 +02:00
Sebastian
22b06275a4 fix 2024-06-04 10:54:38 +02:00
Sebastian
507196e185 fix 2024-06-04 10:41:45 +02:00
Sebastian
5f06ad9453 updates 2024-06-04 10:28:30 +02:00
Sebastian
f3206e3eaa fix 2024-06-04 10:21:36 +02:00
Sebastian
c9c6c6f5ac fix 2024-06-04 10:18:59 +02:00
Sebastian
216b64136f add haqq 2024-06-04 10:15:59 +02:00
Sebastian
36346d9e6e add metis 2024-06-04 07:28:34 +02:00
Sebastian
9f4472491d fix 2024-06-04 07:24:47 +02:00
Sebastian
bf193f0918 try to sync metis again 2024-06-04 07:22:29 +02:00
Sebastian
ad9ce66bcb add beacon archiver 2024-06-04 05:35:23 +02:00
Sebastian
3736563397 update fjord override 2024-06-04 05:20:01 +02:00
Sebastian
17703cf0d4 meh 2024-06-04 05:07:17 +02:00
Sebastian
99f89aa93f fix 2024-06-03 11:58:23 +02:00
Sebastian
52aae5b4eb let the admins in 2024-06-03 11:51:04 +02:00
Sebastian
e9d56711d6 let ws subscribe to heads 2024-06-03 11:47:41 +02:00
Sebastian
4d48c17e3f fix traefik 2024-06-03 11:39:47 +02:00
Sebastian
729a09d780 update 2024-06-03 10:56:18 +02:00
Sebastian
6d6ffeeae8 meh 2024-06-03 10:34:34 +02:00
Sebastian
5df69f4d5e meh 2024-06-03 10:30:14 +02:00
Sebastian
32481783ab meh 2024-06-03 10:28:41 +02:00
Sebastian
0a0fe40e57 meh 2024-06-03 10:27:43 +02:00
Sebastian
0dbf1cf5c7 meh 2024-06-03 10:26:55 +02:00
Sebastian
2abfe53d42 change L1 2024-06-03 09:55:11 +02:00
Sebastian
2183d7c593 add base sepolia 2024-06-03 09:50:20 +02:00
Sebastian
4856d70345 archiver 2024-06-03 08:19:05 +02:00
Sebastian
80c9a9c5f2 update and archive 2024-06-03 08:07:57 +02:00
Sebastian
ebdfeda20a open websockets 2024-06-03 07:58:00 +02:00
Sebastian
bd8e0db743 disable p2p 2024-06-03 07:46:54 +02:00
Sebastian
8608417cf5 update 2024-06-03 07:45:10 +02:00
Sebastian
ec57f36345 update 2024-06-03 07:42:44 +02:00
Sebastian
0afa8b872d update 2024-06-03 07:19:33 +02:00
Sebastian
87671e1ebb do not cripple so hard 2024-06-03 07:09:30 +02:00
Sebastian
00d92ca50b update 2024-06-03 07:01:31 +02:00
Sebastian
c86a06bc52 permissions 2024-06-02 17:31:25 +02:00
Sebastian
b0da99c5dd permissions 2024-06-02 17:19:03 +02:00
Sebastian
676c58fd9a revert 2024-06-02 12:24:22 +02:00
Sebastian
62a3925551 change default sync mode 2024-06-02 12:21:16 +02:00
Sebastian
20d70ee317 fix 2024-06-02 12:12:51 +02:00
Sebastian
960a361347 change port 2024-06-02 12:11:39 +02:00
Sebastian
d4e1b2ec20 change port 2024-06-02 12:02:34 +02:00
Sebastian
46f15f6755 change port 2024-06-02 11:57:23 +02:00
Sebastian
b6ae195cb7 change the maxpeer 2024-06-02 11:15:10 +02:00
Sebastian
c22973f322 add optimism-mainnet 2024-06-02 10:26:58 +02:00
Sebastian
a7d00f119b change port 2024-06-02 09:53:26 +02:00
Sebastian
a1b841a94d add optimism pbss 2024-06-02 09:50:20 +02:00
Sebastian
667bf4b036 make sepolia proud again 2024-06-01 06:30:58 +02:00
Sebastian
1986e29f15 set 1 gwei gas price 2024-06-01 06:19:53 +02:00
Sebastian
33fd941711 update 2024-05-31 14:59:02 +02:00
Sebastian
1813f500c1 do not do p2p networking 2024-05-31 07:41:41 +02:00
Sebastian
020c83dfc9 do not do p2p networking 2024-05-31 07:40:47 +02:00
Sebastian
6a52d26462 update 2024-05-31 07:36:04 +02:00
Sebastian
17566e8c7f update 2024-05-30 18:15:48 +02:00
Sebastian
856585d16e add admin api 2024-05-29 18:23:01 +02:00
Sebastian
2e96e51102 enable the admin api 2024-05-29 08:48:55 +02:00
Sebastian
31f2a0f954 downgrade and beacon api 2024-05-29 08:44:40 +02:00
Sebastian
050505d1d4 fix 2024-05-29 08:38:06 +02:00
Sebastian
c975435c72 fix 2024-05-29 08:37:15 +02:00
Sebastian
12e2ab46aa disable networking 2024-05-29 04:06:24 +02:00
Sebastian
314a8148c3 downgrade 2024-05-28 11:56:25 +02:00
Sebastian
3e4ce0354a add the new geth 2024-05-28 11:53:35 +02:00
Sebastian
5771bbef02 initial commit 2024-05-28 11:52:47 +02:00
Sebastian
217c89f340 fix the limit 2024-05-28 07:33:39 +02:00
Sebastian
f3a95fbf7d better compat 2024-05-28 07:33:18 +02:00
Sebastian
afb3d9716c fix the ip variable 2024-05-28 05:31:37 +02:00
Sebastian
8cad973636 update 2024-05-27 17:12:56 +02:00
Sebastian
41060f4d72 fix 2024-05-27 12:02:47 +02:00
Sebastian
fd48ccb46c update 2024-05-27 11:56:37 +02:00
Sebastian
624812ee3e provide the ip 2024-05-27 10:46:05 +02:00
Sebastian
43a06887b7 update beacon checkpoint url 2024-05-27 10:32:26 +02:00
Sebastian
6bb50e5a3f add erigon 2024-05-27 10:24:00 +02:00
Sebastian
6181be12c0 add slowdir to reth 2024-05-27 10:19:20 +02:00
Sebastian
be9798262a open the admin api 2024-05-27 07:23:47 +02:00
Sebastian
d5d3efb549 fix the host port 2024-05-25 15:12:33 +02:00
Sebastian
a490423b3f use archiver on top of beacon rest api 2024-05-25 13:44:48 +02:00
Sebastian
7d7ce9549e update op stack 2024-05-25 13:35:39 +02:00
Sebastian
eab8289c77 update op stack 2024-05-25 13:34:39 +02:00
Sebastian
9e90373317 give geth linea an ip 2024-05-25 08:09:06 +02:00
Sebastian
6e896b53ff update bootnodes 2024-05-25 08:03:59 +02:00
Sebastian
46b855b838 fix the provider label 2024-05-24 10:06:20 +02:00
Sebastian
f083fa2494 remove the whitelist 2024-05-24 10:04:41 +02:00
Sebastian
b4d46b5669 add the erigon beacon api to dshackle yaj 2024-05-24 10:03:44 +02:00
Sebastian
852c77088d fix beacon api 2024-05-23 17:29:55 +02:00
Sebastian
4448cac071 add the gascap config for nova 2024-05-22 17:54:27 +02:00
Sebastian
8d47554a47 add a mainnet erigon to download historical blobs and have a consensus archive node 2024-05-21 10:31:05 +02:00
Sebastian
e3aceb3c4c make a holesky archive with consensus 2024-05-21 10:20:18 +02:00
Sebastian
1d2c6adde6 fix 2024-05-21 06:55:02 +02:00
Sebastian
0232b5124c disable some methods that do not seem to perform well 2024-05-21 06:38:04 +02:00
Sebastian
c3825e5cbc convenience command 2024-05-21 06:06:53 +02:00
Sebastian
b34d70dae9 switch out the backup http server to something that supports aria2 2024-05-21 05:18:32 +02:00
Sebastian
4090708b38 fix 2024-05-20 15:26:21 +02:00
Sebastian
f6a1ad4185 fix 2024-05-20 15:25:27 +02:00
Sebastian
4a006efd6e gascap 2024-05-20 14:58:39 +02:00
Sebastian
44df9e433d make it possible to move the ancient data to a separate disk 2024-05-20 07:44:51 +02:00
Sebastian
bd0ae44938 fix 2024-05-19 08:32:34 +02:00
Sebastian
083a636247 fix 2024-05-19 08:30:43 +02:00
Sebastian
1827e25bb9 make arbitrum sepolia pruneable 2024-05-19 08:24:45 +02:00
Sebastian
6192204ba3 each node needs a separate tmp folder 2024-05-19 07:48:05 +02:00
Sebastian
5f9a84ba40 make a tmp folder for downloading the chain data backup to make sure that the arbitrum container doesn't grow to 3 TB on initialisation 2024-05-19 07:25:12 +02:00
Sebastian
9b06ce40b6 fix tab 2024-05-18 14:52:47 +02:00
Sebastian
de4d0d8fbe remove tab 2024-05-17 14:09:43 +02:00
Sebastian
eec6153ede add mode 2024-05-17 12:17:20 +02:00
Sebastian
5cd9eeb651 update 2024-05-17 11:41:23 +02:00
Sebastian
d28a327cf7 initial rootstock 2024-05-17 11:36:32 +02:00
Sebastian
8333da6004 update ports 2024-05-17 10:51:29 +02:00
Sebastian
fa8f120dae a mode node 2024-05-17 10:50:05 +02:00
Sebastian
905aead44e fix 2024-05-17 07:33:37 +02:00
Sebastian
c58177e230 fix 2024-05-17 07:29:55 +02:00
Sebastian
05f03ed192 do the thing 2024-05-17 07:16:25 +02:00
Sebastian
f792ee7af5 make besu the default for linea already 2024-05-17 07:05:50 +02:00
Sebastian
cb3fb44da1 initial besu linea 2024-05-17 07:03:53 +02:00
Sebastian
361237acca fix 2024-05-17 01:21:50 +02:00
Sebastian
778799ed7b fix 2024-05-17 01:21:09 +02:00
Sebastian
a09578be06 update 2024-05-17 01:13:10 +02:00
Sebastian
1545aab55a nn 2024-05-16 17:20:20 +02:00
Sebastian
86cb0d912b open ports 2024-05-16 12:46:22 +02:00
Sebastian
6593ddd9e3 fix it 2024-05-16 05:08:27 +02:00
Sebastian
c87832d1e3 backup httpd 2024-05-15 06:26:51 +02:00
Sebastian
0245930c89 update 2024-05-14 12:02:07 +02:00
Sebastian
7d876e8137 remove whitelist 2024-05-13 16:19:30 +02:00
Sebastian
a8255c218e fix 2024-05-13 15:19:42 +02:00
Sebastian
f79e76dd30 add lisk mainnet 2024-05-13 14:27:02 +02:00
Sebastian
f5e76ebab3 fix 2024-05-13 12:00:36 +02:00
Sebastian
8f36a32da6 update 2024-05-13 11:14:28 +02:00
Sebastian
11fbd6d797 fix? 2024-05-13 11:07:12 +02:00
Sebastian
e2999f4bbb fix 2024-05-13 10:13:46 +02:00
Sebastian
dce2f81a3b fix 2024-05-13 09:47:02 +02:00
Sebastian
4d24791dd1 make pruning versions of the compose file to make pruning possible without modifying the git repo 2024-05-13 09:23:06 +02:00
Sebastian
a8c286f820 make a pruning version of the avalanche config 2024-05-13 09:02:54 +02:00
Sebastian
29efdbd346 actually delete them also 2024-05-13 05:36:54 +02:00
Sebastian
23c21c9ec3 only leave the freshest backup of each volume 2024-05-13 05:35:36 +02:00
Sebastian
95587dbc4e do the bor pbss thing 2024-05-12 06:24:33 +02:00
Sebastian
e670df8768 init arb connect sepolia 2024-05-10 13:22:10 +02:00
Sebastian
b1bb37aaee beacon 2024-05-10 13:05:02 +02:00
Sebastian
8b4b080abe update 2024-05-10 13:01:36 +02:00
Sebastian
035fac71b2 init alephzero sepolia 2024-05-10 12:56:47 +02:00
Sebastian
8ae7e25e02 version update 2024-05-10 07:34:02 +02:00
Sebastian
7be3ca6a8e optimistically add overrides 2024-05-10 07:08:15 +02:00
Sebastian
dd37d4c409 extra gas limit for bsc in dshackle 2024-05-09 09:59:05 +02:00
Sebastian
630e27623f 600M bsc gas call limit 2024-05-09 09:51:45 +02:00
Sebastian
87305d6f23 update version 2024-05-09 04:50:36 +02:00
Sebastian
2d5d3db3f5 update bsc gas limit 2024-05-09 04:49:46 +02:00
Sebastian
f25661fed3 enable txpool 2024-05-08 14:02:54 +02:00
Sebastian
e9af2773ac things 2024-05-07 14:57:28 +02:00
Sebastian
aff7a47966 update plus txpool 2024-05-07 14:47:15 +02:00
Sebastian
68d190f6c9 fix 2024-05-06 14:52:12 +02:00
Sebastian
6a9e8f8db6 fix 2024-05-06 14:44:15 +02:00
Sebastian
c68a913b15 fix 2024-05-06 14:38:49 +02:00
Sebastian
62c374ec72 fix 2024-05-06 14:35:56 +02:00
Sebastian
0f23f7c0cd fix 2024-05-06 14:27:13 +02:00
Sebastian
de360ef9c2 let's try 2024-05-06 14:24:53 +02:00
Sebastian
f2149b1d28 update gascap 2024-05-03 15:42:20 +02:00
Sebastian
4a357f3685 download old bodies first as that seems to be implied by old receipts 2024-04-28 15:53:43 +02:00
Sebastian
399381ddab add receipts 2024-04-28 15:44:33 +02:00
Sebastian
6016184417 update 2024-04-26 14:16:56 +02:00
Sebastian
f38b0ed2f8 no bodies no receipts please 2024-04-26 12:36:03 +02:00
Sebastian
479fbf499b no bodies no receipts please 2024-04-26 12:33:32 +02:00
Sebastian
cc54e2f21e fix? 2024-04-26 11:07:33 +02:00
Sebastian
0aa7c955c4 fix? 2024-04-26 11:05:23 +02:00
Sebastian
2125053663 fixes? 2024-04-26 11:02:11 +02:00
Sebastian
0a0e30cf61 update 2024-04-26 10:54:52 +02:00
Sebastian
01e805b484 update 2024-04-26 03:33:35 +02:00
Sebastian
157da49e8c jo 2024-04-24 11:11:28 +02:00
Sebastian
6187f57b41 jo 2024-04-24 11:09:28 +02:00
Sebastian
f249198169 add besu to mainnet 2024-04-24 07:12:23 +02:00
Sebastian
99bb5f9de5 make a maximum pruned nethermind 2024-04-24 07:07:08 +02:00
Sebastian
f1ee7c2583 update lighthouse 2024-04-24 06:36:54 +02:00
Sebastian
008c729f42 change beacon sync endpoint 2024-04-24 06:33:32 +02:00
Sebastian
fa0cfa8ce1 update besu 2024-04-24 05:59:30 +02:00
Sebastian
c58e094ff7 fix 2024-04-20 05:40:31 +02:00
Sebastian
0e7817184e nodiscover 2024-04-20 05:37:42 +02:00
Sebastian
e923ceb384 update env variables 2024-04-20 05:35:33 +02:00
Sebastian
4aaebe16d0 fix rollup.json 2024-04-20 05:14:56 +02:00
Sebastian
de22d5a38a the agent 2024-04-20 05:12:19 +02:00
Sebastian
bd67e2fefe the agent 2024-04-20 05:08:57 +02:00
Sebastian
01296611cc downgrade 2024-04-20 04:53:30 +02:00
Sebastian
d90a39593e do it 2024-04-20 04:30:53 +02:00
Sebastian
f11615832d make zora great again 2024-04-20 04:19:50 +02:00
Sebastian
7731aad4f1 a few scripts 2024-04-19 09:42:08 +02:00
Sebastian
45f2900250 update 2024-04-18 10:01:41 +02:00
Sebastian
bb41d17047 add the public ip 2024-04-17 09:37:48 +02:00
Sebastian
fc980407c2 update 2024-04-17 08:45:56 +02:00
Sebastian
03cd9ae1d0 update 2024-04-17 08:43:18 +02:00
Sebastian
da3526a50b update 2024-04-17 08:41:00 +02:00
Sebastian
e1bca8337f update 2024-04-17 08:37:51 +02:00
Sebastian
89e99a9319 better calldata limit 2024-04-13 11:54:23 +02:00
Sebastian
f7cb92e1ec update 2024-04-10 15:40:35 +02:00
Sebastian
90e66b78e6 new compose command 2024-04-10 15:38:36 +02:00
Sebastian
3def8cc77b update 2024-04-10 15:28:30 +02:00
Sebastian
34878cf527 fix 2024-04-10 14:26:43 +02:00
Sebastian
dacd6f2d65 indentation 2024-04-09 16:47:38 +02:00
Sebastian
5848581890 returndata limit 2024-04-09 16:44:56 +02:00
Sebastian
543d499f2e update 2024-04-09 11:39:38 +02:00
Sebastian
e25b8a57c0 make scroll fullnode 2024-04-08 13:28:19 +02:00
Sebastian
dd1db5ef13 enable debug and filter by default 2024-04-07 13:30:28 +02:00
Sebastian
7cba27f732 enable debug and filter by default 2024-04-07 13:25:24 +02:00
Sebastian
4c752f6157 give it more time 2024-04-06 16:53:26 +02:00
Sebastian
07b07b2208 change protocol to 68 2024-04-06 16:20:57 +02:00
Sebastian
d455725f74 let it come up first 2024-04-06 15:49:53 +02:00
Sebastian
da991ae504 fix 2024-04-06 15:44:07 +02:00
Sebastian
c34b370ad5 try 2024-04-06 15:26:37 +02:00
Sebastian
3bc546f701 fix a variable 2024-04-05 15:08:49 +02:00
Sebastian
df992a70ea update 2024-04-05 11:10:04 +02:00
Sebastian
8afe4606fc add the ip address 2024-04-04 13:37:22 +02:00
Sebastian
e5cbff1b92 go figure 2024-04-04 07:16:37 +02:00
Sebastian
b1782a5541 websockets 2024-04-03 15:24:21 +02:00
Sebastian
9bcc185102 bind ws to the right port 2024-04-03 10:10:08 +02:00
Sebastian
2cecef5511 fix ports 2024-04-03 10:03:34 +02:00
Sebastian
508b4999af try fullnode 2024-04-03 09:18:07 +02:00
Sebastian
38db9c1750 fix 2024-04-01 15:37:03 +02:00
Sebastian
5a23f0bf21 add debug 2024-04-01 15:35:48 +02:00
Sebastian
68b6c69723 enable again 2024-04-01 15:06:58 +02:00
Sebastian
99bc757497 block them again 2024-04-01 14:59:08 +02:00
Sebastian
638234e97c update 2024-04-01 14:52:16 +02:00
Sebastian
53f4ee80e2 enable logs 2024-04-01 13:52:58 +02:00
Sebastian
faad14993b enable getlogs 2024-04-01 13:51:39 +02:00
Sebastian
c82464ac83 napoli 2024-04-01 13:45:32 +02:00
Sebastian
12e5776bcf napoli update 2024-04-01 13:43:15 +02:00
Sebastian
249b9b2b14 update 2024-03-29 15:36:54 +01:00
Sebastian
1dde92c74f init archive 2024-03-29 15:34:54 +01:00
Sebastian
c9fdf48c43 update origins 2024-03-29 15:20:48 +01:00
Sebastian
15a49f32c3 expose 2024-03-29 15:18:08 +01:00
Sebastian
e6fab97249 initial polygon 2024-03-29 14:49:20 +01:00
Sebastian
09b5c80f88 initial polygon 2024-03-29 14:48:13 +01:00
Sebastian
a01638ae01 fix 2024-03-29 14:00:31 +01:00
Sebastian
bda22f8328 fix 2024-03-29 13:59:35 +01:00
Sebastian
f1759a000d start op-bnb fullnode 2024-03-29 05:01:34 +01:00
Sebastian
8b7808d61d beacon talks 2024-03-28 17:08:23 +01:00
Sebastian
2c276de976 needs l1 endpoint 2024-03-28 14:02:08 +01:00
Sebastian
4f130a3f52 add ip 2024-03-28 13:48:47 +01:00
Sebastian
b78256c025 add ip 2024-03-28 13:47:55 +01:00
Sebastian
7e10f4cf9e not defined. revert 2024-03-28 13:36:09 +01:00
Sebastian
e61541c4a9 more discovery 2024-03-28 13:34:14 +01:00
Sebastian
487899b8b6 update 2024-03-27 15:00:33 +01:00
Sebastian
235596c306 jwt path 2024-03-27 03:49:09 +01:00
Sebastian
3d5df1af3e updates 2024-03-27 03:42:01 +01:00
Sebastian
8736fe2844 fix 2024-03-27 03:34:52 +01:00
Sebastian
17d89995a4 add priv key 2024-03-27 03:21:45 +01:00
Sebastian
17fba3bb43 update 2024-03-27 01:30:14 +01:00
Sebastian
5024d5a1c5 fix 2024-03-27 01:03:59 +01:00
Sebastian
6d83f14045 websocket port 8545 2024-03-26 15:57:20 +01:00
Sebastian
9f54e37239 gnosis slowdisk 2024-03-26 15:42:03 +01:00
Sebastian
7c252a1d7c fix 2024-03-26 15:31:13 +01:00
Sebastian
6134250100 enable debug methods by default 2024-03-26 15:23:43 +01:00
Sebastian
4821fb094b init 2024-03-26 15:21:09 +01:00
Sebastian
364272f9ca get out of my way 2024-03-26 13:02:29 +01:00
Sebastian
c4db864982 get out of my way 2024-03-26 12:58:58 +01:00
Sebastian
4c7d911bf3 fix 2024-03-26 10:54:59 +01:00
Sebastian
b9e436c4a5 fix 2024-03-26 10:53:48 +01:00
Sebastian
65a1b21f91 add op-bnb 2024-03-26 10:48:03 +01:00
Sebastian
96fbdba5f5 new testnet 2024-03-26 10:12:51 +01:00
Sebastian
59e0bd8c6b snap me plz 2024-03-26 09:56:41 +01:00
Sebastian
4913a7eb83 more gracefulness 2024-03-26 08:33:33 +01:00
Sebastian
4863317fab update 2024-03-26 08:31:47 +01:00
Sebastian
397d82493f snap sync 2024-03-26 08:06:49 +01:00
Sebastian
c2d1c0d148 fix 2024-03-25 15:33:20 +01:00
Sebastian
2f204507c2 update 2024-03-25 15:29:38 +01:00
Sebastian
9ac98eea0f update 2024-03-25 15:28:06 +01:00
Sebastian
f44aa5541f fix 2024-03-25 13:36:19 +01:00
Sebastian
a2566076c8 fix 2024-03-25 13:23:56 +01:00
Sebastian
475fcb4c21 check the sync status before reporting a node as available to serve 2024-03-25 07:19:20 +01:00
Sebastian
0e34199386 make sure unfinished uploads don't qualify 2024-03-25 07:14:16 +01:00
Sebastian
43e4d02c35 make sure unfinished uploads don't qualify 2024-03-25 07:12:58 +01:00
Sebastian
f8e1f0ccf4 readable 2024-03-25 00:10:51 +01:00
Sebastian
bcae174cb0 fix 2024-03-25 00:03:22 +01:00
Sebastian
92d4e2e2f8 fix 2024-03-24 16:31:34 +01:00
Sebastian
abbb294c4a fix 2024-03-24 16:16:04 +01:00
Sebastian
16637d2b60 estimate available space 2024-03-24 15:54:06 +01:00
Sebastian
09001707be fix 2024-03-24 15:29:29 +01:00
Sebastian
1c20ed878a add one option 2024-03-24 15:10:48 +01:00
Sebastian
994f440672 bump 2024-03-24 15:09:31 +01:00
Sebastian
2b2c7572b2 delete what's not needed 2024-03-24 15:04:05 +01:00
Sebastian
9c209cfcc1 tb to gb 2024-03-24 15:01:26 +01:00
Sebastian
77d93454a2 fix 2024-03-24 09:45:09 +01:00
Sebastian
0dd2b510ac grace 2024-03-24 09:06:13 +01:00
Sebastian
1ed6b40d7c graceful 2024-03-24 08:43:55 +01:00
Sebastian
e3571d0444 amazing grace 2024-03-24 08:29:50 +01:00
Sebastian
253f5bebf9 jo 2024-03-24 08:07:25 +01:00
Sebastian
07a808f6ad fix 2024-03-24 08:00:40 +01:00
Sebastian
61c2025345 fix the list 2024-03-24 07:44:03 +01:00
Sebastian
2f4bc11408 find options 2024-03-24 07:42:33 +01:00
Sebastian
ff6f4c1e87 look at local volumes too 2024-03-24 07:09:20 +01:00
Sebastian
adccfb06fd version and grace 2024-03-24 06:51:06 +01:00
Sebastian
9d42c2defb fix the grace period 2024-03-24 06:44:08 +01:00
Sebastian
57549f3cc3 better 2024-03-24 05:31:46 +01:00
Sebastian
a97aa7238b update 2024-03-24 04:53:58 +01:00
Sebastian
fa09bd4748 more sophisticated 2024-03-24 04:51:34 +01:00
Sebastian
61fe5ecf87 tag the file names 2024-03-24 04:06:09 +01:00
Sebastian
cd76603996 same same but different 2024-03-23 17:09:03 +01:00
Sebastian
26de04f61f same same but different 2024-03-23 17:04:48 +01:00
Sebastian
7efe727024 same same but different 2024-03-23 17:03:41 +01:00
Sebastian
51f4d22ab0 verbosity 2024-03-23 16:55:19 +01:00
Sebastian
b21228fedb log to docker 2024-03-23 16:53:53 +01:00
Sebastian
de7affedf0 root 2024-03-23 16:53:03 +01:00
Sebastian
25eb746d45 root 2024-03-23 16:42:26 +01:00
Sebastian
c14e8a41ff fix 2024-03-23 16:40:32 +01:00
Sebastian
a41f223cb9 update 2024-03-23 16:33:49 +01:00
Sebastian
f749a9564b graceful 2024-03-23 16:29:48 +01:00
Sebastian
ce8167f0dd give geth a chance 2024-03-23 16:28:53 +01:00
Sebastian
fccedcc532 configurable backup 2024-03-23 13:01:27 +01:00
Sebastian
83fb9ddfb8 add bsc 2024-03-23 12:34:12 +01:00
Sebastian
1506401dc1 update 2024-03-23 12:18:18 +01:00
Sebastian
a9572b720c update 2024-03-23 12:14:55 +01:00
Sebastian
00052dfa44 update 2024-03-23 12:12:21 +01:00
Sebastian
125e9717eb be graceful 2024-03-23 07:13:46 +01:00
Sebastian
74f6aabb0e grace 2024-03-23 06:29:06 +01:00
Sebastian
8bcd9697bc grace 2024-03-23 06:12:37 +01:00
Sebastian
99eaf640f3 jo 2024-03-23 04:42:19 +01:00
Sebastian
27783acb28 fix 2024-03-22 14:16:22 +01:00
Sebastian
8d4ffaac96 added a prune 2024-03-22 06:24:56 +01:00
Sebastian
c703088370 plz backfill 2024-03-22 05:45:06 +01:00
Sebastian
00da516923 plz backfill 2024-03-22 05:43:16 +01:00
Sebastian
d644f7f02b extend the stop grace period 2024-03-22 05:18:00 +01:00
Sebastian
9858c98c78 fix 2024-03-22 05:04:30 +01:00
Sebastian
f3e6c9f991 fix 2024-03-22 05:01:03 +01:00
Sebastian
0217450f33 fix 2024-03-22 04:59:58 +01:00
Sebastian
a2bd7ca7ca fix 2024-03-22 04:59:08 +01:00
Sebastian
c26d2003c0 fix 2024-03-22 04:57:05 +01:00
Sebastian
bae06a4681 fix 2024-03-22 04:54:53 +01:00
Sebastian
366bb8e5e5 fix 2024-03-22 04:52:23 +01:00
Sebastian
9f4ac2d8b2 fix 2024-03-22 04:50:20 +01:00
Sebastian
d27693b1e5 shuffle around 2024-03-22 04:40:07 +01:00
Sebastian
8db5c30f8f mount the rollup json to lisk 2024-03-22 04:02:11 +01:00
Sebastian
ed96d394dd renamed 2024-03-21 13:28:47 +01:00
Sebastian
81afd8274a jo 2024-03-21 13:24:48 +01:00
Sebastian
a6ceb5c758 fiddle backups 2024-03-21 09:09:28 +01:00
Sebastian
60d3d5681e initial commit 2024-03-21 08:32:36 +01:00
Sebastian
68d1ab5495 fix 2024-03-21 07:42:46 +01:00
Sebastian
24595bdb57 exec 2024-03-21 07:29:23 +01:00
Sebastian
27ebd2136a add a script 2024-03-21 07:28:49 +01:00
Sebastian
d4449d445e add the information 2024-03-21 05:35:23 +01:00
Sebastian
ce0ca0234b update 2024-03-21 01:42:11 +01:00
Sebastian
2c38f6bbb2 fix 2024-03-21 01:40:19 +01:00
Sebastian
648c4d63c5 don't need that port 2024-03-21 01:16:57 +01:00
Sebastian
e6f6807110 fix 2024-03-21 01:14:34 +01:00
Sebastian
d8e572165d fix 2024-03-21 00:10:20 +01:00
Sebastian
ecef80d8c2 update chainlist 2024-03-21 00:07:28 +01:00
Sebastian
962d3c788a update 2024-03-20 23:51:26 +01:00
Sebastian
8d0950cbb4 free endpoints 2024-03-20 23:27:38 +01:00
Sebastian
a65e3cc7ed update 2024-03-20 14:47:10 +01:00
Sebastian
dab409f649 be more fault tolerant 2024-03-20 14:02:27 +01:00
Sebastian
468718327d fix 2024-03-20 12:53:12 +01:00
Sebastian
3dd1d66eb5 fix 2024-03-20 08:53:21 +01:00
Sebastian
012379b56c fix 2024-03-20 06:30:58 +01:00
Sebastian
f6901a6501 fixed? 2024-03-20 06:17:31 +01:00
Sebastian
2d920ab392 get local upstreams too 2024-03-20 06:06:18 +01:00
Sebastian
f6bf087a27 prevent the bug 2024-03-19 19:24:32 +01:00
Sebastian
eb93e9e7e8 moar chains 2024-03-19 19:18:08 +01:00
Sebastian
0d5c197058 update 2024-03-19 18:50:25 +01:00
Sebastian
10a51943dc fix 2024-03-19 18:43:23 +01:00
Sebastian
3f22d9f7b9 generate dshackle configs 2024-03-19 18:15:08 +01:00
Sebastian
f44f172625 add a proxy config 2024-03-19 16:51:28 +01:00
Sebastian
a250046807 add a proxy config 2024-03-19 16:50:07 +01:00
Sebastian
c4b7b527d7 arbitrum sepolia 2024-03-19 15:49:54 +01:00
Sebastian
624ad08374 fix 2024-03-19 13:22:27 +01:00
Sebastian
034ec13009 fix 2024-03-19 13:01:27 +01:00
Sebastian
b90aefdce5 fix 2024-03-19 11:44:03 +01:00
Sebastian
9d518791eb fix 2024-03-19 11:34:31 +01:00
Sebastian
31458cd045 better switch 2024-03-19 11:26:10 +01:00
Sebastian
ba57f74fdf better switch 2024-03-19 11:25:14 +01:00
Sebastian
0612986875 pin the version 2024-03-19 08:18:45 +01:00
Sebastian
496ce8ebf1 update 2024-03-19 08:06:14 +01:00
Sebastian
daaf8bb221 fix 2024-03-19 07:54:51 +01:00
Sebastian
f34e7ce667 fix 2024-03-19 07:28:03 +01:00
Sebastian
b0f40e2ac6 update 2024-03-19 07:20:36 +01:00
Sebastian
21dd0c95ef fix 2024-03-19 07:17:51 +01:00
Sebastian
fba825626f fix 2024-03-19 07:15:26 +01:00
Sebastian
f69081ec4e fix 2024-03-19 07:05:47 +01:00
Sebastian
dca9138c1b fix 2024-03-19 07:04:12 +01:00
Sebastian
90995f8e6c fix 2024-03-19 07:02:20 +01:00
Sebastian
16a03cd9ea show me 2024-03-19 06:46:18 +01:00
Sebastian
9d414b123b fix 2024-03-19 06:42:01 +01:00
Sebastian
c83fd35b80 fix 2024-03-19 06:38:37 +01:00
Sebastian
59a0369f8a fix 2024-03-19 06:32:40 +01:00
Sebastian
dab6283658 fix 2024-03-19 06:31:27 +01:00
Sebastian
48c1360554 fix 2024-03-19 06:29:32 +01:00
Sebastian
8da3e98c83 figure 2024-03-19 06:28:02 +01:00
Sebastian
3c32852fd9 only show errors 2024-03-19 06:25:27 +01:00
Sebastian
367f9928ab chainlist update 2024-03-19 06:22:23 +01:00
Sebastian
70b15f804b better 2024-03-19 06:14:38 +01:00
Sebastian
40a7d93801 better 2024-03-19 06:13:23 +01:00
Sebastian
0d72a7d0a7 stats 2024-03-19 06:11:54 +01:00
Sebastian
bbb3cd28b3 make blacklist configurable 2024-03-19 05:59:24 +01:00
Sebastian
fa64d26843 fix 2024-03-18 17:13:00 +01:00
Sebastian
cf257870e3 init 2024-03-18 17:04:00 +01:00
Sebastian
6d18778286 fix 2024-03-18 16:50:15 +01:00
Sebastian
eb12637cc3 fix 2024-03-18 16:36:17 +01:00
Sebastian
d873bb263a better sync status 2024-03-18 16:21:27 +01:00
Sebastian
cb2aaef19e fix 2024-03-18 15:28:31 +01:00
Sebastian
0e608d282b env vars 2024-03-18 15:25:36 +01:00
Sebastian
80a2c9c0e5 try archive 2024-03-18 15:22:28 +01:00
Sebastian
583652a276 i was stupid 2024-03-18 15:04:24 +01:00
Sebastian
7305512c72 i was stupid 2024-03-18 15:02:51 +01:00
Sebastian
84e2c0f4a4 just remember this 2024-03-18 14:13:23 +01:00
Sebastian
5a347724fe almost 2024-03-18 08:42:41 +01:00
Sebastian
1d059d0bb8 super smart 2024-03-18 08:36:34 +01:00
Sebastian
b720d2c74e update 2024-03-18 08:31:36 +01:00
Sebastian
d67a169626 more 2024-03-18 08:25:28 +01:00
Sebastian
0dbf236167 executable 2024-03-18 08:10:10 +01:00
Sebastian
4c73bc0492 reference endpoints 2024-03-18 08:09:25 +01:00
Sebastian
ce7c32da47 fix 2024-03-18 04:27:41 +01:00
Sebastian
08109790e6 init 2024-03-18 04:20:42 +01:00
Sebastian
db1b22ae5e add node key 2024-03-17 17:50:57 +01:00
Sebastian
8ae5f5d570 add node key 2024-03-17 17:40:47 +01:00
Sebastian
ef09e3dda1 add node key 2024-03-17 17:08:08 +01:00
Sebastian
f0abac5047 force it 2024-03-17 16:52:23 +01:00
Sebastian
ee040d7ca2 force it 2024-03-17 16:51:03 +01:00
Sebastian
ee0cd975ec generate the node key 2024-03-17 16:45:41 +01:00
Sebastian
a7eaff414e generate the node key 2024-03-17 16:43:11 +01:00
Sebastian
ca9c451885 generate the node key 2024-03-17 16:39:07 +01:00
Sebastian
18f9936238 generate the node key 2024-03-17 16:35:00 +01:00
Sebastian
c6d793bcb7 new mantle 2024-03-17 16:01:58 +01:00
Sebastian
8e34db7740 update 2024-03-17 15:00:32 +01:00
Sebastian
cf61b549b3 let's connect 2024-03-17 14:53:54 +01:00
Sebastian
2ddacc43b5 let's connect 2024-03-17 14:48:39 +01:00
Sebastian
43e9cc1d23 let's connect 2024-03-17 14:46:25 +01:00
Sebastian
dda64f1291 better 2024-03-17 14:01:43 +01:00
Sebastian
4e59addb33 open the port 2024-03-17 13:47:50 +01:00
Sebastian
29ef71b33c init 2024-03-17 11:13:30 +01:00
Sebastian
7500c65647 another neat script 2024-03-17 04:36:48 +01:00
Sebastian
fde7d45171 make it executable 2024-03-17 04:33:24 +01:00
Sebastian
a7df497245 neat script 2024-03-17 04:25:58 +01:00
Sebastian
51d2959d3c update 2024-03-14 12:21:04 +01:00
Sebastian
f3f93f6dda fix 2024-03-14 11:26:38 +01:00
Sebastian
964c49d016 fix 2024-03-14 11:12:42 +01:00
Sebastian
cabe143e45 back to fix 2024-03-14 08:55:26 +01:00
Sebastian
efa7dda32f back to fix 2024-03-14 08:53:18 +01:00
Sebastian
a3458f7ee0 fix 2024-03-14 08:47:26 +01:00
Sebastian
9f7f6c479c fix 2024-03-14 08:33:08 +01:00
Sebastian
70699d59b8 version tags 2024-03-14 08:32:00 +01:00
Sebastian
58354128ff version tags 2024-03-14 08:31:22 +01:00
Sebastian
14781b1b33 update names 2024-03-14 08:30:18 +01:00
Sebastian
f34cc18025 blow up 2024-03-14 04:57:11 +01:00
Sebastian
e60eaa48ad update 2024-03-13 16:21:23 +01:00
Sebastian
486cc6f799 update 2024-03-13 16:06:41 +01:00
Sebastian
6bb6d270b6 fix 2024-03-13 14:46:25 +01:00
Sebastian
994a4a4d8f update 2024-03-13 12:13:23 +01:00
Sebastian
cbdbc4de0c update 2024-03-13 12:10:01 +01:00
Sebastian
5a83083009 baseConfig 2024-03-13 11:36:03 +01:00
Sebastian
903d2b419d init 2024-03-13 11:32:06 +01:00
Sebastian
07f5134dee update 2024-03-13 10:46:47 +01:00
Sebastian
9e07c51727 update arbitrum 2024-03-13 10:37:54 +01:00
Sebastian
ac21386665 update 2024-03-12 13:51:19 +01:00
Sebastian
2209c9acb1 update 2024-03-12 10:30:19 +01:00
Sebastian
628cc5b3a0 use other heimdall endpoint 2024-03-11 05:04:55 +01:00
Sebastian
02efe6d649 use other heimdall endpoint 2024-03-11 05:00:18 +01:00
Sebastian
a631fcfafc update 2024-03-11 04:53:06 +01:00
Sebastian
3269de9cbf update 2024-03-09 15:15:45 +01:00
Sebastian
40374f6823 update 2024-03-05 20:21:43 +01:00
Sebastian
cedbbdc470 update 2024-03-05 20:20:08 +01:00
Sebastian
458eb8081f fix 2024-03-03 07:59:02 +01:00
Sebastian
0cc2954725 fix 2024-03-03 07:36:51 +01:00
Sebastian
26dff9a849 fix 2024-03-03 07:24:00 +01:00
Sebastian
448aa7d022 fix 2024-03-03 07:20:40 +01:00
Sebastian
b265526ce7 fix 2024-03-03 07:07:55 +01:00
Sebastian
3a4ff87005 fix 2024-03-03 07:05:52 +01:00
Sebastian
252b378e4b fix 2024-03-03 07:05:04 +01:00
Sebastian
56a9951fee fix 2024-03-03 07:03:04 +01:00
Sebastian
b35fe4d4f7 fix 2024-03-03 06:49:58 +01:00
Sebastian
8db61343cf init blast 2024-03-03 06:40:15 +01:00
Sebastian
ee28a9dcd7 update 2024-03-01 15:04:10 +01:00
Sebastian
c6b36c3c0b update 2024-03-01 15:03:06 +01:00
Sebastian
61b819dda3 remove public port 2024-02-29 06:24:54 +01:00
Sebastian
e2e17e21b8 fix 2024-02-29 05:55:24 +01:00
Sebastian
43928c2810 fix 2024-02-29 05:48:31 +01:00
Sebastian
9198bc3709 fix 2024-02-29 05:46:39 +01:00
Sebastian
084089958a path lisk 2024-02-29 05:43:55 +01:00
Sebastian
c40207a536 update 2024-02-23 18:15:14 +01:00
Sebastian
1f1fa2e643 update 2024-02-23 04:31:59 +01:00
Sebastian
6735bafec4 update 2024-02-22 13:23:50 +01:00
Sebastian
1ffb7a4f50 update 2024-02-22 13:09:47 +01:00
Sebastian
b450cec1af for us10 2024-02-22 12:12:08 +01:00
Sebastian
3be1851090 update 2024-02-21 19:13:35 +01:00
Sebastian
5d5ff900e5 update 2024-02-21 19:10:01 +01:00
Sebastian
2c9a893ed7 update 2024-02-20 14:51:07 +01:00
Sebastian
5fcd9571a4 fix 2024-02-20 08:33:58 +01:00
Sebastian
78b1a2d8f9 update 2024-02-20 08:23:16 +01:00
Sebastian
d776277986 update 2024-02-20 08:20:33 +01:00
Sebastian
fc737d9e7e fix 2024-02-20 08:15:32 +01:00
Sebastian
484749bdd7 update 2024-02-20 08:11:50 +01:00
Sebastian
6227d97e48 update 2024-02-20 08:06:13 +01:00
Sebastian
e05fa83aa6 init 2024-02-20 07:40:37 +01:00
Sebastian
848a3334df fix 2024-02-17 20:56:27 +01:00
Sebastian
8b8334f2ed jo 2024-02-17 08:54:20 +01:00
Sebastian
15eb8a3442 fix 2024-02-17 08:41:57 +01:00
Sebastian
f4f58cf430 fix 2024-02-17 08:33:22 +01:00
Sebastian
b2080926c5 delete rubbish 2024-02-17 08:30:20 +01:00
Sebastian
dd44f904d9 paths 2024-02-17 08:28:47 +01:00
Sebastian
0568929aa6 try to generalize op stack 2024-02-17 08:27:26 +01:00
Sebastian
714c5cb1c8 init 2024-02-17 05:29:26 +01:00
Sebastian
68b0449fc7 update 2024-02-16 04:06:23 +01:00
Sebastian
b18bbcfb86 update 2024-02-16 04:03:58 +01:00
Sebastian
6ca8a95dcd update 2024-02-15 09:10:37 +01:00
Sebastian
f4c2af0618 update 2024-02-15 07:53:34 +01:00
Sebastian
a3cb98d2c0 fix 2024-02-15 07:48:48 +01:00
Sebastian
3d43be95aa fix 2024-02-15 07:47:14 +01:00
Sebastian
29e2a7fda9 updates 2024-02-15 07:39:16 +01:00
Sebastian
54ecb3577a duh 2024-02-14 16:00:23 +01:00
Sebastian
d72622208f duh 2024-02-14 15:59:34 +01:00
Sebastian
f43e09c9f8 tunnel through traefik 2024-02-14 15:52:48 +01:00
Sebastian
e86ca7ad1c new image 2024-02-14 05:45:38 +01:00
Sebastian
f33a3de62f update 2024-02-14 00:57:38 +01:00
Sebastian
467c5469f1 update 2024-02-14 00:55:40 +01:00
Sebastian
85d5b4bb63 init 2024-02-09 12:49:33 +01:00
Sebastian
6369d2fc61 init 2024-02-09 12:21:36 +01:00
Sebastian
be541d9ec1 fix 2024-02-09 07:35:31 +01:00
Sebastian
53c01989b9 add ip 2024-02-08 10:44:55 +01:00
Sebastian
a9f5ee4532 update 2024-02-08 10:38:48 +01:00
Sebastian
10907ab3b9 update 2024-02-08 10:27:01 +01:00
Sebastian
9041549d65 fix the name 2024-02-08 10:13:07 +01:00
Sebastian
79371a9e5f init 2024-02-08 05:53:58 +01:00
Sebastian
41fc5d96bd yo 2024-02-07 04:40:16 +01:00
Sebastian
c7c91b33f1 init 2024-02-07 02:20:27 +01:00
Sebastian
512d884b04 update 2024-02-06 12:58:37 +01:00
Sebastian
e8a7c440fd update 2024-02-06 12:58:05 +01:00
Sebastian
402ad21df0 update 2024-02-06 12:57:13 +01:00
Sebastian
c2cc346052 update 2024-02-06 12:56:07 +01:00
Sebastian
debdc13be7 init 2024-02-06 07:03:52 +01:00
Sebastian
74512caa56 revert 2024-02-06 06:34:53 +01:00
Sebastian
2d8f241b21 try 2024-02-06 06:30:03 +01:00
Sebastian
32c99c6b80 revert 2024-02-06 06:25:39 +01:00
Sebastian
1c94561b74 same same 2024-02-06 06:23:39 +01:00
Sebastian
159abfcb3a make port explicit same as rpc 2024-02-06 06:23:00 +01:00
Sebastian
4920756db1 make port explicit 2024-02-06 06:21:37 +01:00
Sebastian
6470297efd update path 2024-02-06 06:10:50 +01:00
Sebastian
531deb1596 update path 2024-02-06 06:10:04 +01:00
Sebastian
5785de265a fix 2024-02-05 13:27:19 +01:00
Sebastian
3f86a152e6 fix 2024-02-05 13:26:32 +01:00
Sebastian
d75e3fb6e5 fix 2024-02-05 13:24:16 +01:00
Sebastian
de2906b353 init 2024-02-05 13:22:56 +01:00
Sebastian
456cb03faf init 2024-02-05 12:03:32 +01:00
Sebastian
f164e29116 update 2024-02-05 11:55:58 +01:00
Sebastian
22ebeff495 update 2024-02-05 11:53:49 +01:00
Sebastian
eaa84807d5 castrate to nethermind 2024-02-05 11:51:43 +01:00
Sebastian
bc627a5160 bug 2024-02-05 11:33:00 +01:00
Sebastian
a1e41d8c01 update 2024-02-05 11:25:01 +01:00
Sebastian
22dcd5d826 init 2024-02-05 11:03:12 +01:00
Sebastian
72180aff88 update 2024-02-05 06:44:43 +01:00
Sebastian
d009a36e23 init 2024-02-05 06:04:38 +01:00
Sebastian
dbfa00bac1 update 2024-02-05 05:46:43 +01:00
Sebastian
595aa08c6e fix for new docker version 2024-02-05 05:22:12 +01:00
Sebastian
f8b9f7a17d fix 2024-02-04 15:02:47 +01:00
Sebastian
8f74627a4a fix 2024-02-04 15:00:12 +01:00
Sebastian
bf62ff8a32 fix 2024-02-04 14:56:40 +01:00
Sebastian
ed353ca490 fix port 2024-02-04 14:52:33 +01:00
Sebastian
7817fe2fda update 2024-02-04 14:03:15 +01:00
Sebastian
8e2b639b75 go for it 2024-02-04 13:44:51 +01:00
Sebastian
28867d5532 update 2024-02-03 04:11:10 +01:00
Sebastian
7e61e4ea45 update 2024-02-03 04:10:45 +01:00
Sebastian
3f2e96997e update 2024-02-01 01:34:01 +01:00
Sebastian
cab000cc97 lets start 2024-02-01 01:29:12 +01:00
Sebastian
5c949e86d8 update 2024-02-01 01:24:38 +01:00
Sebastian
58f7db98d6 update 2024-02-01 00:54:37 +01:00
Sebastian
78421c3a6b update 2024-02-01 00:48:03 +01:00
Sebastian
9b8e2ab154 nat 2024-02-01 00:34:42 +01:00
Sebastian
852d67a828 update 2024-01-29 07:13:40 +01:00
Sebastian
11eb3107cf fix 2024-01-28 04:13:33 +01:00
Sebastian
e84fa6b643 da sweet snapshots 2024-01-28 04:07:35 +01:00
Sebastian
dd644e0910 fix 2024-01-27 12:18:10 +01:00
Sebastian
b2ef62295f fix 2024-01-27 12:17:43 +01:00
Sebastian
debd7e56da fix 2024-01-27 12:16:29 +01:00
Sebastian
cefe892917 make a metis node 2024-01-27 12:14:12 +01:00
Sebastian
e5e064687d start a fullnode 2024-01-27 11:04:59 +01:00
Sebastian
fc5f43f4b7 start a fullnode 2024-01-27 10:54:51 +01:00
Sebastian
cdcef4942f update 2024-01-27 04:26:36 +01:00
Sebastian
d97f52dd08 yo 2024-01-27 04:15:48 +01:00
Sebastian
531531993a do it 2024-01-27 04:12:04 +01:00
Sebastian
f283dc17cd line breaks 2024-01-27 04:09:25 +01:00
Sebastian
51cfb62b09 do not download the snapshot 2024-01-27 04:02:37 +01:00
Sebastian
dc1279f075 ports 2024-01-26 15:51:17 +01:00
Sebastian
f6fa297fcf base fullnode 2024-01-26 15:48:24 +01:00
Sebastian
1a02518fd4 fix 2024-01-26 08:39:14 +01:00
Sebastian
a9d366a6ca lets squeeze some bits 2024-01-26 08:32:40 +01:00
Sebastian
34c595a213 update 2024-01-26 05:04:25 +01:00
Sebastian
9ba6364fad update 2024-01-26 04:48:18 +01:00
Sebastian
563c672b56 update 2024-01-26 04:45:39 +01:00
Sebastian
7ff25d5f73 version update 2024-01-26 04:43:17 +01:00
Sebastian
2a8102f67c version update 2024-01-26 04:03:53 +01:00
Sebastian
70ef2101ea yo 2024-01-25 04:12:41 +01:00
Sebastian
bb5f444c6b properly 2024-01-25 04:09:26 +01:00
Sebastian
30a776186b version update 2024-01-25 03:52:20 +01:00
Sebastian
ad744e07da update 2024-01-24 04:05:35 +01:00
Sebastian
992b541464 update 2024-01-23 09:33:44 +01:00
Sebastian
7901f5ee7b try to fix 2024-01-23 09:10:07 +01:00
Sebastian
3e0aad6b0c oopsie 2024-01-23 08:32:11 +01:00
Sebastian
de2472b659 fix 2024-01-22 09:40:54 +01:00
Sebastian
c70034d091 fix 2024-01-22 09:39:27 +01:00
Sebastian
be5d9526dc sync base fullnode 2024-01-22 09:35:03 +01:00
Sebastian
7884feba5b changes 2024-01-20 17:12:57 +01:00
Sebastian
ed3e943a6c update 2024-01-19 17:16:57 +01:00
Sebastian
92d81faa7a fix node 2024-01-19 04:39:52 +01:00
Sebastian
8777f3a424 fix 2024-01-18 16:34:02 +01:00
Sebastian
1e522241ee try more base 2024-01-18 16:30:22 +01:00
Sebastian
fc70c0b5a0 update 2024-01-11 18:55:05 +01:00
Sebastian
e150f73a50 update 2024-01-11 18:52:47 +01:00
Sebastian
88887ce059 update 2024-01-11 18:49:29 +01:00
Sebastian
cada9fa30d new try 2023-12-31 13:04:31 +01:00
Sebastian
f555984da3 checkpoint sync update 2023-12-29 10:58:20 +01:00
Sebastian
dce7b8b832 make a reload script 2023-12-29 06:31:47 +01:00
Sebastian
6969049fce mount a key 2023-12-29 06:03:18 +01:00
Sebastian
81b1cdb80f update 2023-12-28 06:12:09 +01:00
Sebastian
57d23d4ca6 remove erigon 2023-12-27 09:06:37 +01:00
Sebastian
de9d7dd33c version update 2023-12-27 08:56:42 +01:00
Sebastian
b2537534df sync endpoint 2023-12-25 17:16:47 +01:00
Sebastian
6d691107f2 initial 2023-12-25 17:12:58 +01:00
Sebastian
dbe45ca6b3 adding goerli reth 2023-12-21 10:53:18 +01:00
Sebastian
b6b12b7271 up the connection limit 2023-12-20 07:26:07 +01:00
Sebastian
e62a8fef0a update 2023-12-20 06:35:20 +01:00
Sebastian
ff2e21ddd3 missing file 2023-12-20 06:22:19 +01:00
Sebastian
2b99d2e1e9 ports 2023-12-20 06:21:45 +01:00
Sebastian
a3674c42db sync it like it's hot 2023-12-20 06:14:10 +01:00
Sebastian
00364fd11e update 2023-12-15 06:14:00 +01:00
Sebastian
a6b5599d29 make it full 2023-12-13 16:41:42 +01:00
Sebastian
61751657da initial 2023-12-13 16:40:19 +01:00
Sebastian
40a496124c update 2023-12-13 15:28:51 +01:00
Sebastian
0c414cc90e kick nimbus 2023-12-12 09:29:23 +01:00
Sebastian
ee2fd76691 open more ports for better connectivity 2023-12-07 04:41:21 +01:00
Sebastian
805bc249d4 update 2023-12-07 04:34:30 +01:00
Sebastian
bdc48da5ba fix 2023-12-05 11:02:21 +01:00
Sebastian
fe08e56d63 fix 2023-12-05 10:58:02 +01:00
Sebastian
1a6464dc18 update 2023-12-05 10:54:40 +01:00
Sebastian
08957600c1 change the label in grafana 2023-12-05 08:20:02 +01:00
Sebastian
b551288b95 update 2023-12-02 10:39:54 +01:00
Sebastian
cfbab12cf6 support more protocols 2023-11-30 18:09:41 +01:00
Sebastian
d574602f2b support more protocols 2023-11-30 18:04:42 +01:00
Sebastian
dd50a9ff66 extra domain 2023-11-30 10:57:59 +01:00
Sebastian
442ad89abe for tests 2023-11-30 10:30:47 +01:00
Sebastian
006f680e13 cosmetics 2023-11-30 10:26:11 +01:00
Sebastian
0f78e2e308 update 2023-11-30 08:53:19 +01:00
Sebastian
8f598bde9f update 2023-11-30 08:52:41 +01:00
Sebastian
9829b55dcb add nat 2023-11-30 08:16:06 +01:00
Sebastian
3195ddd044 maybe that makes it faster 2023-11-24 03:09:46 +01:00
Sebastian
4a519edc4d erigon is archive 2023-11-23 07:17:32 +01:00
Sebastian
e9a941d76a ah welp 2023-11-22 13:13:09 +01:00
Sebastian
31bbe2fef9 reth is archive 2023-11-22 13:12:34 +01:00
Sebastian
0ac1b07419 path 2023-11-22 13:07:54 +01:00
Sebastian
aeb5811ba1 try reth for base 2023-11-22 12:59:47 +01:00
Sebastian
998a98cbdd start with reth and prysm 2023-11-22 07:24:40 +01:00
Sebastian
b8ede65b1c start with reth 2023-11-22 07:14:23 +01:00
Sebastian
615640f3e0 update linea 2023-11-22 05:59:13 +01:00
Sebastian
28f817f9d9 update 2023-11-20 18:13:17 +01:00
Sebastian
3a1238ee6e update 2023-11-20 18:10:09 +01:00
Sebastian
556eadd088 update 2023-11-20 18:06:27 +01:00
Sebastian
4a17e3a74a fix 2023-11-15 10:11:25 +01:00
Sebastian
68376144ab fix 2023-11-15 09:51:21 +01:00
Sebastian
38680d3e6f ... 2023-11-15 09:49:05 +01:00
Sebastian
e66c2066bf fix 2023-11-11 07:20:16 +01:00
Sebastian
b9e41669c4 remove whitelist 2023-11-11 07:14:25 +01:00
Sebastian
307ba72fa9 farm on 2023-11-11 07:11:48 +01:00
Sebastian
e7cbf5830e allow slowdisk 2023-11-01 11:05:31 +01:00
Sebastian
56ea6b861f new naming scheme 2023-11-01 11:04:26 +01:00
Sebastian
d7a96d50a3 jo 2023-11-01 09:44:45 +01:00
Sebastian
da0793dc53 try new schema 2023-11-01 09:30:38 +01:00
Sebastian
eb2eb3b8fe version update 2023-10-31 06:19:35 +01:00
Sebastian
9e0804123a update 2023-10-28 18:16:10 +02:00
Sebastian
9197d0b3dc wow there is no officially working docker image 2023-10-28 15:53:19 +02:00
Sebastian
8e34cba621 wow there is no officially working docker image 2023-10-28 15:52:20 +02:00
Sebastian
c44b76f747 update bsc erigon 2023-10-28 15:43:26 +02:00
Sebastian
b777e5a0ed lets try 2023-10-25 10:53:06 +02:00
Sebastian
caf7df7b0c move subsquid to the hetzner abuse protected network as it's libp2p 2023-10-24 04:24:48 +02:00
Sebastian
3538395bed fix 2023-10-22 09:55:13 +02:00
Sebastian
357ca6e9ca fix 2023-10-22 09:54:31 +02:00
Sebastian
b7c2cffb13 missing files 2023-10-22 09:52:25 +02:00
Sebastian
e54050b60b first steps with base erigon 2023-10-22 09:49:16 +02:00
Sebastian
78c5ec9bea init base chain from snapshot 2023-10-22 08:05:27 +02:00
Sebastian
4808637b2e init base chain from snapshot 2023-10-22 07:58:59 +02:00
Sebastian
90219ab8da init base chain from snapshot 2023-10-22 07:52:20 +02:00
Sebastian
3303bf960f network change 2023-10-22 03:15:21 +02:00
Sebastian
2b633e5aa8 nethermind sucks 2023-10-22 03:12:47 +02:00
Sebastian
38dbebf7d5 open ports 2023-10-22 02:51:57 +02:00
Sebastian
ca0128b0dc add a network 2023-10-22 02:47:38 +02:00
Sebastian
859a7f1373 fantom fullnode 2023-10-20 18:17:41 +02:00
Sebastian
3562a7df32 update 2023-10-20 18:02:49 +02:00
Sebastian
cfd51c2627 fix 2023-10-20 17:46:20 +02:00
Sebastian
f53fcb3bbc mumbai 2023-10-20 17:39:59 +02:00
Sebastian
5d537571ae fix 2023-10-20 17:04:17 +02:00
Sebastian
e84519646e join the fantom testnet fun 2023-10-20 17:01:58 +02:00
Sebastian
818db4a42e better polygon fullnode sync 2023-10-20 05:06:42 +02:00
Sebastian
760ebc8347 init 2023-10-20 04:45:31 +02:00
Sebastian
882e313516 update version 2023-10-20 04:19:04 +02:00
Sebastian
1c047dd3cf update version 2023-10-19 14:50:48 +02:00
Sebastian
8f349a0f09 open port in dshackle to serve requests with linea 2023-10-19 09:38:30 +02:00
Sebastian
8327e0d1fa new version resync required 2023-10-19 07:41:59 +02:00
Sebastian
bf3cdb855f maybe better connectivity 2023-10-18 14:50:37 +02:00
Sebastian
f331090f16 expose the p2p port also 2023-10-18 09:38:01 +02:00
Sebastian
8b1602130b version update 2023-10-15 01:39:38 +02:00
Sebastian
39ad932274 geth-goerli.yml 2023-10-13 11:44:37 +02:00
Sebastian
6dcb0fafc0 geth-goerli.yml 2023-10-13 11:37:12 +02:00
Sebastian
208ab56d0e maybe unstuck polygon sync 2023-10-13 03:45:01 +02:00
Sebastian
07f867ad6d version update 2023-10-12 16:06:58 +02:00
Sebastian
50fde8f9bf final version update before resync 2023-10-11 08:53:10 +02:00
Sebastian
ff6b2c4e32 version update 2023-10-11 08:48:04 +02:00
Sebastian
6a468a25ad no archive for nova 2023-10-10 10:37:47 +02:00
Sebastian
7c30323708 update checkpoint sync 2023-10-10 08:55:26 +02:00
Sebastian
8ce16c67e8 make it compatible with docker-compose version 2023-10-10 08:45:02 +02:00
Sebastian
7e1c1b8a7d fix the base rpc endpoint config 2023-10-10 08:03:49 +02:00
Sebastian
f21320e667 make a gnosis fullnode 2023-10-10 07:08:28 +02:00
Sebastian
dbc1de12e1 update geth 2023-10-10 06:39:05 +02:00
Sebastian
f01d7f1535 update base to not require nethermind 2023-10-10 06:05:26 +02:00
Sebastian
7b1e3cddcc version update 2023-10-10 05:49:30 +02:00
Sebastian
886fb42c5e version update 2023-10-10 05:36:32 +02:00
Sebastian
cbb587c043 version update 2023-10-10 05:31:24 +02:00
Sebastian
e1bf52860e support the squids 2023-10-09 12:28:00 +02:00
Sebastian
8e62c18e16 let it do websockets too 2023-10-09 11:22:06 +02:00
Sebastian
9047693b1f version update 2023-10-09 10:21:56 +02:00
Sebastian
d6bcc68bae make dtl endpoint flexible 2023-10-09 10:18:06 +02:00
Sebastian
97b0383924 update nethermind to fix those missing receipts 2023-10-07 10:04:09 +02:00
Sebastian
e40152acae unstuck with heimdall 2023-10-07 08:57:15 +02:00
Sebastian
a68650ca56 update erigon for polygon hardfork 2023-10-06 15:52:25 +02:00
Sebastian
06c1c55b59 open torrent port 2023-10-04 05:24:21 +02:00
Sebastian
920911d833 update dshackle 2023-10-04 04:02:47 +02:00
Sebastian
80c1faed7a fix copy paste error 2023-10-03 09:29:58 +02:00
Sebastian
6a5334d1d2 fix the nimbus version 2023-10-03 09:16:23 +02:00
Sebastian
66a4fc138e needs more ram? 2023-10-03 08:59:22 +02:00
Sebastian
aa6cd65d37 needs the executable in command 2023-10-03 08:57:23 +02:00
Sebastian
9d459c294d make myself a bsc fullnode 2023-10-03 08:54:24 +02:00
Sebastian
b08e95f2ec solve the websocket port problem 2023-10-03 08:20:17 +02:00
Sebastian
bbb68ff7c4 maybe order matters 2023-10-03 06:04:03 +02:00
Sebastian
90951b9802 maybe path matching order matters 2023-10-03 05:45:56 +02:00
Sebastian
0541554986 update zkevm 2023-10-01 10:24:00 +02:00
Sebastian
198567c10e no op 2023-09-28 12:22:09 +02:00
Sebastian
097cf904f1 remove flag 2023-09-28 07:14:57 +02:00
Sebastian
7cd3c5a8af update 2023-09-28 07:11:52 +02:00
Sebastian
2452bb56a7 update celo 2023-09-27 11:08:11 +02:00
Sebastian
2e6127811c fix 2023-09-26 11:35:59 +02:00
Sebastian
e09547feab make changes permanent 2023-09-26 11:32:08 +02:00
Sebastian
9e5f1eb2d7 replace heimdall 2023-09-26 11:18:38 +02:00
Sebastian
90b94119f2 update heimdall endpoint 2023-09-26 11:16:31 +02:00
Sebastian
a2865f1586 update heimdall endpoint 2023-09-26 11:15:04 +02:00
Sebastian
d436a5d9b8 wrong volume omg 2023-09-21 10:27:26 +02:00
Sebastian
e0c238206c up the calldata limit 2023-09-21 10:21:29 +02:00
Sebastian
a7e1232fb4 version upgrade 2023-09-21 09:45:25 +02:00
Sebastian
6f0e9c7256 update the prover 2023-09-21 09:36:29 +02:00
Sebastian
9891cf33f1 also need to tell traefik which container to assign the routes to 2023-09-21 09:17:19 +02:00
Sebastian
85464ece1b move ws to its own port 2023-09-21 09:13:34 +02:00
Sebastian
cd6f6b06cd pin the postgres version 2023-09-21 09:07:27 +02:00
Sebastian
39291859ec expose the right port in the compose file... to enable ipv4 in the container? 2023-09-17 20:36:36 +02:00
Sebastian
37cb3bdc77 split the arb classic 2023-09-17 07:01:26 +02:00
Sebastian
0d59d95b5b allow cors for metamask 2023-09-06 10:45:30 +02:00
Sebastian
e23f645442 two redis is one too many 2023-09-06 10:32:45 +02:00
Sebastian
e626ceb6f3 make the move 2023-09-06 10:27:42 +02:00
Sebastian
2e656a5d05 get rid of lighthouse 2023-09-03 12:35:49 +02:00
Sebastian
6777cffbd4 fix 2023-09-03 12:10:19 +02:00
Sebastian
8bf0200220 make fix permanent 2023-09-02 13:29:57 +02:00
Sebastian
45c64dcabf make the fullnode an archive node bc why not 2023-09-02 13:26:46 +02:00
Sebastian
d00c7a5d65 keep the fire burning 2023-09-02 07:23:28 +02:00
Sebastian
4529a19763 disable snapshots for testing 2023-08-29 15:05:48 +02:00
Sebastian
d57d405e1b fix 2023-08-26 10:03:12 +02:00
Sebastian
e5e71aca37 no validator 2023-08-26 10:00:56 +02:00
Sebastian
8bb341f696 fix 2023-08-26 09:56:35 +02:00
Sebastian
cb6dfcbd0d support base 2023-08-26 09:51:18 +02:00
Sebastian
6ca3aa2d4b support base 2023-08-26 09:49:18 +02:00
Sebastian
1d956f2ffb add nova 2023-08-26 08:47:53 +02:00
Sebastian
52dce3bde7 move gnosis to nimbus 2023-08-26 06:47:54 +02:00
Sebastian
078a4efdc5 I'm no validator 2023-08-26 06:38:02 +02:00
Sebastian
3c5121e423 nana 2023-08-26 06:29:31 +02:00
Sebastian
25bf469b57 jaja 2023-08-26 06:25:18 +02:00
Sebastian
549676bb1b jaja 2023-08-26 06:22:49 +02:00
Sebastian
4a2ae8a1f7 nimbus needs build files 2023-08-26 06:20:44 +02:00
Sebastian
08c7858450 try nimbus instead of lighthouse 2023-08-26 06:19:19 +02:00
Sebastian
f67c8c6604 fix besu? 2023-08-26 05:30:54 +02:00
Sebastian
2bfa6bc650 add besu 2023-08-26 05:04:46 +02:00
Sebastian
c9230c75f8 add besu 2023-08-26 05:02:50 +02:00
Sebastian
bb90df86f3 add besu 2023-08-26 05:00:25 +02:00
Sebastian
76e9833b1c add besu 2023-08-26 04:58:50 +02:00
Sebastian
21ce573416 add besu 2023-08-26 04:39:54 +02:00
Sebastian
0be8adb641 add besu 2023-08-26 04:38:50 +02:00
Sebastian
42a70603a8 add besu 2023-08-26 04:37:33 +02:00
Sebastian
1fc8f8c3f8 enable debug 2023-08-26 03:10:57 +02:00
Sebastian
a4508906c9 make an arbitrum fullnode 2023-08-26 03:08:01 +02:00
Sebastian
203c955ab1 make an arbitrum fullnode 2023-08-26 02:50:54 +02:00
Sebastian
1d01601abf update da nethermind 2023-08-26 02:27:45 +02:00
Sebastian
5d15eacbd0 name clashed 2023-08-25 11:51:42 +02:00
Sebastian
b9558bdaf7 log to journald 2023-08-25 11:40:35 +02:00
Sebastian
a14c53ced8 lets try to run ws on the same port as http 2023-08-25 08:06:32 +02:00
Sebastian
f1dd692a10 make the websocket accessible 2023-08-25 07:59:32 +02:00
Sebastian
709fbb3aea better output 2023-08-25 06:32:12 +02:00
Sebastian
c77879a0aa show dat handy tool 2023-08-25 06:21:57 +02:00
Sebastian
41f05f4f4f can not use websocket on same port as http 2023-08-24 14:53:10 +02:00
Sebastian
7b7f02cbbb name change 2023-08-24 13:23:19 +02:00
Sebastian
124d2ed057 fix the whitespace 2023-08-24 13:22:06 +02:00
Sebastian
eb194fa0f3 add mantle support 2023-08-24 13:21:45 +02:00
Sebastian
eb0f7ed1dc make a mainnet fullnode 2023-08-24 10:30:49 +02:00
Sebastian
07519f677b fix 2023-08-24 07:22:22 +02:00
Sebastian
8e4c7ca951 fix 2023-08-24 07:20:21 +02:00
Sebastian
358e7b8f73 fix 2023-08-24 07:14:47 +02:00
Sebastian
f29b2d7a5a fix 2023-08-24 07:13:54 +02:00
Sebastian
ec858e0e2e need ZKEVM_NETWORK variable 2023-08-24 07:11:09 +02:00
Sebastian
e9a33706cf update traefik 2023-08-24 06:42:59 +02:00
Sebastian
ffc37ede08 integrate zkevm 2023-08-24 06:34:44 +02:00
Sebastian
b7248a04ff revert something is fishy 2023-08-24 05:54:54 +02:00
Sebastian
782c121628 update optimism 2023-08-24 05:47:12 +02:00
Sebastian
7c8932030a update 2023-08-23 17:53:22 +02:00
Sebastian
c91bd27a0d remove the snapshots and try without 2023-08-22 05:30:09 +02:00
Sebastian
a5341f715d make the torrent faster 2023-08-21 14:37:51 +02:00
Sebastian
f2c41be269 try erigon fullnodes for polygon 2023-08-21 14:30:57 +02:00
Sebastian
67b4a181f1 activate archive mode on geth because erigon lags 2023-08-18 12:42:31 +02:00
Sebastian
b18778f96b maybe 2023-08-18 12:13:24 +02:00
Sebastian
869c523461 maybe 2023-08-18 12:08:22 +02:00
Sebastian
ac2452449e maybe 2023-08-18 12:05:43 +02:00
Sebastian
74be6c515d maybe 2023-08-18 12:03:04 +02:00
Sebastian
e87d403a87 use the build not the image 2023-08-18 07:01:55 +02:00
Sebastian
3d5e600195 switch back to old geth version because it's official 2023-08-18 06:56:22 +02:00
Sebastian
4e227c17b8 remove incompatible flags 2023-08-18 06:53:21 +02:00
Sebastian
cdd298d34e erigon synced but linea is official - split 2023-08-18 06:50:26 +02:00
Sebastian
f710a2904c try geth instead 2023-08-18 06:46:20 +02:00
Sebastian
0edf9c3e5b add support for linea 2023-08-18 06:19:11 +02:00
Sebastian
205a873549 fix typo 2023-08-18 06:18:43 +02:00
Sebastian
2ea644076a create 2023-08-18 05:42:31 +02:00
Sebastian
0511e7ed10 enable some more apis on op erigon 2023-08-16 13:32:43 +02:00
Sebastian
4363ad2d1c update 2023-08-16 13:14:29 +02:00
Sebastian
1a30703b5a fix 2023-08-16 08:08:14 +02:00
Sebastian
d9c69f0384 remove that x-tension 2023-08-16 08:06:07 +02:00
Sebastian
9a91da6e4f persistent logging 2023-08-16 07:54:22 +02:00
Sebastian
bc88334121 persistent logging 2023-08-16 07:52:28 +02:00
Sebastian
ec95880ac3 fix the logging 2023-08-16 07:30:37 +02:00
Sebastian
f2315059e8 enable journald logging 2023-08-16 07:25:05 +02:00
Sebastian
91cb6b4d5f persist the userops database 2023-08-15 14:03:19 +02:00
Sebastian
8a85201ae7 update 2023-08-14 15:09:25 +02:00
Sebastian
ef91590602 finishing up 2023-08-13 08:46:16 +02:00
Sebastian
a114c5d4b6 build that thing because permissions 2023-08-13 07:45:55 +02:00
Sebastian
03488dbd76 make erigon init the genesis block before starting 2023-08-13 07:38:09 +02:00
Sebastian
e961effb51 try erigon again 2023-08-13 06:48:07 +02:00
Sebastian
ab8dd5f7af meh 2023-08-13 06:36:44 +02:00
Sebastian
bed939da0c teh volume 2023-08-13 06:34:58 +02:00
Sebastian
8369a4d6b3 lets try geth 2023-08-13 06:31:10 +02:00
Sebastian
22e8613d08 fixes 2023-08-12 13:15:37 +02:00
Sebastian
12a64b10ee try to add lukso to the mix 2023-08-12 13:00:22 +02:00
Sebastian
b43fed1d39 make goerli same as mainnet 2023-08-12 05:00:53 +02:00
Sebastian
0528ab823d just use the official images 2023-08-11 18:52:57 +02:00
Sebastian
ba26b78674 activate traefik for the skandhas 2023-08-11 15:16:37 +02:00
Sebastian
d0f99b6adf set the path to the right network id 2023-08-11 08:20:25 +02:00
Sebastian
382467b1c5 start a separate relayer for each network 2023-08-11 08:19:25 +02:00
Sebastian
d94fde42b5 add rpc passthrough 2023-08-10 10:02:17 +02:00
Sebastian
32ad2865bd pick another git tag 2023-08-10 09:58:29 +02:00
Sebastian
0dc8ec6dcf update version 2023-08-10 09:17:20 +02:00
Sebastian
4991108e18 fix typo 2023-08-10 08:57:20 +02:00
Sebastian
3ee77d44ad start the relayer business 2023-08-10 08:55:10 +02:00
Sebastian
f2777d6c3b make the rpc accessible 2023-08-08 12:29:26 +02:00
Sebastian
a269a2d8af make the rpc accessible 2023-08-08 12:26:31 +02:00
Sebastian
6d7e8422ac remove name 2023-08-08 10:50:33 +02:00
Sebastian
6482321617 add moonriver 2023-08-08 10:48:18 +02:00
Sebastian
9db8810c7f update version and allow all hosts 2023-08-05 09:10:44 +02:00
Sebastian
d9c68992f1 update version and allow all hosts 2023-08-05 09:08:49 +02:00
Sebastian
e20cdede10 update 2023-08-05 06:28:25 +02:00
Sebastian
156525003c update version 2023-08-02 10:36:13 +02:00
Sebastian
3023c102b3 update arbitrum 2023-08-02 08:32:23 +02:00
Sebastian
3b9873b91d update dshackle 2023-07-25 09:59:06 +02:00
Sebastian
2f28084d9c incorporate firehose into the monorepo 2023-07-20 15:54:54 +02:00
Sebastian
84adad7cfd let erigon do snaps again 2023-07-20 15:38:51 +02:00
Sebastian
cab1f355da fixed erigon verson that doesn't sync gnosis anymore 2023-07-20 15:34:16 +02:00
Sebastian
523c3bb413 flag was removed but authrpc needs to be reachable 2023-07-18 10:34:56 +02:00
Sebastian
f1401a7693 activate externalcl 2023-07-18 10:30:08 +02:00
Sebastian
f51601d77d fix lighthouse connection path 2023-07-18 10:27:36 +02:00
Sebastian
74d315519b don't do snapshots on gnosis they fail to execute 2023-07-18 10:21:56 +02:00
Sebastian
cb1afd5aab fire up erigon for gnosis 2023-07-17 05:21:24 +02:00
Sebastian
0031407fff add some parameter 2023-07-11 17:08:04 +02:00
Sebastian
b410a3123a fix typo 2023-07-11 08:57:29 +02:00
Sebastian
4eed5f219f update for hardfork 2023-07-11 08:54:32 +02:00
Sebastian
37b1d8bb84 update version 2023-07-03 12:29:36 +02:00
Sebastian
11ee4ee9c4 better labels 2023-06-27 13:04:56 +02:00
Sebastian
09b616fe7f better route 2023-06-27 12:48:11 +02:00
Sebastian
e9f49878e0 better route 2023-06-27 12:46:44 +02:00
Sebastian
3a4a1e73ec better path 2023-06-27 12:42:12 +02:00
Sebastian
7ccaab0a6d why not both? 2023-06-27 12:34:46 +02:00
Sebastian
dd2fe64355 try again op-geth 2023-06-26 17:27:15 +02:00
Sebastian
5353f48c17 try again op-geth 2023-06-26 17:26:13 +02:00
Sebastian
6648247d0a next 2023-06-26 17:22:34 +02:00
Sebastian
8fbc473992 try erigon again after bootstrapping op-node 2023-06-26 17:21:28 +02:00
Sebastian
4a0d23d8a1 fix op-geth command full 2023-06-26 16:50:49 +02:00
Sebastian
2d2c0c2393 fix op-geth command block indent 2023-06-26 16:48:53 +02:00
Sebastian
d7f9122983 fix op-geth command 2023-06-26 16:48:06 +02:00
Sebastian
bbae1bd48a use op-geth instead of op-erigon 2023-06-26 16:32:31 +02:00
Sebastian
11a0c5a766 simplyfy 2023-06-25 23:02:42 +02:00
Sebastian
e33c22e61c simplyfy 2023-06-25 22:58:43 +02:00
Sebastian
473d9738de lets define the network 2023-06-25 22:48:12 +02:00
Sebastian
0e009632ff lets define the ports 2023-06-25 22:46:55 +02:00
Sebastian
d870e0a7f8 of course it's called mainnet 2023-06-25 22:44:14 +02:00
Sebastian
72e3f18531 forgot a dash 2023-06-25 22:34:24 +02:00
Sebastian
d3a91e951d maybe the right config for bedrock 2023-06-25 22:32:50 +02:00
Sebastian
2ab9331b1f can mainet be wrong? 2023-06-25 22:17:56 +02:00
Sebastian
f631dca7ab found the typo 2023-06-25 22:14:55 +02:00
Sebastian
87571b6099 add some debug logic 2023-06-25 22:12:35 +02:00
Sebastian
e3bbd2b4d2 another try 2023-06-25 22:05:24 +02:00
Sebastian
55b56670ae make something executable 2023-06-25 21:58:58 +02:00
Sebastian
c5eeb2c136 missing something 2023-06-25 21:58:25 +02:00
Sebastian
69ad058c39 try to unstuck it after bedrock 2 2023-06-25 21:56:45 +02:00
Sebastian
5a4e908c2f try to unstuck it after bedrock 2023-06-25 21:54:35 +02:00
Sebastian
ff74f89bbc update dshackle version 2023-06-15 15:56:30 +02:00
Sebastian
54b2ab3d12 it's not snapshot anymore 2023-05-17 05:30:39 +02:00
Sebastian
2cb0ccebfb update dshackle 2023-05-08 18:37:07 +02:00
Sebastian
e6816215f5 revert 2023-05-05 08:23:22 +02:00
Sebastian
1aeae9f9d8 change goerli data volume 2023-05-02 17:47:53 +02:00
Sebastian
18ecd2efbd nofile limit 2023-05-02 08:07:10 +02:00
Sebastian
d2ff66e529 raise nofile 2023-05-02 08:02:24 +02:00
Sebastian
852cb95ca6 raise nofile 2023-05-02 07:09:12 +02:00
Sebastian
319964a0b9 revert 2023-04-27 11:20:21 +02:00
Sebastian
e23c563dc1 prepare polygon erigon to grow 2023-04-27 09:53:23 +02:00
Sebastian
32391d5069 upp the gaslimit on erigon 2023-04-25 10:39:30 +02:00
Sebastian
c45031f24b update avalanche 2023-04-20 06:53:42 +02:00
Sebastian
d224282d73 fix the command line 2023-04-19 08:41:47 +02:00
Sebastian
8570011973 add gascap for blast 2023-04-18 10:52:05 +02:00
Sebastian
d5b26e80b5 update lighthouse too 2023-04-12 10:12:52 +02:00
Sebastian
7898e5691a update version for hardfork 2023-04-11 11:25:19 +02:00
Sebastian
f93474073a map optimism websocket on a different port than http 2023-04-11 11:04:58 +02:00
Sebastian
831acfa75c fix ports again 2023-04-11 07:25:16 +02:00
Sebastian
3465d8b944 fix port for optimism archive node 2023-04-11 07:15:16 +02:00
Sebastian
b22c3d41d2 sync optimism from l2 2023-04-07 13:07:42 +02:00
Sebastian
a2bf1d1870 update drpc and allow erigon polygon to return large results 2023-04-05 07:35:14 +02:00
Sebastian
a83755b6e3 bump version and raise limit for response sizes 2023-04-03 13:26:23 +02:00
Sebastian
753b3ba30b fix the filename 2023-04-01 11:36:41 +02:00
Sebastian
158dbc8235 release the fantom 2023-04-01 11:32:04 +02:00
Sebastian
fa0c77004b fix the fantom script 2023-04-01 11:24:12 +02:00
Sebastian
52dfafb7f5 run as root 2023-03-30 08:52:34 +02:00
Sebastian
d2ac90a484 escape the globs 2023-03-30 08:49:41 +02:00
Sebastian
1e18e4e052 fix typo 2023-03-30 08:21:38 +02:00
Sebastian
09e3ecde8e remove any formatting form the startup command 2023-03-30 08:20:27 +02:00
Sebastian
3f970cef35 remove the line breaks 2023-03-30 08:13:19 +02:00
Sebastian
e060eeca87 remove the = from erigon startup command 2023-03-30 08:06:43 +02:00
Sebastian
b64344d306 update polygon erigon to use default for snapshots (false) 2023-03-30 07:46:29 +02:00
Sebastian
380d95e7a5 update polygon erigon 2023-03-30 07:20:29 +02:00
Sebastian
74d61b95a3 update 2023-03-26 16:43:56 +02:00
Sebastian
151c767b35 upgrade 2023-03-24 15:34:05 +01:00
Sebastian
2b4f41c4eb update lighthouse 2023-03-24 05:05:38 +01:00
Sebastian
542ea0d46e version upgrade 2023-03-24 02:26:53 +01:00
Sebastian
ab3ac54566 version upgrade 2023-03-24 02:24:49 +01:00
Sebastian
8e1e8460a5 reactivate arbitrum with an update once more 2023-03-18 02:51:40 +01:00
Sebastian
0b87a3e429 ubgrade bc dshackle doesnt know arbitrum anymore 2023-03-17 10:27:26 +01:00
Sebastian
e361e60f93 downgrade (2 days late) 2023-03-16 02:30:50 +01:00
Sebastian
676971bfa9 open websocket port 2023-03-10 04:43:56 +01:00
Sebastian
0df8656abd update erigon AGAIN but better 2023-03-10 02:27:12 +01:00
Sebastian
dbb51b6f40 update erigon AGAIN 2023-03-10 02:23:23 +01:00
Sebastian
87db0f1b7c bump nethermind for goerli hardfork 2023-03-08 02:33:33 +01:00
Sebastian
c22b8b7e01 bump goerli 2023-03-08 02:29:45 +01:00
Sebastian
810843a592 erigon version downgrade 2023-03-07 10:01:59 +01:00
Sebastian
d264f843ac erigon version upgrade 2023-03-07 04:29:55 +01:00
Sebastian
b4f72adad4 this is fantom archive 2023-03-04 13:18:43 +01:00
Sebastian
95cfb4e62c this is fantom archive 2023-03-04 13:17:04 +01:00
Sebastian
18a4e27315 bump dshackle 2023-03-02 00:21:20 +01:00
Sebastian
bebec472d3 add drpc to the mix 2023-02-26 10:07:06 +01:00
Sebastian
885cd2945e add drpc to the mix 2023-02-26 09:20:09 +01:00
Sebastian
10c3a8e6e6 add drpc to the mix 2023-02-26 09:14:57 +01:00
Sebastian
9973ac5dbd add drpc to the mix 2023-02-26 09:10:29 +01:00
Sebastian
43b2d916ae erigon update 2023-02-03 07:14:11 +01:00
Sebastian
90bd2338bf update the erigon version 2023-02-03 07:07:54 +01:00
Sebastian
7b770239cd raise calldata limit 2023-02-03 07:02:21 +01:00
Sebastian
4cf497314b update nethermind 2023-02-03 06:35:41 +01:00
Sebastian
3ce59b84ff traefik for celo 2023-01-29 17:18:40 +01:00
Sebastian
8c893cd195 fix rpc 2023-01-29 07:17:47 +01:00
Sebastian
8ac4fd47e1 patch the patch 2023-01-28 17:19:20 +01:00
Sebastian
4aee4737a9 network for monitoring only 2023-01-27 15:09:06 +01:00
Sebastian
e86aa844c5 revert metrics 2023-01-26 13:14:53 +01:00
Sebastian
2528fb58fb debugging 2023-01-26 13:13:10 +01:00
Sebastian
b47b03a945 add the metrics and trafeik rules to optimism 2023-01-26 13:03:33 +01:00
Sebastian
52cc47a3b4 oops metrics got cutoff 2023-01-26 11:26:38 +01:00
Sebastian
1d29e1ef73 expose the ports 2023-01-26 11:12:27 +01:00
Sebastian
93310f3aba integreate the changes from the live machine 2023-01-26 10:52:58 +01:00
Sebastian
526bb3d006 add metrics 2023-01-26 10:46:56 +01:00
Sebastian
faf41b2e2c update erigon version 2023-01-18 11:46:53 +01:00
Sebastian
1eca59ad15 fixes 2023-01-17 12:20:39 +01:00
Sebastian
e8741ef0da add polygon link 2023-01-16 19:22:07 +01:00
Sebastian
d8b2f0bfa2 add volume 2023-01-16 19:19:21 +01:00
Sebastian
9b31fa08f2 fix identation 2023-01-16 19:16:29 +01:00
Sebastian
bffb6242df initial commit 2023-01-16 19:12:03 +01:00
Sebastian
3671aa635f easy 2023-01-15 20:07:27 +01:00
Sebastian
6f762782e8 finish up 2023-01-15 20:05:31 +01:00
Sebastian
e16fa85294 finish up 2023-01-15 20:03:15 +01:00
Sebastian
a8e22c635f rock 2023-01-15 19:52:50 +01:00
Sebastian
da0b5f1673 jo know 2023-01-15 19:48:14 +01:00
Sebastian
8fdfa5dc77 indentation rulez 2023-01-15 19:18:17 +01:00
Sebastian
25e93d9a27 please me more with avax 2023-01-15 19:16:12 +01:00
Sebastian
28d7accdf5 easy 2023-01-15 19:02:59 +01:00
Sebastian
c4d2d958f9 easy 2023-01-15 19:00:21 +01:00
Sebastian
ef2d87e7f8 plug 2023-01-15 18:57:32 +01:00
Sebastian
030d67afb5 plug 2023-01-15 18:56:31 +01:00
Sebastian
4fea049c2d plug 2023-01-15 18:54:37 +01:00
Sebastian
5252dc47bb plug 2023-01-15 18:52:01 +01:00
Sebastian
ba8e95775d plug 2023-01-15 18:51:21 +01:00
Sebastian
afde8777b1 plug 2023-01-15 18:50:48 +01:00
Sebastian
2b5bf8c307 plug 2023-01-15 18:48:02 +01:00
Sebastian
002bf6dab5 easier 2023-01-15 18:46:31 +01:00
Sebastian
d9d1195552 fix 2023-01-15 18:43:37 +01:00
Sebastian
4b01ff192b fix 2023-01-15 18:42:58 +01:00
Sebastian
6d5cf07f3a link section 2023-01-15 18:39:46 +01:00
Sebastian
24a7539bcc link section 2023-01-15 18:35:21 +01:00
Sebastian
b11596c20d better format and stuff 2023-01-15 18:34:47 +01:00
Sebastian
5baffa724f motivationgit add docs/howto-arbitrum-archive.md ! 2023-01-15 18:18:04 +01:00
Sebastian
f60fa60d0d time estimate 2023-01-15 18:16:26 +01:00
Sebastian
143a2f10ae make it more fun 2023-01-15 18:14:16 +01:00
Sebastian
bde2220783 fix identation 2023-01-15 17:51:52 +01:00
Sebastian
0df4b867fa fix identation 2023-01-15 17:34:05 +01:00
Sebastian
c1e61042a4 fix identation 2023-01-15 17:33:15 +01:00
Sebastian
53ca285d5a fix identation 2023-01-15 17:29:41 +01:00
Sebastian
7e48b287a0 fix identation 2023-01-15 17:23:44 +01:00
Sebastian
4f383b8009 open udp for heimdall 2023-01-15 12:17:26 +01:00
Sebastian
00295ba8ae replace checkpoint sync endpoint 2023-01-15 11:36:14 +01:00
Sebastian
751bb532e5 init the arb classic node with a snapshot 2023-01-15 09:31:01 +01:00
Sebastian
a2758c9ff1 prepare classic snapshot download 2023-01-15 09:17:57 +01:00
Sebastian
fe2deea1b0 prepare classic snapshot download 2023-01-15 09:15:44 +01:00
Sebastian
cbe2b1c903 prepare classic snapshot download 2023-01-15 09:14:39 +01:00
Sebastian
d8fbd33ec4 update erigon 2023-01-14 14:03:17 +01:00
Sebastian
8a4a9dd56e nitro needs to be initialized with a snapshot 2023-01-14 13:53:49 +01:00
Sebastian
0ef3dfa1f3 polygon version update 2023-01-14 13:22:17 +01:00
Sebastian
4aa326cdec version update for nethermind 2023-01-14 10:24:12 +01:00
Sebastian
cc49b98e8f fix the title 2023-01-09 18:50:24 +01:00
Sebastian
98d25c91b4 donno 2023-01-09 18:49:22 +01:00
Sebastian
7fbc5e1567 fix mount point for classic 2023-01-09 18:48:59 +01:00
Sebastian
9bafbe7c3b change sync duration estimae and mount point for classic node data dir and indentation 2023-01-09 18:48:25 +01:00
Sebastian
2cc27e2c3f add arbitrum guide 2023-01-09 18:36:32 +01:00
Sebastian
aaf4a31edc add optimism guide 2023-01-09 17:41:21 +01:00
Sebastian
12d62ac932 fix typo 2023-01-09 17:12:32 +01:00
Sebastian
24ea8cdbdb add guide for celo 2023-01-09 16:59:40 +01:00
Sebastian
1fe1ebd142 finish 2023-01-09 12:47:42 +01:00
Sebastian
7201d3fb4e finish 2023-01-09 12:45:04 +01:00
Sebastian
916dc87412 finish 2023-01-09 12:44:09 +01:00
Sebastian
bc5cb84b7d no italic 2023-01-09 12:43:10 +01:00
Sebastian
163774c8fa add debugging notes 2023-01-09 12:41:47 +01:00
Sebastian
34807b3b6a make guides for free 2023-01-09 12:31:35 +01:00
Sebastian
cabe11b8bb name clashes 2023-01-07 20:58:29 +01:00
Sebastian
a716b82931 fix volume and version 2023-01-07 14:24:07 +01:00
Sebastian
ee66c24e6e fix volume and version 2023-01-07 14:22:09 +01:00
Sebastian
fafc0fa051 update nethermind with traces 2023-01-07 12:48:51 +01:00
Sebastian
213d972132 fix params 2023-01-07 12:23:31 +01:00
Sebastian
1fba148e1a fix params 2023-01-07 12:20:06 +01:00
Sebastian
a297ef6f7a fix params 2023-01-07 12:09:12 +01:00
Sebastian
4741d6cb39 fix ports 2023-01-07 12:05:11 +01:00
Sebastian
bafbacf33e fix volume 2023-01-07 12:01:49 +01:00
Sebastian
1ba6073bcf add classic gateway to arbitrum 2023-01-07 11:59:44 +01:00
Sebastian
b2b12a3b39 nothing happened 2023-01-06 16:26:28 +01:00
Sebastian
f9e6ab6990 fix da erigon 2023-01-06 16:18:07 +01:00
Sebastian
dd585b8afb celo fix 2023-01-05 17:20:44 +01:00
Sebastian
60912c1ab7 added celo archive 2023-01-05 17:16:20 +01:00
Sebastian
94b654ec8b make them executable 2023-01-05 17:03:11 +01:00
Sebastian
3ed5dba101 fix permissions 2023-01-05 16:59:33 +01:00
Sebastian
a52023aa5c fix path 2023-01-05 16:48:00 +01:00
Sebastian
6d8324ded8 add optimism 2023-01-05 16:45:31 +01:00
Sebastian
c8552100f8 fix permissions 2023-01-05 16:07:12 +01:00
Sebastian
80ecafdb0b fix permissions 2023-01-05 15:54:48 +01:00
Sebastian
c220f5cb84 add arbitrum 2023-01-05 15:51:34 +01:00
Sebastian
a58c45afcb update dat erigon config 2023-01-05 15:16:00 +01:00
Sebastian
baf5f3cdd8 version upgrade avalanche 2023-01-05 15:15:24 +01:00
Sebastian
e31b548633 add avalanche archive 2023-01-05 15:14:45 +01:00
Sebastian
c1c0e54c21 remove ws.port flag as the polygon build doesn't support it yet 2022-12-18 12:16:04 +01:00
Sebastian
d5313803dd update the polygon erigon setup to be able to use public rpcs and the new official supported version 2022-12-18 12:02:30 +01:00
Sebastian
f0ee4c3038 set receipts and bodies download to all blocks 2022-12-15 20:28:17 +01:00
Sebastian
f2f6b83979 use official docker image for goerli 2022-12-12 17:41:13 +01:00
Sebastian
a30706db18 fix nethermind docker image verison and set snap sync to true 2022-12-12 13:53:13 +01:00
Sebastian
426232b477 correct startup command 2022-12-10 18:04:20 +01:00
Sebastian
cc3d064256 remove heimdallr 2022-12-10 17:50:34 +01:00
Sebastian
f7126a4620 try new erigon verison for bor 2022-12-10 17:47:22 +01:00
Sebastian
ee43c5990a fix the routing - again 2022-12-10 14:36:32 +01:00
Sebastian
3a3815564d fix checkpoint sync endpoint 2022-12-10 14:02:24 +01:00
Sebastian
5cc1583360 fixed traefik route 2022-12-10 13:55:06 +01:00
Sebastian
145b92b4e0 fixed udp discovery port 2022-12-10 13:37:48 +01:00
Sebastian
8624205ac3 raise the peers limit for faster sync 2022-12-10 13:31:56 +01:00
Sebastian
5e532f3fdb add nethermind goerli 2022-12-10 13:16:52 +01:00
Sebastian
7354b17b08 disable snap sync 2022-12-09 20:45:43 +01:00
Sebastian
6bd35eae88 point lighthouse to the right network 2022-12-09 18:07:17 +01:00
Sebastian
14fbeb86ee fix a comma 2022-12-09 18:03:52 +01:00
Sebastian
5d5a1c8146 fixes 2022-12-09 18:02:38 +01:00
Sebastian
82cb2bdbde add nethermind mainnet configuration 2022-12-09 17:23:39 +01:00
Sebastian
b82c3d8415 rework the erigon mainnet setup 2022-12-05 09:48:49 +01:00
Sebastian
632f9e9157 rework the erigon mainnet setup 2022-12-05 09:43:56 +01:00
Sebastian
e0d684a014 updates 2022-12-04 15:05:08 +01:00
Sebastian
fd1bb7ddff update polygon archive 2022-12-04 14:04:04 +01:00
Sebastian
7611d4583c apply updates 2022-12-03 17:05:51 +01:00
cventastic
63c0c63427 migrate pokt-de-1 2022-11-28 08:58:07 +01:00
cventastic
60b12ef154 migrate pokt-de-1 2022-11-28 08:57:00 +01:00
cventastic
16970cd980 migrate pokt-de-1 2022-11-28 08:55:20 +01:00
cventastic
58e7b7a014 forgot to pull newest version 2022-11-28 08:05:02 +01:00
cventastic
7b654f5523 forgot to pull newest version 2022-11-28 08:03:17 +01:00
cventastic
fcd1b669c2 forgot to pull newest version 2022-11-27 09:42:08 +01:00
cventastic
24bd04fee2 documented parameter doesnt exist xD 2022-11-27 09:38:15 +01:00
cventastic
68c716b7a9 documented parameter doesnt exist xD 2022-11-27 09:37:41 +01:00
cventastic
6451e6bab8 add 2022-11-27 09:32:34 +01:00
cventastic
894e464af8 add chains network to xdai-archive.yml 2022-11-19 10:36:57 +01:00
cventastic
bac89dcd18 new image tag 2022-11-17 15:59:23 +01:00
cventastic
5c68b48267 change author history 2022-11-17 13:19:23 +01:00
cventastic
12625e82b0 README.md 2022-11-08 21:57:44 +01:00
cventastic
d5049475cd non sustainable sync speed with vmtrace and statediif 2022-11-08 21:47:21 +01:00
cventastic
e1edc72d4e non sustainable sync speed with vmtrace and statediif 2022-11-08 21:29:11 +01:00
cventastic
8e5368e211 non sustainable sync speed with vmtrace and statediif 2022-11-08 21:26:38 +01:00
cventastic
8063a81d4d comment pokt-lt-3.
tight storage
2022-11-06 13:20:12 +01:00
cventastic
564d265a91 comment pokt-lt-3.
tight storage
2022-11-06 13:20:00 +01:00
cventastic
fce36dc430 migrate 2022-11-06 13:13:38 +01:00
cventastic
91dcdc706b migrate 2022-11-06 13:12:54 +01:00
cventastic
e9195f2c1a less peers, speed not good 2022-11-05 12:27:10 +01:00
cventastic
ee1707bf0f add more peers for speed? 2022-11-04 22:44:59 +01:00
cventastic
c9ae74831e add more peers for speed? 2022-11-04 21:12:52 +01:00
cventastic
b81e637fcf edit 2022-11-04 20:12:39 +01:00
cventastic
4d4b9060c6 new seeds 2022-11-04 10:31:06 +01:00
cventastic
03ce92bfc1 meh 2022-11-03 09:32:57 +01:00
cventastic
b2de047014 meh 2022-11-03 09:31:21 +01:00
cventastic
2b6e1020aa meh 2022-11-03 09:30:14 +01:00
cventastic
208fcfbf53 xdai-archive.yml
tracestore flags not working via environment
2022-11-03 09:29:18 +01:00
cventastic
4f282a205a xdai-archive.yml
try new trace flags per environment
2022-11-03 09:18:24 +01:00
cventastic
8f5a05c6a9 close ports 🤦 2022-11-01 11:48:30 +01:00
cventastic
015670c2ce new image in hopes of fixing trace call performance 2022-11-01 10:55:02 +01:00
cventastic
ff8791e6fc renamed 2022-11-01 10:40:00 +01:00
cventastic
e4056bc0c3 not only readme but really add xdai-archive.yml 2022-11-01 10:34:10 +01:00
cventastic
c7656c6fd8 add xdai archive 2022-11-01 10:33:37 +01:00
cventastic
a6bc330a97 new pokt version 2022-10-27 02:02:55 +02:00
cventastic
e8267ff402 added new POKT version.
removed pocket-de.yml because its possible to use pocket-frontend.yml on hosts with one pocket container
2022-10-26 15:33:34 +02:00
cventastic
078b88cd16 added torrent download/upload limits 2022-10-26 14:59:04 +02:00
cventastic
1b228c0c35 correct image tag 2022-10-24 09:47:03 +02:00
cventastic
6db5e213eb specify goerli label 2022-10-24 09:45:05 +02:00
cventastic
6011dac0ce add --reconstruct-historic-states 2022-10-24 09:44:00 +02:00
cventastic
edb8d2bfc5 older version cause open issues for new one 2022-10-21 21:10:35 +02:00
cventastic
e33d3a5d85 path? 2022-10-21 21:05:37 +02:00
cventastic
c894badf77 lighthouse prater 2022-10-21 20:55:23 +02:00
cventastic
971e5ee6f9 add new version 2022-10-21 20:34:21 +02:00
cventastic
6211f15994 add new version 2022-10-21 20:33:22 +02:00
cventastic
eba55b0098 add erigon-goerli.yml 2022-10-21 20:08:39 +02:00
cventastic
97f3bb4644 add erigon-goerli.yml 2022-10-21 20:07:55 +02:00
cventastic
ac25454915 fix bootstrap folders 2022-10-20 10:28:34 +02:00
cventastic
a59dbb66dc add torrent.upload/download.rate to erigon in entrypoint.sh 2022-10-19 14:39:11 +02:00
cventastic
80cc6ce977 fix traefik flags for erigon-polygon.yml 2022-10-19 14:22:40 +02:00
cventastic
5e3263dec3 added flags to erigon in entrypoint.sh
rename volume in erigon-polygon.yml
2022-10-19 14:16:05 +02:00
cventastic
aab7c74474 Merge branch 'main' of github.com:cventastic/POKT_DOKT 2022-10-19 12:50:58 +02:00
cventastic
115916a9f9 added libgcc and libstdc++ to polygon-erigon alpine container 2022-10-19 12:38:04 +02:00
cventastic
9cf3c5b71e replace bor with erigon for archive 2022-10-19 12:21:22 +02:00
czarly
a12453aa1d fix 2022-10-19 14:02:31 +04:00
czarly
adfff835d1 fix 2022-10-19 13:47:55 +04:00
czarly
4fdba5f39a fix 2022-10-19 13:41:34 +04:00
czarly
5a89240a61 fix 2022-10-19 13:33:49 +04:00
czarly
4b40259c93 fix 2022-10-19 13:33:02 +04:00
czarly
79e58187f1 route dshackle via traefik and add prometheus scraping 2022-10-19 13:29:55 +04:00
czarly
cca28c35a7 added dshackle confgured with free node providers for ethereum 2022-10-19 13:20:24 +04:00
cventastic
7163d7e630 extend heimdall bootnodes/seeds 2022-10-17 17:45:04 +02:00
cventastic
81f70d04c2 remove testnets goerli.yml, rinkeby.yml, ropsten.yml 2022-10-17 12:03:37 +02:00
cventastic
7f5b7fbec6 new snapshot urls for bootstrapping polygon 2022-10-17 11:58:35 +02:00
cventastic
2f4a4011f8 pocket-fr.yml doesnt exist anymore, its now pokt-lt-2 2022-10-06 09:57:33 +02:00
cventastic
0553baaac8 new pocket release candidate added to pokt/Dockerfile 2022-10-06 09:46:39 +02:00
cventastic
4a8b3a4cc1 revert ulimit for rpc-base.yml and erigon.yml 2022-10-05 13:46:47 +02:00
cventastic
31cda87d29 ulimit for rpc-base.yml and erigon.yml 2022-10-05 09:37:40 +02:00
cventastic
eaae1ef854 revert:
ulimit and loglevel again
2022-10-04 11:59:50 +02:00
cventastic
e91ef6a623 ulimit and loglevel. to test side-by-side. 2022-10-04 11:50:28 +02:00
cventastic
5073b5543c no noticeable impact on performance by traefik log settings. revert to DEBUG 2022-10-04 11:15:48 +02:00
cventastic
ef6c33b3b4 test traefik loglevel impact on performance 2022-10-04 11:07:55 +02:00
cventastic
44d6000048 no performance gain 2022-10-03 16:34:48 +02:00
cventastic
a8a60fb947 test ulimit for concurrent connections 2022-10-03 16:31:53 +02:00
cventastic
badd46c679 update datasources.yml with new rpcs 2022-09-28 17:46:05 +02:00
cventastic
d7d4acbb58 update pokt-metrics for consolidated nodes 2022-09-28 17:31:04 +02:00
cventastic
80d7e588b6 add TODO 2022-09-27 11:16:42 +02:00
cventastic
7a37728c8b cleanup 2022-09-27 10:18:26 +02:00
cventastic
16d1a1abce move pokt-fr-1 to pokt-lt-2 2022-09-26 14:22:02 +02:00
cventastic
066b7be393 forgot volumes 2022-09-26 11:17:49 +02:00
cventastic
8c4e29d98b remove redundant nodes, stake weighting 2022-09-26 11:17:23 +02:00
cventastic
63beaf9258 cleanup 2022-09-26 00:23:10 +02:00
cventastic
9e0b47a9fc util scripts redundant 2022-09-26 00:12:51 +02:00
cventastic
2ab055e7a8 add port 2022-09-25 23:58:20 +02:00
cventastic
d30accaa19 add whitelist service 2022-09-25 23:54:04 +02:00
cventastic
48fdbf0aac add protokoll? 2022-09-12 18:07:35 +02:00
cventastic
989d2c06c8 add protokoll 2022-09-12 18:07:16 +02:00
cventastic
04d4dd003f add port? 2022-09-12 18:06:27 +02:00
cventastic
be497cfcaf accept TOS 2022-09-12 17:59:10 +02:00
cventastic
3dbdefe715 add prysm.yml 2022-09-12 17:57:32 +02:00
cventastic
303dcdca99 add grace period 2022-09-12 17:31:05 +02:00
cventastic
cfb4490a6d forgot comma 2022-09-12 17:21:54 +02:00
cventastic
d5e5288bd2 update geth for merge 2022-09-12 17:20:33 +02:00
cventastic
1b0de278ae update chain dash 2022-09-08 15:32:21 +02:00
cventastic
0fbcaf1c7e naming of dashboards 2022-09-08 15:23:39 +02:00
cventastic
0ac21e3c4e add erigon dash 2022-09-08 14:48:59 +02:00
cventastic
b693c45ddd default grace period to aggressive with 10 sec 2022-09-08 01:09:50 +02:00
cventastic
460478c6a2 forgot metric ports 2022-09-07 23:51:59 +02:00
cventastic
0136227b4c forgot metric ports 2022-09-07 23:16:13 +02:00
cventastic
f6d85e4014 add monitoring network 2022-09-07 23:08:31 +02:00
cventastic
22c91bf68c add monitoring network 2022-09-07 22:29:48 +02:00
cventastic
a50380028f bump monitoring.yml 2022-09-06 17:15:44 +02:00
cventastic
1c8dd77738 bump pocket version 2022-09-06 16:36:39 +02:00
cventastic
ffd67953f7 bump erigon to 2022.08.03 2022-09-06 13:22:17 +02:00
cventastic
8a483e88fa no storage 2022-09-05 10:25:00 +02:00
cventastic
529a5f00f4 wrong address format 2022-09-05 10:18:26 +02:00
cventastic
e66fe77265 wrong port 2022-09-05 10:17:23 +02:00
cventastic
ae3dc849e4 added beaconchain api and traefik to lighthouse 2022-09-05 10:16:58 +02:00
cventastic
fabe1aa013 remove quotation 2022-08-26 15:12:02 +02:00
cventastic
0ccb87cb94 connect lighthouse 2022-08-26 15:11:08 +02:00
cventastic
ea12105a03 set correct release tag 2022-08-26 14:52:14 +02:00
cventastic
76b214e5bf rework erigon for merge 2022-08-26 14:49:15 +02:00
cventastic
1a9e41eca3 bump to new pokt version 2022-08-15 10:23:24 +02:00
cventastic
1f5d23a1a9 remove tendermind port... metrics also get exposed over 8083 2022-07-06 20:56:26 +02:00
cventastic
9554eaa726 add prometheus to pocket-frontend.yml 2022-07-04 12:06:20 +02:00
cventastic
fae11760d2 add all chains to dash 2022-07-01 17:15:53 +02:00
cventastic
417f01da21 remove datasource 2022-07-01 14:49:24 +02:00
cventastic
b6f1f4f2ab remove datasource 2022-07-01 14:48:05 +02:00
cventastic
7c81a895d5 forgot prometheus labels 2022-07-01 14:37:06 +02:00
cventastic
ba0674fcb1 modularize pocket-lt.yml file 2022-07-01 14:34:07 +02:00
cventastic
6abd3efb95 add autodiscover to pocket-lt.yml 2022-07-01 14:31:03 +02:00
cventastic
5d7e8809e3 remove duplicate 2022-07-01 14:23:49 +02:00
cventastic
499dcdd4c4 depends_on has to be array 2022-07-01 14:21:58 +02:00
cventastic
be27baa9e5 add depends on, otherwise network gets lost 2022-07-01 14:20:09 +02:00
cventastic
7410de9dcf conflicting parameters expose/network_mode 2022-07-01 14:18:08 +02:00
cventastic
b966320f77 conflicting parameters expose/network_mode 2022-06-28 12:26:32 +02:00
cventastic
dbdb6e963c conflicting parameters port/network_mode 2022-06-28 12:25:39 +02:00
cventastic
7235941152 put promehteus into wireguard network 2022-06-28 12:24:03 +02:00
cventastic
2d7342c91a added new format for scraping target, comma seperated ports 2022-06-28 11:12:04 +02:00
cventastic
e3b2558333 added forked prometheus-docker-sd 2022-06-28 11:07:51 +02:00
cventastic
b625adc085 format still doesnt work 2022-06-23 14:36:51 +02:00
cventastic
8a6cd18038 format doesnt work 2022-06-23 14:33:02 +02:00
cventastic
1b1913d335 overhaul monitoring 2022-06-23 14:30:17 +02:00
cventastic
a825b27ccd "latest" tag actually was "stable"-tag. with old db schema. 2022-06-19 14:47:18 +02:00
cventastic
8665068892 thought the i built from source. turns out i left image argument inside. latest=devel. 2022-06-19 12:28:33 +02:00
cventastic
b4a68effbc update to "stable" which still uses old db-format. not going to be useful for the merge 2022-06-17 11:58:46 +02:00
cventastic
0567c540ce update to 2022.03.02 first 2022-06-17 10:26:28 +02:00
cventastic
a24a94df2b reeeewind. must update to previous versions first for db update. 2022-06-17 10:05:04 +02:00
cventastic
7108f5521b new version for gray glacier fork 2022-06-17 10:00:54 +02:00
cventastic
ca8ec01603 new version for bsc fork 2022-06-16 14:02:09 +02:00
cventastic
9a0e0e7e6c grey glacier fork 2022-06-16 10:49:27 +02:00
cventastic
5244b20f21 addrbook volume for pocket-frontend.yml 2022-06-13 14:35:11 +02:00
cventastic
b108e7ad9a add pocket-de.yml 2022-06-13 13:55:53 +02:00
cventastic
2242491f83 pocket-lt.yml make addressbook persistent but not the rest of the config files 2022-06-13 13:02:25 +02:00
cventastic
6206fed2f8 add volume for addrbook 2022-06-13 11:50:31 +02:00
cventastic
1f9087aee2 put addrbook in its own folder, so it can be persisted in volume 2022-06-13 11:48:56 +02:00
cventastic
20059b3ebf put addrbook in its own folder, so it can be persisted in volume 2022-06-13 11:46:20 +02:00
cventastic
e70a7d78ab rewind 2022-06-10 19:03:14 +02:00
cventastic
f757bb53d9 update volume to bind mount 2022-06-10 18:49:35 +02:00
cventastic
d4439b4cd4 update chains.json, use local pokt-rpc 2022-06-10 17:57:09 +02:00
cventastic
6e0abad03c expose port 2022-06-02 16:23:08 +02:00
cventastic
8647d4467d label missing 2022-06-02 16:22:06 +02:00
cventastic
593d610461 update port because its occupied on the interims server 2022-06-02 16:20:57 +02:00
cventastic
569f5c3f9b path to dockerfile 2022-06-02 16:11:16 +02:00
cventastic
355c998d60 list -> array... 2022-06-02 16:10:20 +02:00
cventastic
52aa024568 network -> networks 2022-06-02 16:09:01 +02:00
cventastic
5b024c9a36 added erigon-bsc 2022-06-02 16:07:11 +02:00
cventastic
d06f8949bf add new chain and add persistent config-dir for pocket 2022-05-19 13:58:11 +02:00
cventastic
0f6ad2bca8 add new chain and add persistent config-dir for pocket 2022-05-19 13:55:26 +02:00
cventastic
aed1577472 add fantom to chains network 2022-05-16 10:25:48 +02:00
cventastic
805e283687 account for new config params iavl_cache_size, chains_hot_reload 2022-05-16 07:31:00 +02:00
cventastic
97eb058ecf fix tar syntax for new snapshot format 2022-05-11 15:55:53 +02:00
cventastic
9aea8d8c8a hmm wrong syntax? 2022-05-11 15:47:03 +02:00
cventastic
e5ae6029c2 snapshots are now .gz 2022-05-11 15:43:49 +02:00
cventastic
822f21eea8 cant use different docker-compose file versions 2022-05-11 11:13:32 +02:00
cventastic
fea3390ef3 remove redundancy 2022-05-11 11:07:23 +02:00
cventastic
839da8855c balance right port 2022-05-11 10:59:36 +02:00
cventastic
10bb09a8cc Merge branch 'main' of github.com:cventastic/POKT_DOKT 2022-05-11 10:40:06 +02:00
cventastic
b09445ffae RPC Port is 18545 2022-05-11 10:39:47 +02:00
cventastic
8ac4b09771 added fantom 2022-05-11 10:34:34 +02:00
Sebastian
3d2f33309a added solana to chains list 2022-05-03 13:10:20 +02:00
cventastic
27862bf4c9 haproxy healthcheck permissions 2022-04-27 22:17:42 +02:00
cventastic
e847bfa9fc add pokt-lt 2022-04-26 11:39:12 +02:00
cventastic
8055376766 remove owner/group to bootstrap.sh
add it to dockerfile
2022-04-26 11:09:57 +02:00
cventastic
515be43181 add owner/group to bootstrap.sh 2022-04-26 10:57:05 +02:00
cventastic
ae1597fb53 add haproxy to pokt-fr 2022-04-26 09:48:02 +02:00
cventastic
d174d77c0d add pokt-fr 2022-04-26 09:42:52 +02:00
cventastic
30bedc0c15 add it to the actual command, not the echo... 2022-04-25 13:10:58 +02:00
cventastic
d2782e3433 add unzip flag to tar because of new snapshot format 2022-04-25 13:05:09 +02:00
cventastic
38c14d7d9b remove old chains_mainnet.json
remove old pokt bootstrap skript
added correct chains.json
2022-04-25 12:01:41 +02:00
cventastic
e825e4fa37 only mount chaindir 2022-04-25 11:45:38 +02:00
cventastic
dc7998aa74 change mount path 2022-04-25 09:54:06 +02:00
cventastic
431b9430e2 remove commands that went into Dockerfile 2022-04-25 09:38:28 +02:00
cventastic
34a209dee9 remove files that get copied in Dockerfile 2022-04-25 09:29:17 +02:00
cventastic
1a0b185def use RC-0.8.2 2022-04-25 09:20:23 +02:00
cventastic
aae8cc43f9 add pokt-test for RC 0.8.2 2022-04-25 09:17:00 +02:00
cventastic
8775a70590 hardfork xdai 2022-04-20 17:56:42 +02:00
cventastic
33705263e3 restart geth-mainnet unless stopped 2022-04-20 10:22:55 +02:00
cventastic
a34dc194a1 removed ipwhitelist middleware from avalanche.yml 2022-04-19 19:26:34 +02:00
cventastic
5d377d0e51 added network to haproxy 2022-04-19 17:33:30 +02:00
cventastic
b01b21f271 added portforwarding to localhost for haproxy 2022-04-19 16:44:29 +02:00
cventastic
fe28278076 added permission to healthcheck 2022-04-19 16:24:10 +02:00
cventastic
f95d277640 fix traefik labels 2022-04-19 16:22:43 +02:00
cventastic
770e777e15 add polygon healthcheck 2022-04-19 16:15:02 +02:00
Sebastian
41848d2dc1 fix the docker 2022-04-19 15:10:01 +02:00
Sebastian
f722a9e4ca jo 2022-04-19 14:54:33 +02:00
Sebastian
c646654d65 empty chains config 2022-04-19 14:48:29 +02:00
Sebastian
2310f4bb02 make init script executable 2022-04-19 14:41:37 +02:00
Sebastian
9d3c09e499 add missing init script 2022-04-19 14:41:01 +02:00
Sebastian
160da9ebea added network 2022-04-19 13:44:06 +02:00
Sebastian
cf2a242872 updates 2022-04-19 13:42:31 +02:00
Sebastian
4c18e0e983 add a proxy only 2022-04-19 13:28:36 +02:00
cventastic
05de20ee72 added tweaks for polygon-fullnode.yml 2022-04-04 22:41:10 +02:00
cventastic
74a9f8ac7f bootscipt also runs as pokt-user 2022-04-04 15:32:59 +02:00
cventastic
8e55c751f8 oops 2 2022-04-01 14:10:23 +02:00
cventastic
c046563bef oops 2022-03-31 16:14:47 +02:00
cventastic
dd0e402511 fixed traefik label 2022-03-31 13:50:45 +02:00
cventastic
9de6badecc removed eth-api parameter eth 2022-03-29 22:02:25 +02:00
czarly
cc02077bf4 robust snapshot download 2022-03-25 04:22:14 +04:00
czarly
115c4fd30f robust snapshot download 2022-03-25 04:20:24 +04:00
czarly
b24601f47a robust snapshot download 2022-03-25 04:16:49 +04:00
czarly
297d2be210 update snapshot url and make archive node bootstrapping 2022-03-24 22:27:47 +04:00
cventastic
5f36d3920b forgot to add file... 2022-03-22 18:07:28 +01:00
cventastic
b80cd5f40d added tls options/dynamic config file 2022-03-22 18:06:42 +01:00
cventastic
26a0c276f3 one tab too much 2022-03-22 12:11:48 +01:00
cventastic
fae0461504 polygon/heimdall update 2022-03-22 12:05:52 +01:00
cventastic
9673c5223b add metrics autodiscovery for harmony 2022-03-17 13:20:02 +01:00
cventastic
faa4c41b89 added fuse metrics and autodiscovery 2022-03-17 13:04:50 +01:00
cventastic
00b8453faf added fuse metrics 2022-03-17 13:03:47 +01:00
cventastic
7f3824b5f9 added avalanche metrics 2022-03-17 12:02:29 +01:00
cventastic
7ab717dc91 forgot geth-mainnet labels 2022-03-16 16:01:59 +01:00
cventastic
e76bf13833 added all geth clients for prometheus autodiscovery 2022-03-16 15:59:27 +01:00
cventastic
633227fb49 added correct geth metrics path 2022-03-16 15:53:34 +01:00
cventastic
f89e002d9a added metrics ropsten 2022-03-16 14:40:58 +01:00
cventastic
7c082289f9 added metrics ropsten 2022-03-16 14:38:17 +01:00
cventastic
d24d7b45a3 remove prometheus.yml and remove nodeexporter/cadvisor from autodiscover, because they too should be autodiscovered. 2022-03-16 14:33:25 +01:00
cventastic
33e2908492 put prometheus into chains network... 2022-03-16 14:19:59 +01:00
cventastic
b10221e068 remove internal network...
cant reach prometheus for queries this way
2022-03-16 14:17:21 +01:00
cventastic
766caca32d fix typo 2022-03-16 14:06:02 +01:00
cventastic
892c5eb1e6 prom autodiscovery 2022-03-16 14:01:50 +01:00
cventastic
7f1cb0474e removed new line 2022-03-16 12:41:17 +01:00
cventastic
a28269caee added newline 2022-03-16 12:13:52 +01:00
czarly
2bf10202ae more fix pokt 2022-03-16 13:25:55 +04:00
czarly
629a72f068 more fix pokt 2022-03-16 13:24:55 +04:00
czarly
8713748aad more fix pokt 2022-03-16 13:23:42 +04:00
czarly
fe828e0645 more fix pokt 2022-03-16 13:16:06 +04:00
czarly
93afe6ccae more fix pokt 2022-03-16 13:14:28 +04:00
czarly
a77d152f8d final fix pokt 2022-03-16 13:11:16 +04:00
czarly
d620891dff more pokt node fixes 2022-03-16 13:04:07 +04:00
czarly
4a5ef8721e more pokt node fixes 2022-03-16 13:00:36 +04:00
czarly
2f48795651 more pokt node fixes 2022-03-16 12:57:37 +04:00
czarly
623c458776 move pokt config inside the container 2022-03-16 12:46:47 +04:00
czarly
224326487c remove some files 2022-03-15 17:56:31 +04:00
czarly
b935d3e71f fix xdai rpc 2022-03-15 17:49:41 +04:00
czarly
459e253686 missing Dockerfile 2022-03-15 16:27:43 +04:00
czarly
7a07b719a8 first try of a frontend config 2022-03-15 15:51:58 +04:00
czarly
5c06b75b12 move the ip whitelist to the traefik container 2022-03-15 14:35:57 +04:00
czarly
165a7426d1 tag the image versions for more stability 2022-03-14 20:09:57 +04:00
czarly
147d398067 proper path to avalanche 2022-03-14 20:09:33 +04:00
czarly
bf8653ae6c fix polygon heimdall snapshot 2022-03-14 18:58:25 +04:00
czarly
7ca937a745 fix polygon archive data volume 2022-03-14 18:54:01 +04:00
czarly
785aac52c5 fix polygon archive data volume 2022-03-14 18:52:45 +04:00
czarly
c70dc42833 interims fix 2022-03-14 18:32:03 +04:00
czarly
e18d3bd2be interims fix 2022-03-14 18:27:36 +04:00
czarly
52adfce138 fix 2022-03-14 18:16:34 +04:00
czarly
b5dc667a1c fix 2022-03-14 18:15:41 +04:00
czarly
8a7fa9ef6f added geth 2022-03-14 18:14:52 +04:00
czarly
11ac3d9f0a fix 2022-03-14 18:07:56 +04:00
czarly
a66da89fdb fix 2022-03-14 18:06:17 +04:00
czarly
1d03732438 fix 2022-03-14 18:01:31 +04:00
czarly
70b1ccb458 fix the monitoring to come up 2022-03-14 17:59:30 +04:00
czarly
b119d8da26 typo 2022-03-14 17:55:03 +04:00
czarly
6ddc7d5926 split up the big files 2022-03-14 17:49:20 +04:00
754 changed files with 584118 additions and 291 deletions

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
.env

363
README.md
View File

@@ -1,20 +1,353 @@
Place val.key and val.pass into ./harmony/keyfiles
```
docker-compose up -d
```
Check if Bootstrap is working.
```
docker logs -f pokt_dokt_harmony-mainnet_1
```
Logs are at:
```
harmony/log
```
# Blockchain Node Configurations
This directory contains Docker Compose configurations for various blockchain networks and node implementations.
## Directory Structure
- Root level YAML files (e.g. `ethereum-mainnet.yml`, `arbitrum-one.yml`) - Main Docker Compose configurations for specific networks
- Network-specific subdirectories - Contain additional configurations, genesis files, and client-specific implementations
- Utility scripts (e.g. `show-networks.sh`, `logs.sh`) - Helper scripts for managing and monitoring nodes
## Node Types
This repository supports multiple node types for various blockchain networks:
- **Ethereum networks**: Mainnet, Sepolia, Holesky
- **Layer 2 networks**: Arbitrum, Optimism, Base, Scroll, ZKSync Era, etc.
- **Alternative L1 networks**: Avalanche, BSC, Fantom, Polygon, etc.
Most networks have both archive and pruned node configurations available, with support for different client implementations (Geth, Erigon, Reth, etc.).
## Quick Start
1. Create a `.env` file in this directory (see example below)
2. Select which node configurations you want to run by adding them to the `COMPOSE_FILE` variable
3. Run `docker compose up -d`
4. Access your RPC endpoints at `https://yourdomain.tld/path` or `http://localhost:port`
### Example .env File
```bash
# Domain settings
DOMAIN=203-0-113-42.traefik.me # Use your PUBLIC IP with dots replaced by hyphens
MAIL=your-email@example.com # Required for Let's Encrypt SSL
WHITELIST=0.0.0.0/0 # IP whitelist for access (0.0.0.0/0 allows all)
# Public IP (required for many chains)
IP=203.0.113.42 # Your PUBLIC IP (get it with: curl ipinfo.io/ip)
# Network settings
CHAINS_SUBNET=192.168.0.0/26
# RPC provider endpoints (fallback/bootstrap nodes)
ETHEREUM_MAINNET_EXECUTION_RPC=https://ethereum-rpc.publicnode.com
ETHEREUM_MAINNET_EXECUTION_WS=wss://ethereum-rpc.publicnode.com
ETHEREUM_MAINNET_BEACON_REST=https://ethereum-beacon-api.publicnode.com
ETHEREUM_SEPOLIA_EXECUTION_RPC=https://ethereum-sepolia-rpc.publicnode.com
ETHEREUM_SEPOLIA_EXECUTION_WS=wss://ethereum-sepolia-rpc.publicnode.com
ETHEREUM_SEPOLIA_BEACON_REST=https://ethereum-sepolia-beacon-api.publicnode.com
ARBITRUM_SEPOLIA_EXECUTION_RPC=https://arbitrum-sepolia-rpc.publicnode.com
ARBITRUM_SEPOLIA_EXECUTION_WS=wss://arbitrum-sepolia-rpc.publicnode.com
# SSL settings (set NO_SSL=true to disable SSL)
# NO_SSL=true
# Docker Compose configuration
# Always include base.yml and rpc.yml, then add the networks you want
COMPOSE_FILE=base.yml:rpc.yml:ethereum-mainnet.yml
```
HARMONY QUERY
```
curl --location --request POST 'https://$DOMAIN/' --header 'Content-Type: application/json' --data-raw '{"jsonrpc": "2.0", "method": "hmy_latestHeader", "params": [], "id": 1}'
```
[HMY CLI Flags](https://docs.harmony.one/home/network/validators/node-setup/installing-updating/installing-node/using-binary#option-2-setup-using-flag-parsing)
## Usage
To start nodes defined in your `.env` file:
```bash
docker compose up -d
```
### Ports
The default ports are defined in the templates and are randomised to avoid conflicts. Some configurations can require 7 ports to be opened for P2P discovery. Note that Docker overrides any UFW firewall rule you define on the host, so you should prevent the containers from reaching out to other nodes on local IP ranges.
You can use the following service definition as a starting point. Replace {{ chains_subnet }} with the subnet of your network (the default is 192.168.0.0/26).
```
[Unit]
Description= iptables firewall docker fix
After=docker.service
[Service]
ExecStart=/usr/local/bin/iptables-firewall.sh start
RemainAfterExit=true
StandardOutput=journal
[Install]
WantedBy=multi-user.target
```
```bash
#!/bin/bash
PATH="/sbin:/usr/sbin:/bin:/usr/bin"
# Flush existing rules in the DOCKER-USER chain
# this is potentially dangerous if other scripts write in that chain too but for now this should be the only one
iptables -F DOCKER-USER
# block heise.de to test it's working. ./ping.sh heise.de will ping from a container in the subnet.
iptables -I DOCKER-USER -s {{ chains_subnet }} -d 193.99.144.80/32 -j REJECT
# block local networks
iptables -I DOCKER-USER -s {{ chains_subnet }} -d 192.168.0.0/16 -j REJECT
iptables -I DOCKER-USER -s {{ chains_subnet }} -d 172.16.0.0/12 -j REJECT
iptables -I DOCKER-USER -s {{ chains_subnet }} -d 10.0.0.0/8 -j REJECT
# accept the subnet so containers can reach each other.
iptables -I DOCKER-USER -s {{ chains_subnet }} -d {{ chains_subnet }} -j ACCEPT
# I don't know why that is
iptables -I DOCKER-USER -s {{ chains_subnet }} -d 10.13.13.0/24 -j ACCEPT
```
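One way to install the script and unit above could look like this; it is only a sketch, and the file names `iptables-firewall.sh` and `iptables-firewall.service` are assumptions derived from the unit's ExecStart path, not something the repository prescribes:
```bash
# Install the firewall script at the path referenced by the unit's ExecStart,
# then register and start the unit (file names are illustrative).
sudo install -m 0755 iptables-firewall.sh /usr/local/bin/iptables-firewall.sh
sudo cp iptables-firewall.service /etc/systemd/system/iptables-firewall.service
sudo systemctl daemon-reload
sudo systemctl enable --now iptables-firewall.service
```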
### Node Structure
In general, a node can consist of one or more of the following components:
- a client (execution layer)
- a node (for consensus)
- a relay (for data availability access)
- a database (external to the client, mostly for zk rollups; a setup can have multiple databases)
- a proxy (to map http access and websockets to the same endpoint)
The simplest examples have only a client. The compose files define one entrypoint to query the node: usually it's the client, otherwise it's the proxy. Some clients have multiple entrypoints because they allow querying both the consensus layer and the execution layer.
In the root folder of this repository you can find convenience yml files which are symlinks to specific compose files. The naming of the symlinks follows the pattern {network_name}-{chain_name}.yml, which leaves the client and db type unspecified so the defaults are used.
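For example, the `abstract-mainnet.yml` symlink added later in this change resolves to the pruned external-node compose file, with client and db type left to the defaults:
```bash
# Root-level convenience symlink following {network_name}-{chain_name}.yml
ls -l abstract-mainnet.yml
# abstract-mainnet.yml -> abstract/external-node/abstract-mainnet-external-node-pruned.yml
```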
### Syncing
The configurations aim to work standalone, restoring state as much as possible from public sources. Using snapshots can speed up syncing. For some configurations it is not reasonably possible to maintain a version that can be bootstrapped from scratch using only the compose file.
### Naming conventions
- default client is the default client for the network. Usually it's geth or op-geth.
- default sync mode is pruned. If available clients are snap synced.
- default node is op-node or prysm or whatever is the default for the network (e.g. beacon-kit for berachain, goat for goat, etc.)
- default sync mode for nodes is pruned
- default client for archive nodes is (op-)erigon or (op-)reth
- default sync mode for (op-)reth and (op-)erigon is archive-trace.
- default sync mode for erigon3 is pruned-trace.
- default db is postgres
- default proxy is nginx
#### Node features
The idea is to assume a default node configuration that is able to drive the execution client. If the beacon node database has special features, the file name includes those features after a double hyphen, e.g. `ethereum-mainnet-geth-pruned-pebble-hash--lighthouse-pruned-blobs.yml` would be a node with a pruned execution client and a pruned beacon node database that keeps the complete blob history.
#### Container names
The docker containers are generally named using the base name and the component suffix. The base name is generally the network name and the chain name, plus the sync mode `archive` in the case of archive nodes. The rationale is that it doesn't make sense to run 2 pruned nodes for the same chain on the same machine, nor 2 archive nodes for the same chain. The volumes created in /var/lib/docker/volumes use the full name of the node including the sync mode and database features. This allows switching out the implementation of parts of the configuration without causing conflicts, e.g. exchanging prysm for nimbus as the node implementation while keeping the same execution client. Environment variables also use the full name of the component they are defined for.
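For instance, the Abstract external-node archive configuration later in this diff yields the containers `abstract-mainnet-archive` (the nginx entrypoint), `abstract-mainnet-archive-client` and `abstract-mainnet-archive-db`, while its volumes carry the full names `abstract-mainnet-external-node-archive` and `abstract-mainnet-external-node-archive_db`.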
## Utility Scripts
This directory includes several useful scripts to help you manage and monitor your nodes:
### Status and Monitoring
- `show-status.sh [config-name]` - Check sync status of all configured nodes (or specific config if provided)
- `show-db-size.sh` - Display disk usage of all Docker volumes, sorted by size
- `show-networks.sh` - List all available network configurations
- `show-running.sh` - List currently running containers
- `sync-status.sh <config-name>` - Check synchronization status of a specific configuration
- `logs.sh <config-name>` - View logs of all containers for a specific configuration
- `latest.sh <config-name>` - Get the latest block number and hash of a local node
- `ping.sh <container-name>` - Test connectivity to a container from inside the Docker network
### Node Management
- `stop.sh <config-name>` - Stop all containers for a specific configuration
- `force-recreate.sh <config-name>` - Force recreate all containers for a specific configuration
- `backup-node.sh <config-name> [webdav_url]` - Backup Docker volumes for a configuration (locally or to WebDAV)
- `restore-volumes.sh <config-name> [http_url]` - Restore Docker volumes from backup (local or HTTP source)
- `cleanup-backups.sh` - Clean up old backup files
- `list-backups.sh` - List available backup files
- `op-wheel.sh` - Tool for Optimism rollup maintenance, including rewinding to a specific block
Note: `<config-name>` refers to the compose file name without the .yml extension (e.g., `ethereum-mainnet` for ethereum-mainnet.yml)
#### Nuclear option to recreate a node
```bash
./stop.sh <config-name> && ./rm.sh <config-name> && ./delete-volumes.sh <config-name> && ./force-recreate.sh <config-name> && ./logs.sh <config-name>
```
#### Debugging tips
To get the configuration name for one of the commands, use `./show-status.sh`, which lists all the configurations and their status so you can copy-paste a name for further inspection, e.g. with `./catchup.sh <config-name>` or repeated use of `./latest.sh <config-name>`, which will give you an idea whether the sync is actually progressing and whether it is on the canonical chain.
Note: some configurations use staged sync, which means there is no measurable progress on the RPC in between batches of processed blocks. In any case `./logs.sh <config-name>` will give you insights into problems, potentially filtered by an LLM to spot common errors. It could also be that clients are syncing slower than the chain progresses.
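A minimal way to watch sync progress, assuming `ethereum-mainnet` is one of your configurations:
```bash
# Re-run latest.sh every 30 seconds; a steadily increasing block number
# indicates the sync is progressing.
watch -n 30 ./latest.sh ethereum-mainnet
```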
#### Further automation
You can chain `./success-if-almost-synced.sh <config-name> <age-of-last-block-in-seconds-to-be-considered-almost-synced>` with other scripts to create more complex automation, e.g. notifying you once a node has synced up to chain head, adding the node to the dshackle configuration, or taking a backup to clone the node to a different server.
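A sketch of such a chain, assuming `./success-if-almost-synced.sh` exits non-zero while the node is still too far behind:
```bash
# If ethereum-mainnet is at most 60 seconds behind chain head, push a backup
# to another server's WebDAV endpoint (placeholder URL); otherwise do nothing.
./success-if-almost-synced.sh ethereum-mainnet 60 && \
  ./backup-node.sh ethereum-mainnet https://serverA.domain.tld/dav
```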
#### OP Wheel Usage Example
Be aware that this is dangerous: it skips every check that your rollup's op-geth execution client database is consistent.
```bash
# Rewind an Optimism rollup to a specific block
./op-wheel.sh engine set-forkchoice --unsafe=0x111AC7F --safe=0x111AC7F --finalized=0x111AC7F \
--engine=http://op-lisk-sepolia:8551/ --engine.open=http://op-lisk-sepolia:8545 \
--engine.jwt-secret-path=/jwtsecret
```
Nuclear option:
```bash
# Finalize the latest locally available block of an Optimism rollup
./op-wheel-finalize-latest-block.sh <client_service_name> (<node_service_name>)
```
Where `<client_service_name>` is the name of the client service in the compose file and `<node_service_name>` is the name of the node service in the compose file which defaults to `<client_service_name>-node`.
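For example, for the Lisk Sepolia rollup used in the op-wheel example above, the call might be (service names are illustrative; adjust them to your compose file):
```bash
# Finalize the latest locally available block; the node service name defaults
# to op-lisk-sepolia-node when omitted.
./op-wheel-finalize-latest-block.sh op-lisk-sepolia
```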
## SSL Certificates and IP Configuration
### Public IP Configuration
Many blockchain nodes require your public IP address to function properly:
1. Get your public IP address:
```bash
curl ipinfo.io/ip
```
2. Add this IP to your `.env` file:
```bash
IP=203.0.113.42 # Your actual public IP
```
3. This IP is used by several chains for P2P discovery and network communication
### SSL Certificates with Traefik
This system uses Traefik as a reverse proxy for SSL certificates:
1. By default, certificates are obtained from Let's Encrypt
2. Use your **public** IP address with traefik.me by replacing dots with hyphens
```
# If your public IP is 203.0.113.42
DOMAIN=203-0-113-42.traefik.me
```
3. Traefik.me automatically generates valid SSL certificates for this domain
4. For production, use your own domain and set MAIL for Let's Encrypt notifications
5. To disable SSL, set `NO_SSL=true` in your .env file
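A production-style variant of the relevant `.env` entries might look like this (domain and mail address are placeholders):
```bash
DOMAIN=rpc.example.com      # your own domain instead of the traefik.me fallback
MAIL=admin@example.com      # used by Let's Encrypt for certificate notifications
# NO_SSL=true               # leave commented out to keep HTTPS enabled
```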
## Configuration
Each network configuration includes:
- Node client software (Geth, Erigon, etc.)
- Synchronization type (archive or pruned)
- Database backend and configuration
- Network-specific parameters
## Accessing RPC Endpoints
Once your nodes are running, you can access the RPC endpoints at:
- HTTPS: `https://yourdomain.tld/ethereum` (or other network paths)
- HTTP: `http://yourdomain.tld/ethereum` (or other network paths)
- WebSocket: `wss://yourdomain.tld/ethereum` (same URL as HTTP/HTTPS)
All services use standard ports (80 for HTTP, 443 for HTTPS), so no port specification is required in the URL.
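For example, querying the Ethereum mainnet endpoint through Traefik (replace `yourdomain.tld` with your own domain):
```bash
curl -X POST https://yourdomain.tld/ethereum \
  -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
```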
## Resource Requirements
Different node types have different hardware requirements:
- Pruned Ethereum node: ~500GB disk, 8GB RAM
- Archive Ethereum node: ~2TB disk, 16GB RAM
- L2 nodes typically require fewer resources than L1 nodes
- Consider using SSD or NVMe storage for better performance
## DRPC Integration
This system includes support for DRPC (Decentralized RPC) integration, allowing you to monetize your RPC nodes by selling excess capacity:
### Setting Up DRPC
1. Add `drpc.yml` to your `COMPOSE_FILE` variable in `.env`
2. Configure the following variables in your `.env` file:
```
GW_DOMAIN=your-gateway-domain.com
GW_REDIS_RAM=2gb # Memory allocation for Redis
DRPC_VERSION=0.64.16 # Or latest version
```
3. Generate the upstream configurations for dshackle:
```bash
# Using domain URLs (default)
./upstreams.sh
```
The `upstreams.sh` script automatically detects all running nodes on your machine and generates the appropriate configuration for the dshackle load balancer. This allows you to connect your nodes to drpc.org and sell RPC capacity.
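Putting it together, a `.env` selection with DRPC enabled might look like this (`ethereum-mainnet.yml` is just an example node configuration):
```bash
COMPOSE_FILE=base.yml:rpc.yml:ethereum-mainnet.yml:drpc.yml
```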
For more information about DRPC, visit [drpc.org](https://drpc.org/).
## Supported Networks
This repository supports a comprehensive range of blockchain networks:
### Layer 1 Networks
- **Major Networks**: Ethereum (Mainnet, Sepolia, Holesky), BSC, Polygon, Avalanche, Gnosis
- **Alternative L1s**: Fantom, Core, Berachain, Ronin, Viction, Fuse, Tron, ThunderCore
- **Emerging L1s**: Goat, AlephZero, Haqq, Taiko, Rootstock
### Layer 2 Networks
- **OP Stack**: Optimism, Base, Zora, Mode, Blast, Fraxtal, Bob, Boba, Worldchain, Metal, Ink, Lisk, SNAX, Celo
- **Arbitrum Ecosystem**: Arbitrum One, Arbitrum Nova, Everclear, Playblock, Real, Connext, OpenCampusCodex
- **Other L2s**: Linea, Scroll, zkSync Era, Metis, Moonbeam
Most networks support multiple node implementations (Geth, Erigon, Reth) and environments (mainnet, testnet).
## Backup and Restore System
This repository includes a comprehensive backup and restore system for Docker volumes:
### Local Backups
- `backup-node.sh <config-name>` - Create a backup of all volumes for a configuration to the `/backup` directory
- `restore-volumes.sh <config-name>` - Restore volumes from the latest backup in the `/backup` directory
### Remote Backups
To serve backups via HTTP and WebDAV:
1. Add `backup-http.yml` to your `COMPOSE_FILE` variable in `.env`
2. This exposes:
- HTTP access to backups at `https://yourdomain.tld/backup`
- WebDAV access at `https://yourdomain.tld/dav`
### Cross-Server Backup and Restore
For multi-server setups:
1. On server A: Include `backup-http.yml` in `COMPOSE_FILE` to serve backups
2. On server B: Use restore from server A's backups:
```bash
# Restore directly from server A
./restore-volumes.sh ethereum-mainnet https://serverA.domain.tld/backup/
```
3. Create backups on server B and send to server A via WebDAV:
```bash
# Backup to server A's WebDAV
./backup-node.sh ethereum-mainnet https://serverA.domain.tld/dav
```
This allows for efficient volume transfers between servers without needing SSH access.

1
abstract-mainnet.yml Symbolic link
View File

@@ -0,0 +1 @@
abstract/external-node/abstract-mainnet-external-node-pruned.yml

1
abstract-testnet.yml Symbolic link
View File

@@ -0,0 +1 @@
abstract/external-node/abstract-testnet-external-node-pruned.yml

View File

@@ -0,0 +1,172 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:abstract/external-node/abstract-mainnet-external-node-archive.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/abstract-mainnet-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
abstract-mainnet-archive-client:
image: ${ABSTRACT_EXTERNAL_NODE_IMAGE:-matterlabs/external-node}:${ABSTRACT_MAINNET_EXTERNAL_NODE_VERSION:-v27.5.7}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
ports:
- 12612:12612
- 12612:12612/udp
expose:
- 8545
- 8546
environment:
- DATABASE_POOL_SIZE=50
- DATABASE_URL=postgres://postgres:notsecurepassword@abstract-mainnet-archive-db:5430/zksync_local_ext_node
- EN_API_NAMESPACES=eth,net,web3,debug,pubsub,zks
- EN_ETH_CLIENT_URL=${ETHEREUM_MAINNET_EXECUTION_RPC}
- EN_HEALTHCHECK_PORT=3081
- EN_HTTP_PORT=8545
- EN_L1_CHAIN_ID=1
- EN_L2_CHAIN_ID=2741
- EN_MAIN_NODE_URL=https://api.mainnet.abs.xyz
- EN_MAX_RESPONSE_BODY_SIZE_MB=30
- EN_MAX_RESPONSE_BODY_SIZE_OVERRIDES_MB=eth_getLogs=100,eth_getBlockReceipts=None
- EN_MERKLE_TREE_PATH=./db/ext-node/lightweight
- EN_PROMETHEUS_PORT=3322
- EN_PRUNING_ENABLED=false
- EN_REQ_ENTITIES_LIMIT=100000
- EN_SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL=raas-abstract-mainnet-external-node-snapshots
- EN_SNAPSHOTS_OBJECT_STORE_MODE=GCSAnonymousReadOnly
- EN_SNAPSHOTS_RECOVERY_ENABLED=true
- EN_STATE_CACHE_PATH=./db/ext-node/state_keeper
- EN_WS_PORT=8546
- RUST_LOG=warn,zksync=info,zksync_core::metadata_calculator=debug,zksync_state=info,zksync_utils=info,zksync_web3_decl::client=error
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ABSTRACT_MAINNET_EXTERNAL_NODE_ARCHIVE_DATA:-abstract-mainnet-external-node-archive}:/db
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
abstract-mainnet-archive:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: abstract-mainnet-archive-client
RPC_PATH: ''
RPC_PORT: 8545
WS_PATH: ''
WS_PORT: 8546
restart: unless-stopped
depends_on:
- abstract-mainnet-archive-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.abstract-mainnet-external-node-archive-stripprefix.stripprefix.prefixes=/abstract-mainnet-archive
- traefik.http.services.abstract-mainnet-external-node-archive.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-archive.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-archive.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-archive.rule=Host(`$DOMAIN`) && (Path(`/abstract-mainnet-archive`) || Path(`/abstract-mainnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.abstract-mainnet-external-node-archive.rule=Path(`/abstract-mainnet-archive`) || Path(`/abstract-mainnet-archive/`)}
- traefik.http.routers.abstract-mainnet-external-node-archive.middlewares=abstract-mainnet-external-node-archive-stripprefix, ipallowlist
abstract-mainnet-archive-db:
image: postgres:14
expose:
- 5430
environment:
- PGPORT=5430
- POSTGRES_PASSWORD=notsecurepassword
command: >
postgres
-c max_connections=200
-c log_error_verbosity=terse
-c shared_buffers=2GB
-c effective_cache_size=4GB
-c maintenance_work_mem=1GB
-c checkpoint_completion_target=0.9
-c random_page_cost=1.1
-c effective_io_concurrency=200
-c min_wal_size=4GB
-c max_wal_size=16GB
-c max_worker_processes=16
-c checkpoint_timeout=1800
networks:
- chains
volumes:
- ${ABSTRACT_MAINNET_EXTERNAL_NODE_ARCHIVE__DB_DATA:-abstract-mainnet-external-node-archive_db}:/var/lib/postgresql/data
healthcheck:
interval: 1s
timeout: 3s
test: [CMD-SHELL, psql -U postgres -c "select exists (select * from pg_stat_activity where datname = '' and application_name = 'pg_restore')" | grep -e ".f$$"]
logging: *logging-defaults
volumes:
abstract-mainnet-external-node-archive:
abstract-mainnet-external-node-archive_db:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: abstract
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...

View File

@@ -0,0 +1,172 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:abstract/external-node/abstract-mainnet-external-node-pruned.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/abstract-mainnet \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
abstract-mainnet-client:
image: ${ABSTRACT_EXTERNAL_NODE_IMAGE:-matterlabs/external-node}:${ABSTRACT_MAINNET_EXTERNAL_NODE_VERSION:-v27.5.7}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
ports:
- 14370:14370
- 14370:14370/udp
expose:
- 8545
- 8546
environment:
- DATABASE_POOL_SIZE=50
- DATABASE_URL=postgres://postgres:notsecurepassword@abstract-mainnet-db:5430/zksync_local_ext_node
- EN_API_NAMESPACES=eth,net,web3,debug,pubsub,zks
- EN_ETH_CLIENT_URL=${ETHEREUM_MAINNET_EXECUTION_RPC}
- EN_HEALTHCHECK_PORT=3081
- EN_HTTP_PORT=8545
- EN_L1_CHAIN_ID=1
- EN_L2_CHAIN_ID=2741
- EN_MAIN_NODE_URL=https://api.mainnet.abs.xyz
- EN_MAX_RESPONSE_BODY_SIZE_MB=30
- EN_MAX_RESPONSE_BODY_SIZE_OVERRIDES_MB=eth_getLogs=100,eth_getBlockReceipts=None
- EN_MERKLE_TREE_PATH=./db/ext-node/lightweight
- EN_PROMETHEUS_PORT=3322
- EN_PRUNING_ENABLED=true
- EN_REQ_ENTITIES_LIMIT=100000
- EN_SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL=raas-abstract-mainnet-external-node-snapshots
- EN_SNAPSHOTS_OBJECT_STORE_MODE=GCSAnonymousReadOnly
- EN_SNAPSHOTS_RECOVERY_ENABLED=true
- EN_STATE_CACHE_PATH=./db/ext-node/state_keeper
- EN_WS_PORT=8546
- RUST_LOG=warn,zksync=info,zksync_core::metadata_calculator=debug,zksync_state=info,zksync_utils=info,zksync_web3_decl::client=error
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ABSTRACT_MAINNET_EXTERNAL_NODE_PRUNED_DATA:-abstract-mainnet-external-node-pruned}:/db
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
abstract-mainnet:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: abstract-mainnet-client
RPC_PATH: ''
RPC_PORT: 8545
WS_PATH: ''
WS_PORT: 8546
restart: unless-stopped
depends_on:
- abstract-mainnet-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.abstract-mainnet-external-node-pruned-stripprefix.stripprefix.prefixes=/abstract-mainnet
- traefik.http.services.abstract-mainnet-external-node-pruned.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-pruned.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-pruned.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.abstract-mainnet-external-node-pruned.rule=Host(`$DOMAIN`) && (Path(`/abstract-mainnet`) || Path(`/abstract-mainnet/`))}
- ${NO_SSL:+traefik.http.routers.abstract-mainnet-external-node-pruned.rule=Path(`/abstract-mainnet`) || Path(`/abstract-mainnet/`)}
- traefik.http.routers.abstract-mainnet-external-node-pruned.middlewares=abstract-mainnet-external-node-pruned-stripprefix, ipallowlist
abstract-mainnet-db:
image: postgres:14
expose:
- 5430
environment:
- PGPORT=5430
- POSTGRES_PASSWORD=notsecurepassword
command: >
postgres
-c max_connections=200
-c log_error_verbosity=terse
-c shared_buffers=2GB
-c effective_cache_size=4GB
-c maintenance_work_mem=1GB
-c checkpoint_completion_target=0.9
-c random_page_cost=1.1
-c effective_io_concurrency=200
-c min_wal_size=4GB
-c max_wal_size=16GB
-c max_worker_processes=16
-c checkpoint_timeout=1800
networks:
- chains
volumes:
- ${ABSTRACT_MAINNET_EXTERNAL_NODE_PRUNED__DB_DATA:-abstract-mainnet-external-node-pruned_db}:/var/lib/postgresql/data
healthcheck:
interval: 1s
timeout: 3s
test: [CMD-SHELL, psql -U postgres -c "select exists (select * from pg_stat_activity where datname = '' and application_name = 'pg_restore')" | grep -e ".f$$"]
logging: *logging-defaults
volumes:
abstract-mainnet-external-node-pruned:
abstract-mainnet-external-node-pruned_db:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: abstract
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...

View File

@@ -0,0 +1,172 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:abstract/external-node/abstract-testnet-external-node-archive.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/abstract-testnet-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
abstract-testnet-archive-client:
image: ${ABSTRACT_EXTERNAL_NODE_IMAGE:-matterlabs/external-node}:${ABSTRACT_TESTNET_EXTERNAL_NODE_VERSION:-v28.2.1}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
ports:
- 14028:14028
- 14028:14028/udp
expose:
- 8545
- 8546
environment:
- DATABASE_POOL_SIZE=50
- DATABASE_URL=postgres://postgres:notsecurepassword@abstract-testnet-archive-db:5430/zksync_local_ext_node
- EN_API_NAMESPACES=eth,net,web3,debug,pubsub,zks
- EN_ETH_CLIENT_URL=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- EN_HEALTHCHECK_PORT=3081
- EN_HTTP_PORT=8545
- EN_L1_CHAIN_ID=11155111
- EN_L2_CHAIN_ID=11124
- EN_MAIN_NODE_URL=https://api.testnet.abs.xyz
- EN_MAX_RESPONSE_BODY_SIZE_MB=30
- EN_MAX_RESPONSE_BODY_SIZE_OVERRIDES_MB=eth_getLogs=100,eth_getBlockReceipts=None
- EN_MERKLE_TREE_PATH=./db/ext-node/lightweight
- EN_PROMETHEUS_PORT=3322
- EN_PRUNING_ENABLED=false
- EN_REQ_ENTITIES_LIMIT=100000
- EN_SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL=abstract-testnet-external-node-snapshots
- EN_SNAPSHOTS_OBJECT_STORE_MODE=GCSAnonymousReadOnly
- EN_SNAPSHOTS_RECOVERY_ENABLED=true
- EN_STATE_CACHE_PATH=./db/ext-node/state_keeper
- EN_WS_PORT=8546
- RUST_LOG=warn,zksync=info,zksync_core::metadata_calculator=debug,zksync_state=info,zksync_utils=info,zksync_web3_decl::client=error
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ABSTRACT_TESTNET_EXTERNAL_NODE_ARCHIVE_DATA:-abstract-testnet-external-node-archive}:/db
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
abstract-testnet-archive:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: abstract-testnet-archive-client
RPC_PATH: ''
RPC_PORT: 8545
WS_PATH: ''
WS_PORT: 8546
restart: unless-stopped
depends_on:
- abstract-testnet-archive-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.abstract-testnet-external-node-archive-stripprefix.stripprefix.prefixes=/abstract-testnet-archive
- traefik.http.services.abstract-testnet-external-node-archive.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-archive.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-archive.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-archive.rule=Host(`$DOMAIN`) && (Path(`/abstract-testnet-archive`) || Path(`/abstract-testnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.abstract-testnet-external-node-archive.rule=Path(`/abstract-testnet-archive`) || Path(`/abstract-testnet-archive/`)}
- traefik.http.routers.abstract-testnet-external-node-archive.middlewares=abstract-testnet-external-node-archive-stripprefix, ipallowlist
abstract-testnet-archive-db:
image: postgres:14
expose:
- 5430
environment:
- PGPORT=5430
- POSTGRES_PASSWORD=notsecurepassword
command: >
postgres
-c max_connections=200
-c log_error_verbosity=terse
-c shared_buffers=2GB
-c effective_cache_size=4GB
-c maintenance_work_mem=1GB
-c checkpoint_completion_target=0.9
-c random_page_cost=1.1
-c effective_io_concurrency=200
-c min_wal_size=4GB
-c max_wal_size=16GB
-c max_worker_processes=16
-c checkpoint_timeout=1800
networks:
- chains
volumes:
- ${ABSTRACT_TESTNET_EXTERNAL_NODE_ARCHIVE__DB_DATA:-abstract-testnet-external-node-archive_db}:/var/lib/postgresql/data
healthcheck:
interval: 1s
timeout: 3s
test: [CMD-SHELL, psql -U postgres -c "select exists (select * from pg_stat_activity where datname = '' and application_name = 'pg_restore')" | grep -e ".f$$"]
logging: *logging-defaults
volumes:
abstract-testnet-external-node-archive:
abstract-testnet-external-node-archive_db:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: abstract-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...

View File

@@ -0,0 +1,172 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:abstract/external-node/abstract-testnet-external-node-pruned.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/abstract-testnet \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
abstract-testnet-client:
image: ${ABSTRACT_EXTERNAL_NODE_IMAGE:-matterlabs/external-node}:${ABSTRACT_TESTNET_EXTERNAL_NODE_VERSION:-v28.2.1}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
ports:
- 12157:12157
- 12157:12157/udp
expose:
- 8545
- 8546
environment:
- DATABASE_POOL_SIZE=50
- DATABASE_URL=postgres://postgres:notsecurepassword@abstract-testnet-db:5430/zksync_local_ext_node
- EN_API_NAMESPACES=eth,net,web3,debug,pubsub,zks
- EN_ETH_CLIENT_URL=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- EN_HEALTHCHECK_PORT=3081
- EN_HTTP_PORT=8545
- EN_L1_CHAIN_ID=11155111
- EN_L2_CHAIN_ID=11124
- EN_MAIN_NODE_URL=https://api.testnet.abs.xyz
- EN_MAX_RESPONSE_BODY_SIZE_MB=30
- EN_MAX_RESPONSE_BODY_SIZE_OVERRIDES_MB=eth_getLogs=100,eth_getBlockReceipts=None
- EN_MERKLE_TREE_PATH=./db/ext-node/lightweight
- EN_PROMETHEUS_PORT=3322
- EN_PRUNING_ENABLED=true
- EN_REQ_ENTITIES_LIMIT=100000
- EN_SNAPSHOTS_OBJECT_STORE_BUCKET_BASE_URL=abstract-testnet-external-node-snapshots
- EN_SNAPSHOTS_OBJECT_STORE_MODE=GCSAnonymousReadOnly
- EN_SNAPSHOTS_RECOVERY_ENABLED=true
- EN_STATE_CACHE_PATH=./db/ext-node/state_keeper
- EN_WS_PORT=8546
- RUST_LOG=warn,zksync=info,zksync_core::metadata_calculator=debug,zksync_state=info,zksync_utils=info,zksync_web3_decl::client=error
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ABSTRACT_TESTNET_EXTERNAL_NODE_PRUNED_DATA:-abstract-testnet-external-node-pruned}:/db
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
abstract-testnet:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: abstract-testnet-client
RPC_PATH: ''
RPC_PORT: 8545
WS_PATH: ''
WS_PORT: 8546
restart: unless-stopped
depends_on:
- abstract-testnet-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.abstract-testnet-external-node-pruned-stripprefix.stripprefix.prefixes=/abstract-testnet
- traefik.http.services.abstract-testnet-external-node-pruned.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-pruned.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-pruned.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.abstract-testnet-external-node-pruned.rule=Host(`$DOMAIN`) && (Path(`/abstract-testnet`) || Path(`/abstract-testnet/`))}
- ${NO_SSL:+traefik.http.routers.abstract-testnet-external-node-pruned.rule=Path(`/abstract-testnet`) || Path(`/abstract-testnet/`)}
- traefik.http.routers.abstract-testnet-external-node-pruned.middlewares=abstract-testnet-external-node-pruned-stripprefix, ipallowlist
abstract-testnet-db:
image: postgres:14
expose:
- 5430
environment:
- PGPORT=5430
- POSTGRES_PASSWORD=notsecurepassword
command: >
postgres
-c max_connections=200
-c log_error_verbosity=terse
-c shared_buffers=2GB
-c effective_cache_size=4GB
-c maintenance_work_mem=1GB
-c checkpoint_completion_target=0.9
-c random_page_cost=1.1
-c effective_io_concurrency=200
-c min_wal_size=4GB
-c max_wal_size=16GB
-c max_worker_processes=16
-c checkpoint_timeout=1800
networks:
- chains
volumes:
- ${ABSTRACT_TESTNET_EXTERNAL_NODE_PRUNED__DB_DATA:-abstract-testnet-external-node-pruned_db}:/var/lib/postgresql/data
healthcheck:
interval: 1s
timeout: 3s
test: [CMD-SHELL, psql -U postgres -c "select exists (select * from pg_stat_activity where datname = '' and application_name = 'pg_restore')" | grep -e ".f$$"]
logging: *logging-defaults
volumes:
abstract-testnet-external-node-pruned:
abstract-testnet-external-node-pruned_db:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: abstract-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...

View File

@@ -0,0 +1 @@
arb/nitro/alephzero-mainnet-nitro-archive-pebble-hash.yml

alephzero-mainnet.yml Symbolic link
View File

@@ -0,0 +1 @@
arb/nitro/alephzero-mainnet-nitro-pruned-pebble-path.yml

View File

@@ -0,0 +1 @@
arb/nitro/alephzero-sepolia-nitro-archive-leveldb-hash.yml

alephzero-sepolia.yml Symbolic link
View File

@@ -0,0 +1 @@
arb/nitro/alephzero-sepolia-nitro-pruned-pebble-path.yml

View File

@@ -0,0 +1,5 @@
{
"chain": {
"info-json": "[{\"chain-id\":41455,\"parent-chain-id\":1,\"parent-chain-is-arbitrum\":false,\"chain-config\":{\"homesteadBlock\":0,\"daoForkBlock\":null,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"berlinBlock\":0,\"londonBlock\":0,\"clique\":{\"period\":0,\"epoch\":0},\"arbitrum\":{\"EnableArbOS\":true,\"AllowDebugPrecompiles\":false,\"DataAvailabilityCommittee\":true,\"InitialArbOSVersion\":20,\"GenesisBlockNum\":0,\"MaxCodeSize\":98304,\"MaxInitCodeSize\":49152,\"InitialChainOwner\":\"0x257812604076712675ae9788F5Bd738173CA3CE0\"},\"chainId\":41455},\"rollup\":{\"bridge\":\"0x41Ec9456AB918f2aBA81F38c03Eb0B93b78E84d9\",\"inbox\":\"0x56D8EC76a421063e1907503aDd3794c395256AEb\",\"sequencer-inbox\":\"0xF75206c49c1694594E3e69252E519434f1579876\",\"rollup\":\"0x1CA12290D954CFe022323b6A6Df92113ed6b1C98\",\"validator-utils\":\"0x2b0E04Dc90e3fA58165CB41E2834B44A56E766aF\",\"validator-wallet-creator\":\"0x9CAd81628aB7D8e239F1A5B497313341578c5F71\",\"deployed-at\":20412468}}]"
}
}

View File

@@ -0,0 +1,6 @@
{
"chain": {
"info-json": "[{\"chain-id\":2039,\"parent-chain-id\":11155111,\"parent-chain-is-arbitrum\":false,\"chain-name\":\"Aleph Zero EVM testnet\",\"chain-config\":{\"homesteadBlock\":0,\"daoForkBlock\":null,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"berlinBlock\":0,\"londonBlock\":0,\"clique\":{\"period\":0,\"epoch\":0},\"arbitrum\":{\"EnableArbOS\":true,\"AllowDebugPrecompiles\":false,\"DataAvailabilityCommittee\":true,\"InitialArbOSVersion\":20,\"GenesisBlockNum\":0,\"MaxCodeSize\":98304,\"MaxInitCodeSize\":49152,\"InitialChainOwner\":\"0x4c6dfF3e40e82a1fB599e062051726a9f7808a18\"},\"chainId\":2039},\"rollup\":{\"bridge\":\"0xCB5c0B38C45Fad0C20591E26b0b3C3809123994A\",\"inbox\":\"0xb27fd27987a71a6B77Fb8705bFb6010C411083EB\",\"sequencer-inbox\":\"0x16Ef70c48EF4BaaCfdaa4AfdD37F69332832a0bD\",\"rollup\":\"0xC8C08A4DbbF3367c8441151591c3d935947CB42F\",\"validator-utils\":\"0xb33Dca7b17c72CFC311D68C543cd4178E0d7ce55\",\"validator-wallet-creator\":\"0x75500812ADC9E51b721BEa31Df322EEc66967DDF\",\"deployed-at\":5827184}}]",
"name": "Aleph Zero EVM testnet"
}
}

View File

@@ -0,0 +1,113 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/arbnode/arbitrum-one-arbnode-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-one-arbnode-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-one-arbnode-archive:
image: ${ARBITRUM_ARBNODE_IMAGE:-offchainlabs/arb-node}:${ARBITRUM_ONE_ARBNODE_VERSION:-v1.4.5-e97c1a4}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
- 8546
entrypoint: [/home/user/go/bin/arb-node]
command:
- --core.checkpoint-gas-frequency=156250000
- --l1.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --l2.disable-upstream
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=7070
- --node.cache.allow-slow-lookup
- --node.chain-id=42161
- --node.rpc.addr=0.0.0.0
- --node.rpc.enable-l1-calls
- --node.rpc.port=8545
- --node.rpc.tracing.enable
- --node.rpc.tracing.namespace=trace
- --node.ws.addr=0.0.0.0
- --node.ws.port=8546
- --persistent.chain=/data/datadir/
- --persistent.global-config=/data/
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_ARBNODE_ARCHIVE_LEVELDB_HASH_DATA:-arbitrum-one-arbnode-archive-leveldb-hash}:/data
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-arbnode-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/arbitrum-one-arbnode-archive
- traefik.http.services.arbitrum-one-arbnode-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-one-arbnode-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-arbnode-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-arbnode-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one-arbnode-archive`) || Path(`/arbitrum-one-arbnode-archive/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-arbnode-archive-leveldb-hash.rule=Path(`/arbitrum-one-arbnode-archive`) || Path(`/arbitrum-one-arbnode-archive/`)}
- traefik.http.routers.arbitrum-one-arbnode-archive-leveldb-hash.middlewares=arbitrum-one-arbnode-archive-leveldb-hash-stripprefix, ipallowlist
volumes:
arbitrum-one-arbnode-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...

View File

@@ -0,0 +1,6 @@
{
"chain": {
"info-json": "[{\"chain-id\":6398,\"parent-chain-id\":11155111,\"parent-chain-is-arbitrum\":false,\"chain-name\":\"Connext Sepolia\",\"chain-config\":{\"homesteadBlock\":0,\"daoForkBlock\":null,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"berlinBlock\":0,\"londonBlock\":0,\"clique\":{\"period\":0,\"epoch\":0},\"arbitrum\":{\"EnableArbOS\":true,\"AllowDebugPrecompiles\":false,\"DataAvailabilityCommittee\":true,\"InitialArbOSVersion\":20,\"GenesisBlockNum\":0,\"MaxCodeSize\":24576,\"MaxInitCodeSize\":49152,\"InitialChainOwner\":\"0x8ECD393576Ca37a7e5095f31bdfE21F606FF5F75\"},\"chainId\":6398},\"rollup\":{\"bridge\":\"0xf0b58FA876005898798a66A04EE09159C199CB7A\",\"inbox\":\"0x7bc7DAF843bf57c54D41D912F8221A2eE830c320\",\"sequencer-inbox\":\"0x7f5C1a58014E9DE69663CAc441bfa4C5d94b7E64\",\"rollup\":\"0xE6D7bf11A6264BACa59e8fAD7f6985FaC8f62e60\",\"validator-utils\":\"0xb33Dca7b17c72CFC311D68C543cd4178E0d7ce55\",\"validator-wallet-creator\":\"0x75500812ADC9E51b721BEa31Df322EEc66967DDF\",\"deployed-at\":5780509}}]",
"name": "Connext Sepolia"
}
}

View File

@@ -0,0 +1,6 @@
{
"chain": {
"name": "Everclear Mainnet",
"info-json": "[{\"chain-id\":25327,\"parent-chain-id\":1,\"parent-chain-is-arbitrum\":false,\"chain-name\":\"Everclear Mainnet\",\"chain-config\":{\"homesteadBlock\":0,\"daoForkBlock\":null,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"berlinBlock\":0,\"londonBlock\":0,\"clique\":{\"period\":0,\"epoch\":0},\"arbitrum\":{\"EnableArbOS\":true,\"AllowDebugPrecompiles\":false,\"DataAvailabilityCommittee\":true,\"InitialArbOSVersion\":20,\"GenesisBlockNum\":0,\"MaxCodeSize\":24576,\"MaxInitCodeSize\":49152,\"InitialChainOwner\":\"0x98a426C8ED821cAaef1b4BF7D29b514dcef970C0\"},\"chainId\":25327},\"rollup\":{\"bridge\":\"0x4eb4fB614e1aa3634513319F4Ec7334bC4321356\",\"inbox\":\"0x97FdC935c5E25613AA13a054C7Aa71cf751DB495\",\"sequencer-inbox\":\"0x7B0517E0104dB60198f9d573C0aB8d480207827E\",\"rollup\":\"0xc6CAd31D83E33Fc8fBc855f36ef9Cb2fCE070f5C\",\"validator-utils\":\"0x2b0E04Dc90e3fA58165CB41E2834B44A56E766aF\",\"validator-wallet-creator\":\"0x9CAd81628aB7D8e239F1A5B497313341578c5F71\",\"deployed-at\":20684364}}]"
}
}

View File

@@ -0,0 +1,145 @@
---
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/fireeth/arbitrum-one-fireeth-pruned-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-one \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: 10m
max-file: '3'
services:
arbitrum-one:
image: ${ARBITRUM_FIREETH_IMAGE:-ghcr.io/streamingfast/firehose-ethereum}:${ARBITRUM_ONE_FIREETH_VERSION:-v2.11.7-nitro-nitro-v3.5.5-fh3.0}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
environment:
- ${ARBITRUM_ONE_FIREETH_PRUNED_PEBBLE_HASH_S3_BLOCKS_STORE:-/firehose-data/storage/merged-blocks}
entrypoint: [sh, -c, 'exec fireeth -c /config/firehose.yml start --substreams-rpc-endpoints "${ARBITRUM_ONE_EXECUTION_RPC}" --reader-node-arguments "$*"', _]
command:
- --execution.caching.archive=false
- --execution.caching.state-scheme=hash
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --firehose-enabled
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,debug,admin,txpool,engine
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=pruned
- --persistent.chain=/firehose-data/reader/data/arbitrum-one
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_FIREETH_PRUNED_PEBBLE_HASH_DATA:-arbitrum-one-fireeth-pruned-pebble-hash}:/firehose-data
- ${ARBITRUM_ONE_FIREETH_PRUNED_PEBBLE_HASH_MERGED_BLOCKS_DATA:-arbitrum-one-fireeth-pruned-pebble-hash-blocks}:/firehose-data/storage/merged-blocks
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-fireeth-pruned-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-one
- traefik.http.services.arbitrum-one-fireeth-pruned-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one`) || Path(`/arbitrum-one/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash.rule=Path(`/arbitrum-one`) || Path(`/arbitrum-one/`)}
- traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash.middlewares=arbitrum-one-fireeth-pruned-pebble-hash-stripprefix, ipallowlist
- traefik.http.services.arbitrum-one-fireeth-pruned-pebble-hash-firehose.loadbalancer.server.scheme=h2c
- traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-firehose.service=arbitrum-one-fireeth-pruned-pebble-hash-firehose
- traefik.http.services.arbitrum-one-fireeth-pruned-pebble-hash-firehose.loadbalancer.server.port=10015
- traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-firehose.entrypoints=grpc
- ${NO_SSL:-traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-firehose.tls.certresolver=myresolver}
- traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-firehose.rule=Host(`arbitrum-one.${DOMAIN}`)
- traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-firehose.middlewares=ipallowlist
- traefik.http.services.arbitrum-one-fireeth-pruned-pebble-hash-substreams.loadbalancer.server.scheme=h2c
- traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-substreams.service=arbitrum-one-fireeth-pruned-pebble-hash-substreams
- traefik.http.services.arbitrum-one-fireeth-pruned-pebble-hash-substreams.loadbalancer.server.port=10016
- traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-substreams.entrypoints=grpc
- ${NO_SSL:-traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-substreams.tls.certresolver=myresolver}
- traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-substreams.rule=Host(`arbitrum-one-substreams.${DOMAIN}`)
- traefik.http.routers.arbitrum-one-fireeth-pruned-pebble-hash-substreams.middlewares=ipallowlist
volumes:
arbitrum-one-fireeth-pruned-pebble-hash:
arbitrum-one-fireeth-pruned-pebble-hash-blocks:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...

View File

@@ -0,0 +1,100 @@
---
services:
alephzero-mainnet-archive:
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_MAINNET_NITRO_VERSION:-v3.5.3-0a9c975}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.caching.state-scheme=hash
- --execution.forwarding-target=https://rpc.alephzero.raas.gelato.cloud
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.alephzero.raas.gelato.cloud
- --node.data-availability.sequencer-inbox-address=0xF75206c49c1694594E3e69252E519434f1579876
- --node.feed.input.url=wss://feed.alephzero.raas.gelato.cloud
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/alephzero-mainnet-archive
- --persistent.db-engine=leveldb
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ALEPHZERO_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-alephzero-mainnet-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./arb/alephzero/mainnet:/config
- /slowdisk:/slowdisk
labels:
- traefik.enable=true
- traefik.http.middlewares.alephzero-mainnet-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/alephzero-mainnet-archive
- traefik.http.services.alephzero-mainnet-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.alephzero-mainnet-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.alephzero-mainnet-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.alephzero-mainnet-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && PathPrefix(`/alephzero-mainnet-archive`)}
- ${NO_SSL:+traefik.http.routers.alephzero-mainnet-nitro-archive-leveldb-hash.rule=PathPrefix(`/alephzero-mainnet-archive`)}
- traefik.http.routers.alephzero-mainnet-nitro-archive-leveldb-hash.middlewares=alephzero-mainnet-nitro-archive-leveldb-hash-stripprefix, ipwhitelist
volumes:
alephzero-mainnet-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
chain: alephzero
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...

View File

@@ -0,0 +1,143 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/alephzero-mainnet-nitro-archive-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/alephzero-mainnet-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
alephzero-mainnet-archive:
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.forwarding-target=https://rpc.alephzero.raas.gelato.cloud
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.alephzero.raas.gelato.cloud
- --node.data-availability.sequencer-inbox-address=0xF75206c49c1694594E3e69252E519434f1579876
- --node.feed.input.url=wss://feed.alephzero.raas.gelato.cloud
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/alephzero-mainnet-archive
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ALEPHZERO_MAINNET_NITRO_ARCHIVE_PEBBLE_HASH_DATA:-alephzero-mainnet-nitro-archive-pebble-hash}:/root/.arbitrum
- ./arb/alephzero/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.alephzero-mainnet-nitro-archive-pebble-hash-stripprefix.stripprefix.prefixes=/alephzero-mainnet-archive
- traefik.http.services.alephzero-mainnet-nitro-archive-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.alephzero-mainnet-nitro-archive-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.alephzero-mainnet-nitro-archive-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.alephzero-mainnet-nitro-archive-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/alephzero-mainnet-archive`) || Path(`/alephzero-mainnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.alephzero-mainnet-nitro-archive-pebble-hash.rule=Path(`/alephzero-mainnet-archive`) || Path(`/alephzero-mainnet-archive/`)}
- traefik.http.routers.alephzero-mainnet-nitro-archive-pebble-hash.middlewares=alephzero-mainnet-nitro-archive-pebble-hash-stripprefix, ipallowlist
volumes:
alephzero-mainnet-nitro-archive-pebble-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: alephzero
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...

View File

@@ -0,0 +1,146 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/alephzero-mainnet-nitro-pruned-pebble-path.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/alephzero-mainnet \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
alephzero-mainnet:
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.forwarding-target=https://rpc.alephzero.raas.gelato.cloud
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.alephzero.raas.gelato.cloud
- --node.data-availability.sequencer-inbox-address=0xF75206c49c1694594E3e69252E519434f1579876
- --node.feed.input.url=wss://feed.alephzero.raas.gelato.cloud
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/alephzero-mainnet
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ALEPHZERO_MAINNET_NITRO_PRUNED_PEBBLE_PATH_DATA:-alephzero-mainnet-nitro-pruned-pebble-path}:/root/.arbitrum
- ./arb/alephzero/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.alephzero-mainnet-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/alephzero-mainnet
- traefik.http.services.alephzero-mainnet-nitro-pruned-pebble-path.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.alephzero-mainnet-nitro-pruned-pebble-path.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.alephzero-mainnet-nitro-pruned-pebble-path.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.alephzero-mainnet-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && (Path(`/alephzero-mainnet`) || Path(`/alephzero-mainnet/`))}
- ${NO_SSL:+traefik.http.routers.alephzero-mainnet-nitro-pruned-pebble-path.rule=Path(`/alephzero-mainnet`) || Path(`/alephzero-mainnet/`)}
- traefik.http.routers.alephzero-mainnet-nitro-pruned-pebble-path.middlewares=alephzero-mainnet-nitro-pruned-pebble-path-stripprefix, ipallowlist
volumes:
alephzero-mainnet-nitro-pruned-pebble-path:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: alephzero
method-groups:
enabled:
- debug
- filter
methods:
disabled:
# not compatible with path state scheme
- name: debug_traceBlockByHash
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...

View File

@@ -0,0 +1,100 @@
---
services:
alephzero-sepolia-archive:
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_SEPOLIA_NITRO_VERSION:-v3.5.3-0a9c975}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.caching.state-scheme=hash
- --execution.forwarding-target=https://rpc.alephzero-testnet.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.alephzero-testnet.gelato.digital
- --node.data-availability.sequencer-inbox-address=0x16Ef70c48EF4BaaCfdaa4AfdD37F69332832a0bD
- --node.feed.input.url=wss://feed.alephzero-testnet.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/alephzero-sepolia-archive
- --persistent.db-engine=leveldb
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ALEPHZERO_SEPOLIA_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-alephzero-sepolia-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./arb/alephzero/sepolia:/config
- /slowdisk:/slowdisk
labels:
- traefik.enable=true
- traefik.http.middlewares.alephzero-sepolia-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/alephzero-sepolia-archive
- traefik.http.services.alephzero-sepolia-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.alephzero-sepolia-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.alephzero-sepolia-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.alephzero-sepolia-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && PathPrefix(`/alephzero-sepolia-archive`)}
- ${NO_SSL:+traefik.http.routers.alephzero-sepolia-nitro-archive-leveldb-hash.rule=PathPrefix(`/alephzero-sepolia-archive`)}
- traefik.http.routers.alephzero-sepolia-nitro-archive-leveldb-hash.middlewares=alephzero-sepolia-nitro-archive-leveldb-hash-stripprefix, ipwhitelist
volumes:
alephzero-sepolia-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
chain: alephzero-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...

View File

@@ -0,0 +1,143 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/alephzero-sepolia-nitro-archive-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/alephzero-sepolia-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
alephzero-sepolia-archive:
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.forwarding-target=https://rpc.alephzero-testnet.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.alephzero-testnet.gelato.digital
- --node.data-availability.sequencer-inbox-address=0x16Ef70c48EF4BaaCfdaa4AfdD37F69332832a0bD
- --node.feed.input.url=wss://feed.alephzero-testnet.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/alephzero-sepolia-archive
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ALEPHZERO_SEPOLIA_NITRO_ARCHIVE_PEBBLE_HASH_DATA:-alephzero-sepolia-nitro-archive-pebble-hash}:/root/.arbitrum
- ./arb/alephzero/sepolia:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.alephzero-sepolia-nitro-archive-pebble-hash-stripprefix.stripprefix.prefixes=/alephzero-sepolia-archive
- traefik.http.services.alephzero-sepolia-nitro-archive-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.alephzero-sepolia-nitro-archive-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.alephzero-sepolia-nitro-archive-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.alephzero-sepolia-nitro-archive-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/alephzero-sepolia-archive`) || Path(`/alephzero-sepolia-archive/`))}
- ${NO_SSL:+traefik.http.routers.alephzero-sepolia-nitro-archive-pebble-hash.rule=Path(`/alephzero-sepolia-archive`) || Path(`/alephzero-sepolia-archive/`)}
- traefik.http.routers.alephzero-sepolia-nitro-archive-pebble-hash.middlewares=alephzero-sepolia-nitro-archive-pebble-hash-stripprefix, ipallowlist
volumes:
alephzero-sepolia-nitro-archive-pebble-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: alephzero-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...

View File

@@ -0,0 +1,146 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/alephzero-sepolia-nitro-pruned-pebble-path.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/alephzero-sepolia \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
alephzero-sepolia:
image: ${ALEPHZERO_NITRO_IMAGE:-offchainlabs/nitro-node}:${ALEPHZERO_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.forwarding-target=https://rpc.alephzero-testnet.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.alephzero-testnet.gelato.digital
- --node.data-availability.sequencer-inbox-address=0x130937498521962644184395825246273622310592356541
- --node.feed.input.url=wss://feed.alephzero-testnet.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/alephzero-sepolia
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ALEPHZERO_SEPOLIA_NITRO_PRUNED_PEBBLE_PATH_DATA:-alephzero-sepolia-nitro-pruned-pebble-path}:/root/.arbitrum
- ./arb/alephzero/sepolia:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.alephzero-sepolia-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/alephzero-sepolia
- traefik.http.services.alephzero-sepolia-nitro-pruned-pebble-path.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.alephzero-sepolia-nitro-pruned-pebble-path.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.alephzero-sepolia-nitro-pruned-pebble-path.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.alephzero-sepolia-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && (Path(`/alephzero-sepolia`) || Path(`/alephzero-sepolia/`))}
- ${NO_SSL:+traefik.http.routers.alephzero-sepolia-nitro-pruned-pebble-path.rule=Path(`/alephzero-sepolia`) || Path(`/alephzero-sepolia/`)}
- traefik.http.routers.alephzero-sepolia-nitro-pruned-pebble-path.middlewares=alephzero-sepolia-nitro-pruned-pebble-path-stripprefix, ipallowlist
volumes:
alephzero-sepolia-nitro-pruned-pebble-path:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: alephzero-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
# not compatible with path state scheme
- name: debug_traceBlockByHash
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
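In the x-upstreams block, the $${ID}, $${PROVIDER}, $${RPC_URL} and $${WS_URL} values use Compose's $$ escape, so interpolation leaves the literal ${ID} etc. in place for whatever tool later consumes the upstream definition. A sketch of such a post-processing pass, assuming yq and envsubst are available; the placeholder values and the choice of tools are assumptions, since the actual consumer of this block is not specified here:

export ID=alephzero-sepolia-1 PROVIDER=example-provider
export RPC_URL=http://host.example:8545 WS_URL=ws://host.example:8545

yq '.x-upstreams' arb/nitro/alephzero-sepolia-nitro-pruned-pebble-path.yml \
  | sed 's/\$\$/$/g' \
  | envsubst '${ID} ${PROVIDER} ${RPC_URL} ${WS_URL}' \
  > upstreams.yml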


@@ -0,0 +1,136 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-nova-nitro-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-nova-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-nova-archive:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_NOVA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=42170
- --execution.caching.archive=true
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=archive
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-nova-archive
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_NOVA_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-arbitrum-nova-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./tmp/arbitrum-nova-archive:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-nova-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/arbitrum-nova-archive
- traefik.http.services.arbitrum-nova-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-nova-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-nova-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-nova-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-nova-archive`) || Path(`/arbitrum-nova-archive/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-nova-nitro-archive-leveldb-hash.rule=Path(`/arbitrum-nova-archive`) || Path(`/arbitrum-nova-archive/`)}
- traefik.http.routers.arbitrum-nova-nitro-archive-leveldb-hash.middlewares=arbitrum-nova-nitro-archive-leveldb-hash-stripprefix, ipallowlist
volumes:
arbitrum-nova-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum-nova
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
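The methods.enabled list above whitelists a handful of debug_ and raw-transaction calls on top of the debug and filter method groups. Assuming the stack is up and the gateway honours this list, one of them can be exercised through the same public path used in the usage comment:

curl -X POST https://${IP}.traefik.me/arbitrum-nova-archive \
  -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"debug_getRawReceipts","params":["latest"],"id":1}'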


@@ -0,0 +1,137 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-nova-nitro-pruned-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-nova \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-nova:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_NOVA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=42170
- --execution.caching.archive=false
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=pruned
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-nova
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_NOVA_NITRO_PRUNED_PEBBLE_HASH_DATA:-arbitrum-nova-nitro-pruned-pebble-hash}:/root/.arbitrum
- ./tmp/arbitrum-nova:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-nova-nitro-pruned-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-nova
- traefik.http.services.arbitrum-nova-nitro-pruned-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-nova-nitro-pruned-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-nova-nitro-pruned-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-nova-nitro-pruned-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-nova`) || Path(`/arbitrum-nova/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-nova-nitro-pruned-pebble-hash.rule=Path(`/arbitrum-nova`) || Path(`/arbitrum-nova/`)}
- traefik.http.routers.arbitrum-nova-nitro-pruned-pebble-hash.middlewares=arbitrum-nova-nitro-pruned-pebble-hash-stripprefix, ipallowlist
volumes:
arbitrum-nova-nitro-pruned-pebble-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum-nova
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
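The per-container sysctls above (somaxconn, SYN backlog, TCP buffer sizes) apply only inside the service's network namespace. They can be spot-checked on the running container via procfs, which works whether or not the image ships a sysctl binary:

docker compose exec arbitrum-nova cat /proc/sys/net/core/somaxconn            # expect 32768
docker compose exec arbitrum-nova cat /proc/sys/net/ipv4/tcp_max_syn_backlog  # expect 30000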


@@ -0,0 +1,142 @@
---
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-one-nitro-archive-leveldb-hash--benchmark.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-one-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-one-archive:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.5.5-90ee45c}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=42161
- --execution.caching.archive=true
- --execution.caching.state-scheme=hash
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.latest=archive
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-one-archive
- --persistent.db-engine=leveldb
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-arbitrum-one-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./tmp/arbitrum-one-archive:/tmp
- /slowdisk:/slowdisk
arbitrum-one-archive-benchmark:
build:
context: ./benchmark-proxy
dockerfile: Dockerfile
expose:
- '8545'
environment:
- ENABLE_DETAILED_LOGS=${BENCHMARK_PROXY_VERBOSE:-false}
- LISTEN_ADDR=:8545
- PRIMARY_BACKEND=http://arbitrum-one-archive:8545
- SUMMARY_INTERVAL=60
restart: unless-stopped
depends_on:
- arbitrum-one-archive
networks:
- chains
labels:
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/arbitrum-one-archive
- traefik.http.services.arbitrum-one-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one-archive`) || Path(`/arbitrum-one-archive/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.rule=Path(`/arbitrum-one-archive`) || Path(`/arbitrum-one-archive/`)}
- traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.middlewares=arbitrum-one-nitro-archive-leveldb-hash-stripprefix, ipallowlist
volumes:
arbitrum-one-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
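In this variant the Traefik labels sit on arbitrum-one-archive-benchmark rather than on the node, so public traffic at /arbitrum-one-archive passes through the benchmark proxy, which forwards to PRIMARY_BACKEND (the nitro service). To compare against the node directly, a throwaway curl container can be attached to the same network; the network name below assumes the project directory is "rpc" as in the usage comment:

# through Traefik and the benchmark proxy
curl -X POST https://${IP}.traefik.me/arbitrum-one-archive \
  -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'

# straight to the node, bypassing the proxy
docker run --rm --network rpc_chains curlimages/curl -s -X POST http://arbitrum-one-archive:8545 \
  -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'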


@@ -0,0 +1,207 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-one-nitro-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-one-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-one-archive:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=42161
- --execution.caching.archive=true
- --execution.rpc.classic-redirect=http://arbitrum-one-arbnode-archive:8545
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=archive
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-one-archive
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-arbitrum-one-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./tmp/arbitrum-one-archive:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/arbitrum-one-archive
- traefik.http.services.arbitrum-one-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one-archive`) || Path(`/arbitrum-one-archive/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.rule=Path(`/arbitrum-one-archive`) || Path(`/arbitrum-one-archive/`)}
- traefik.http.routers.arbitrum-one-nitro-archive-leveldb-hash.middlewares=arbitrum-one-nitro-archive-leveldb-hash-stripprefix, ipallowlist
arbitrum-one-arbnode-archive:
image: ${ARBITRUM_ARBNODE_IMAGE:-offchainlabs/arb-node}:${ARBITRUM_ONE_ARBNODE_VERSION:-v1.4.5-e97c1a4}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
- 8546
entrypoint: [/home/user/go/bin/arb-node]
command:
- --core.checkpoint-gas-frequency=156250000
- --l1.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --l2.disable-upstream
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=7070
- --node.cache.allow-slow-lookup
- --node.chain-id=42161
- --node.rpc.addr=0.0.0.0
- --node.rpc.enable-l1-calls
- --node.rpc.port=8545
- --node.rpc.tracing.enable
- --node.rpc.tracing.namespace=trace
- --node.ws.addr=0.0.0.0
- --node.ws.port=8546
- --persistent.chain=/data/datadir/
- --persistent.global-config=/data/
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_ARBNODE_ARCHIVE_LEVELDB_HASH_DATA:-arbitrum-one-arbnode-archive-leveldb-hash}:/data
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
volumes:
arbitrum-one-arbnode-archive-leveldb-hash:
arbitrum-one-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...
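The --execution.rpc.classic-redirect flag makes the nitro node hand archive queries at pre-Nitro ("classic") heights, roughly everything below block ~22.2M on Arbitrum One, over to the arb-node service defined alongside it; the request shape does not change from the caller's side. A hedged example querying old state through the same public endpoint:

curl -X POST https://${IP}.traefik.me/arbitrum-one-archive \
  -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_getBalance","params":["0x0000000000000000000000000000000000000000","0xF4240"],"id":1}'
# 0xF4240 = block 1,000,000, a classic-era height served via the arbnode backend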


@@ -0,0 +1,208 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-one-nitro-archive-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-one-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-one-archive:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=42161
- --execution.caching.archive=true
- --execution.rpc.classic-redirect=http://arbitrum-one-arbnode-archive:8545
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=archive
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-one-archive
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_NITRO_ARCHIVE_PEBBLE_HASH_DATA:-arbitrum-one-nitro-archive-pebble-hash}:/root/.arbitrum
- ./tmp/arbitrum-one-archive:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-nitro-archive-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-one-archive
- traefik.http.services.arbitrum-one-nitro-archive-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-archive-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one-archive`) || Path(`/arbitrum-one-archive/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-nitro-archive-pebble-hash.rule=Path(`/arbitrum-one-archive`) || Path(`/arbitrum-one-archive/`)}
- traefik.http.routers.arbitrum-one-nitro-archive-pebble-hash.middlewares=arbitrum-one-nitro-archive-pebble-hash-stripprefix, ipallowlist
arbitrum-one-arbnode-archive:
image: ${ARBITRUM_ARBNODE_IMAGE:-offchainlabs/arb-node}:${ARBITRUM_ONE_ARBNODE_VERSION:-v1.4.5-e97c1a4}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
- 8546
entrypoint: [/home/user/go/bin/arb-node]
command:
- --core.checkpoint-gas-frequency=156250000
- --l1.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --l2.disable-upstream
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=7070
- --node.cache.allow-slow-lookup
- --node.chain-id=42161
- --node.rpc.addr=0.0.0.0
- --node.rpc.enable-l1-calls
- --node.rpc.port=8545
- --node.rpc.tracing.enable
- --node.rpc.tracing.namespace=trace
- --node.ws.addr=0.0.0.0
- --node.ws.port=8546
- --persistent.chain=/data/datadir/
- --persistent.global-config=/data/
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_ARBNODE_ARCHIVE_LEVELDB_HASH_DATA:-arbitrum-one-arbnode-archive-leveldb-hash}:/data
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
volumes:
arbitrum-one-arbnode-archive-leveldb-hash:
arbitrum-one-nitro-archive-pebble-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...


@@ -0,0 +1,157 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-one-nitro-pruned-pebble-hash--benchmark.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-one \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-one:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=42161
- --execution.caching.archive=false
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=pruned
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-one
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_DATA:-arbitrum-one-nitro-pruned-pebble-hash}:/root/.arbitrum
- ./tmp/arbitrum-one:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
arbitrum-one-benchmark:
build:
context: ./benchmark-proxy
dockerfile: Dockerfile
expose:
- '8545'
environment:
- ENABLE_DETAILED_LOGS=${BENCHMARK_PROXY_VERBOSE:-false}
- LISTEN_ADDR=:8545
- PRIMARY_BACKEND=http://arbitrum-one:8545
- SUMMARY_INTERVAL=60
restart: unless-stopped
depends_on:
- arbitrum-one
networks:
- chains
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-nitro-pruned-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-one
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one`) || Path(`/arbitrum-one/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.rule=Path(`/arbitrum-one`) || Path(`/arbitrum-one/`)}
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.middlewares=arbitrum-one-nitro-pruned-pebble-hash-stripprefix, ipallowlist
volumes:
arbitrum-one-nitro-pruned-pebble-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...


@@ -0,0 +1,201 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-one-nitro-pruned-pebble-hash--fireeth.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-one \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-one:
image: ${ARBITRUM_FIREETH_IMAGE:-ghcr.io/streamingfast/firehose-ethereum}:${ARBITRUM_ONE_FIREETH_VERSION:-v2.11.7-nitro-nitro-v3.5.5-fh3.0}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
entrypoint: [sh, -c, exec fireeth start reader-node --log-to-file=false --reader-node-arguments "$*", _]
command:
- --chain.id=42161
- --execution.caching.archive=false
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=pruned
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-one
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_FIREETH_DATA:-arbitrum-one-fireeth}:/app/firehose-data
- ${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_DATA:-arbitrum-one-nitro-pruned-pebble-hash}:/root/.arbitrum
- ./tmp/arbitrum-one:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-nitro-pruned-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-one
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one`) || Path(`/arbitrum-one/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.rule=Path(`/arbitrum-one`) || Path(`/arbitrum-one/`)}
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.middlewares=arbitrum-one-nitro-pruned-pebble-hash-stripprefix, ipallowlist
arbitrum-one-firehose:
image: ${ARBITRUM_FIREETH_IMAGE:-ghcr.io/streamingfast/firehose-ethereum}:${ARBITRUM_ONE_FIREETH_VERSION:-v2.11.7-nitro-nitro-v3.5.5-fh3.0}
expose:
- 10015
- 10014
environment:
- ${ARBITRUM_ONE_FIREETH_BLOCKS_STORE:-/app/firehose-data/storage/merged-blocks}
entrypoint: [sh, -c, exec fireeth --config-file="" --log-to-file=false start firehose index-builder relayer merger $@, _]
command:
- --firehose-rate-limit-bucket-fill-rate=${ARBITRUM_ONE_FIREHOSE_RATE_LIMIT_BUCKET_FILL_RATE:-1s}
- --firehose-rate-limit-bucket-size=${ARBITRUM_ONE_FIREHOSE_RATE_LIMIT_BUCKET_SIZE:-200}
- --log-to-file=false
- --relayer-source=arbitrum-one:10010
restart: unless-stopped
depends_on:
- arbitrum-one
networks:
- chains
volumes:
- ${ARBITRUM_ONE_FIREETH_DATA:-arbitrum-one-fireeth}:/app/firehose-data
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash-firehose.loadbalancer.server.scheme=h2c
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-firehose.service=arbitrum-one-nitro-pruned-pebble-hash-firehose
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash-firehose.loadbalancer.server.port=10015
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-firehose.entrypoints=grpc
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-firehose.tls.certresolver=myresolver}
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-firehose.rule=Host(`arbitrum-one-firehose.${DOMAIN}`)
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-firehose.middlewares=ipallowlist
arbitrum-one-events:
image: ${ARBITRUM_FIREETH_IMAGE:-ghcr.io/streamingfast/firehose-ethereum}:${ARBITRUM_ONE_FIREETH_VERSION:-v2.11.7-nitro-nitro-v3.5.5-fh3.0}
expose:
- 10016
entrypoint: [sh, -c, exec fireeth --config-file="" --log-to-file=false start substreams-tier1 substreams-tier2 $@, _]
command:
- --common-live-blocks-addr=arbitrum-one-firehose:10014
- --log-to-file=false
- --substreams-block-execution-timeout=${ARBITRUM_ONE_SUBSTREAMS_BLOCK_EXECUTION_TIMEOUT:-3m0s}
- --substreams-rpc-endpoints=${ARBITRUM_ONE_EXECUTION_ARCHIVE_RPC}
- --substreams-tier1-max-subrequests=${ARBITRUM_ONE_SUBSTREAMS_TIER1_MAX_SUBREQUESTS:-4}
restart: unless-stopped
depends_on:
- arbitrum-one
networks:
- chains
volumes:
- ${ARBITRUM_ONE_FIREETH_DATA:-arbitrum-one-fireeth}:/app/firehose-data
logging: *logging-defaults
labels:
- traefik.enable=true
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash-events.loadbalancer.server.scheme=h2c
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-events.service=arbitrum-one-nitro-pruned-pebble-hash-events
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash-events.loadbalancer.server.port=10016
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-events.entrypoints=grpc
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-events.tls.certresolver=myresolver}
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-events.rule=Host(`arbitrum-one-events.${DOMAIN}`)
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash-events.middlewares=ipallowlist
volumes:
arbitrum-one-nitro-pruned-pebble-hash:
arbitrum-one-fireeth: # matches the ARBITRUM_ONE_FIREETH_DATA fallback used in the service mounts above
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
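The reader-node entrypoint above uses sh -c with a trailing "_" as $0, so the compose command list arrives as positional parameters and "$*" folds every nitro flag into a single --reader-node-arguments string. Roughly, the container ends up running the equivalent of (flag list abbreviated):

sh -c 'exec fireeth start reader-node --log-to-file=false --reader-node-arguments "$*"' _ \
  --chain.id=42161 --execution.caching.archive=false --http.addr=0.0.0.0   # ...remaining flags

# which expands to:
fireeth start reader-node --log-to-file=false \
  --reader-node-arguments "--chain.id=42161 --execution.caching.archive=false --http.addr=0.0.0.0"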


@@ -0,0 +1,137 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-one-nitro-pruned-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-one \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-one:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_ONE_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=42161
- --execution.caching.archive=false
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=pruned
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-one
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_ONE_NITRO_PRUNED_PEBBLE_HASH_DATA:-arbitrum-one-nitro-pruned-pebble-hash}:/root/.arbitrum
- ./tmp/arbitrum-one:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-one-nitro-pruned-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-one
- traefik.http.services.arbitrum-one-nitro-pruned-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-one`) || Path(`/arbitrum-one/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.rule=Path(`/arbitrum-one`) || Path(`/arbitrum-one/`)}
- traefik.http.routers.arbitrum-one-nitro-pruned-pebble-hash.middlewares=arbitrum-one-nitro-pruned-pebble-hash-stripprefix, ipallowlist
volumes:
arbitrum-one-nitro-pruned-pebble-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
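The prometheus-scrape.* labels advertise the nitro metrics server enabled by --metrics (port 6070, path /debug/metrics/prometheus), presumably for a scraper configured elsewhere in the stack. The endpoint can also be checked by hand from inside the compose network; the network name below assumes the project directory is "rpc" as in the usage comment:

docker run --rm --network rpc_chains curlimages/curl -s \
  http://arbitrum-one:6070/debug/metrics/prometheus | head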


@@ -0,0 +1,137 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-sepolia-nitro-archive-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-sepolia-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-sepolia-archive:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=421614
- --execution.caching.archive=true
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=archive
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-sepolia-archive
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_SEPOLIA_NITRO_ARCHIVE_PEBBLE_HASH_DATA:-arbitrum-sepolia-nitro-archive-pebble-hash}:/root/.arbitrum
- ./tmp/arbitrum-sepolia-archive:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-sepolia-nitro-archive-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-sepolia-archive
- traefik.http.services.arbitrum-sepolia-nitro-archive-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-archive-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-archive-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-archive-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-sepolia-archive`) || Path(`/arbitrum-sepolia-archive/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-sepolia-nitro-archive-pebble-hash.rule=Path(`/arbitrum-sepolia-archive`) || Path(`/arbitrum-sepolia-archive/`)}
- traefik.http.routers.arbitrum-sepolia-nitro-archive-pebble-hash.middlewares=arbitrum-sepolia-nitro-archive-pebble-hash-stripprefix, ipallowlist
volumes:
arbitrum-sepolia-nitro-archive-pebble-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
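The usage comment sets IP, DOMAIN and COMPOSE_FILE as environment variables; in practice these, plus the parent-chain URLs this file references, would typically live in a .env file next to the compose files. A sketch with placeholder Sepolia endpoints (the real URLs must point at your own L1 execution and beacon services):

cat > .env <<EOF
DOMAIN=$(curl -s ipinfo.io/ip).traefik.me
COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-sepolia-nitro-archive-pebble-hash.yml
ETHEREUM_SEPOLIA_EXECUTION_RPC=https://sepolia-execution.example.org
ETHEREUM_SEPOLIA_BEACON_REST=https://sepolia-beacon.example.org
EOF
docker compose up -d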


@@ -0,0 +1,137 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/arbitrum-sepolia-nitro-pruned-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/arbitrum-sepolia \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
arbitrum-sepolia:
image: ${ARBITRUM_NITRO_IMAGE:-offchainlabs/nitro-node}:${ARBITRUM_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --chain.id=421614
- --execution.caching.archive=false
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --init.latest=pruned
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/arbitrum-sepolia
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${ARBITRUM_SEPOLIA_NITRO_PRUNED_PEBBLE_HASH_DATA:-arbitrum-sepolia-nitro-pruned-pebble-hash}:/root/.arbitrum
- ./tmp/arbitrum-sepolia:/tmp
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.arbitrum-sepolia-nitro-pruned-pebble-hash-stripprefix.stripprefix.prefixes=/arbitrum-sepolia
- traefik.http.services.arbitrum-sepolia-nitro-pruned-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/arbitrum-sepolia`) || Path(`/arbitrum-sepolia/`))}
- ${NO_SSL:+traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-hash.rule=Path(`/arbitrum-sepolia`) || Path(`/arbitrum-sepolia/`)}
- traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-hash.middlewares=arbitrum-sepolia-nitro-pruned-pebble-hash-stripprefix, ipallowlist
volumes:
arbitrum-sepolia-nitro-pruned-pebble-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: arbitrum-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...


@@ -0,0 +1,57 @@
services:
arbitrum-sepolia:
image: 'offchainlabs/nitro-node:${NITRO_VERSION:-v3.5.3-0a9c975}'
stop_grace_period: 3m
user: root
volumes:
- 'arbitrum-sepolia-nitro-pruned-pebble-path:/root/.arbitrum'
- './tmp/arbitrum-sepolia:/tmp'
expose:
- 8545
command:
- --chain.id=421614
- --execution.caching.state-scheme=path
- --execution.rpc.gas-cap=600000000
- --execution.caching.archive=false
- --execution.sequencer.enable=false
- --persistent.db-engine=pebble
- --persistent.chain=/root/.arbitrum/arbitrum-sepolia
- --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
- --http.addr=0.0.0.0
- --http.port=8545
- --http.vhosts=*
- --http.corsdomain=*
- --http.api=eth,net,web3,arb,txpool,debug
- --ws.port=8545
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.api=eth,net,web3,arb,txpool,debug
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --log-type=json
- --node.sequencer=false
- --node.staker.enable=false
- --node.batch-poster.enable=false
restart: unless-stopped
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.arbitrum-sepolia-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/arbitrum-sepolia"
- "traefik.http.services.arbitrum-sepolia-nitro-pruned-pebble-path.loadbalancer.server.port=8545"
- "traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-path.entrypoints=websecure"
- "traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-path.tls.certresolver=myresolver"
- "traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && PathPrefix(`/arbitrum-sepolia`)"
- "traefik.http.routers.arbitrum-sepolia-nitro-pruned-pebble-path.middlewares=arbitrum-sepolia-nitro-pruned-pebble-path-stripprefix, ipwhitelist"
networks:
- chains
volumes:
arbitrum-sepolia-nitro-pruned-pebble-path:


@@ -0,0 +1,142 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/connext-sepolia-nitro-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/connext-sepolia-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
connext-sepolia-archive:
image: ${CONNEXT_NITRO_IMAGE:-offchainlabs/nitro-node}:${CONNEXT_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.forwarding-target=https://rpc.connext-sepolia.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.connext-sepolia.gelato.digital
- --node.data-availability.sequencer-inbox-address=0x727095791318912381473707332248435763608420056676
- --node.feed.input.url=wss://feed.connext-sepolia.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/connext-sepolia-archive
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${CONNEXT_SEPOLIA_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-connext-sepolia-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./arb/connext/sepolia:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.connext-sepolia-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/connext-sepolia-archive
- traefik.http.services.connext-sepolia-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.connext-sepolia-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.connext-sepolia-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.connext-sepolia-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/connext-sepolia-archive`) || Path(`/connext-sepolia-archive/`))}
- ${NO_SSL:+traefik.http.routers.connext-sepolia-nitro-archive-leveldb-hash.rule=Path(`/connext-sepolia-archive`) || Path(`/connext-sepolia-archive/`)}
- traefik.http.routers.connext-sepolia-nitro-archive-leveldb-hash.middlewares=connext-sepolia-nitro-archive-leveldb-hash-stripprefix, ipallowlist
volumes:
connext-sepolia-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: everclear-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
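The x-upstreams block writes $${ID}, $${PROVIDER}, $${RPC_URL} and $${WS_URL} with doubled dollars because docker compose would otherwise try to interpolate them from the environment; $$ is compose's escape, so the rendered output keeps a literal ${...} placeholder for whatever templating step later turns x-upstreams into the proxy's upstream config. A small self-contained sketch of the escaping behaviour (escape-demo.yml is just a throwaway file name):

cat > escape-demo.yml <<'EOF'
services:
  demo:
    image: alpine
    labels:
      - id=$${ID}
EOF
# even with ID set, the rendered label keeps the literal placeholder
ID=should-not-leak docker compose -f escape-demo.yml config | grep -F '${ID}'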


@@ -0,0 +1,146 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/connext-sepolia-nitro-pruned-pebble-path.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/connext-sepolia \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
connext-sepolia:
image: ${CONNEXT_NITRO_IMAGE:-offchainlabs/nitro-node}:${CONNEXT_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.forwarding-target=https://rpc.connext-sepolia.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.connext-sepolia.gelato.digital
- --node.data-availability.sequencer-inbox-address=0x727095791318912381473707332248435763608420056676
- --node.feed.input.url=wss://feed.connext-sepolia.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_SEPOLIA_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/connext-sepolia
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${CONNEXT_SEPOLIA_NITRO_PRUNED_PEBBLE_PATH_DATA:-connext-sepolia-nitro-pruned-pebble-path}:/root/.arbitrum
- ./arb/connext/sepolia:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.connext-sepolia-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/connext-sepolia
- traefik.http.services.connext-sepolia-nitro-pruned-pebble-path.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.connext-sepolia-nitro-pruned-pebble-path.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.connext-sepolia-nitro-pruned-pebble-path.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.connext-sepolia-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && (Path(`/connext-sepolia`) || Path(`/connext-sepolia/`))}
- ${NO_SSL:+traefik.http.routers.connext-sepolia-nitro-pruned-pebble-path.rule=Path(`/connext-sepolia`) || Path(`/connext-sepolia/`)}
- traefik.http.routers.connext-sepolia-nitro-pruned-pebble-path.middlewares=connext-sepolia-nitro-pruned-pebble-path-stripprefix, ipallowlist
volumes:
connext-sepolia-nitro-pruned-pebble-path:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: everclear-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
# not compatible with path state scheme
- name: debug_traceBlockByHash
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
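The prometheus-scrape.* labels advertise the endpoint that --metrics-server.addr and --metrics-server.port expose on 6070; that port is not routed through traefik, so it is only reachable on the internal chains network. A quick manual scrape from a throwaway container (the rpc_chains network name assumes the stack lives in a directory called rpc and that the chains network keeps its default name):

docker run --rm --network rpc_chains curlimages/curl \
  -s http://connext-sepolia:6070/debug/metrics/prometheus | head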


@@ -0,0 +1,142 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/everclear-mainnet-nitro-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/everclear-mainnet-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
everclear-mainnet-archive:
image: ${EVERCLEAR_NITRO_IMAGE:-offchainlabs/nitro-node}:${EVERCLEAR_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.forwarding-target=https://rpc.everclear.raas.gelato.cloud
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.everclear.raas.gelato.cloud
- --node.data-availability.sequencer-inbox-address=0x727095791318912381473707332248435763608420056676
- --node.feed.input.url=wss://feed.everclear.raas.gelato.cloud
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/everclear-mainnet-archive
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${EVERCLEAR_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-everclear-mainnet-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./arb/everclear/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.everclear-mainnet-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/everclear-mainnet-archive
- traefik.http.services.everclear-mainnet-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.everclear-mainnet-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.everclear-mainnet-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.everclear-mainnet-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/everclear-mainnet-archive`) || Path(`/everclear-mainnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.everclear-mainnet-nitro-archive-leveldb-hash.rule=Path(`/everclear-mainnet-archive`) || Path(`/everclear-mainnet-archive/`)}
- traefik.http.routers.everclear-mainnet-nitro-archive-leveldb-hash.middlewares=everclear-mainnet-nitro-archive-leveldb-hash-stripprefix, ipallowlist
volumes:
everclear-mainnet-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: everclear
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
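Besides the eth_blockNumber probe in the usage comment, eth_syncing is a convenient way to tell whether the node is still replaying the sequencer feed or already at the chain head; it returns false once synced. An example against the same route:

curl -s -X POST https://${IP}.traefik.me/everclear-mainnet-archive \
  -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}'
# -> {"jsonrpc":"2.0","id":1,"result":false} once the node has caught up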


@@ -0,0 +1,146 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/everclear-mainnet-nitro-pruned-pebble-path.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/everclear-mainnet \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
everclear-mainnet:
image: ${EVERCLEAR_NITRO_IMAGE:-offchainlabs/nitro-node}:${EVERCLEAR_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.forwarding-target=https://rpc.everclear.raas.gelato.cloud
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.everclear.raas.gelato.cloud
- --node.data-availability.sequencer-inbox-address=0x727095791318912381473707332248435763608420056676
- --node.feed.input.url=wss://feed.everclear.raas.gelato.cloud
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/everclear-mainnet
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${EVERCLEAR_MAINNET_NITRO_PRUNED_PEBBLE_PATH_DATA:-everclear-mainnet-nitro-pruned-pebble-path}:/root/.arbitrum
- ./arb/everclear/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.everclear-mainnet-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/everclear-mainnet
- traefik.http.services.everclear-mainnet-nitro-pruned-pebble-path.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.everclear-mainnet-nitro-pruned-pebble-path.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.everclear-mainnet-nitro-pruned-pebble-path.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.everclear-mainnet-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && (Path(`/everclear-mainnet`) || Path(`/everclear-mainnet/`))}
- ${NO_SSL:+traefik.http.routers.everclear-mainnet-nitro-pruned-pebble-path.rule=Path(`/everclear-mainnet`) || Path(`/everclear-mainnet/`)}
- traefik.http.routers.everclear-mainnet-nitro-pruned-pebble-path.middlewares=everclear-mainnet-nitro-pruned-pebble-path-stripprefix, ipallowlist
volumes:
everclear-mainnet-nitro-pruned-pebble-path:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: everclear
method-groups:
enabled:
- debug
- filter
methods:
disabled:
# not compatible with path state scheme
- name: debug_traceBlockByHash
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
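The ws.* flags serve WebSocket on the same 8545 port, so the traefik route above also accepts wss connections with the default TLS setup; frameSize and msgSize in x-upstreams are hints for the upstream proxy, not node flags. A quick subscription test, assuming wscat (from the npm ws package) is installed and that the node's default ws.api set includes eth:

wscat -c wss://${IP}.traefik.me/everclear-mainnet \
  -x '{"jsonrpc":"2.0","id":1,"method":"eth_subscribe","params":["newHeads"]}' -w 15
# prints the subscription id, then a newHeads notification within a block or two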


@@ -0,0 +1,141 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/opencampuscodex-sepolia-nitro-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/opencampuscodex-sepolia-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
opencampuscodex-sepolia-archive:
image: ${OPENCAMPUSCODEX_NITRO_IMAGE:-offchainlabs/nitro-node}:${OPENCAMPUSCODEX_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.forwarding-target=https://rpc.open-campus-codex.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ARBITRUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.open-campus-codex.gelato.digital
- --node.data-availability.sequencer-inbox-address=0xe347C1223381b9Dcd6c0F61cf81c90175A7Bae77
- --node.feed.input.url=wss://feed.open-campus-codex.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.connection.url=${ARBITRUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/opencampuscodex-sepolia-archive
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${OPENCAMPUSCODEX_SEPOLIA_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-opencampuscodex-sepolia-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./arb/opencampuscodex/arbitrum-sepolia:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.opencampuscodex-sepolia-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/opencampuscodex-sepolia-archive
- traefik.http.services.opencampuscodex-sepolia-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.opencampuscodex-sepolia-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.opencampuscodex-sepolia-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.opencampuscodex-sepolia-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/opencampuscodex-sepolia-archive`) || Path(`/opencampuscodex-sepolia-archive/`))}
- ${NO_SSL:+traefik.http.routers.opencampuscodex-sepolia-nitro-archive-leveldb-hash.rule=Path(`/opencampuscodex-sepolia-archive`) || Path(`/opencampuscodex-sepolia-archive/`)}
- traefik.http.routers.opencampuscodex-sepolia-nitro-archive-leveldb-hash.middlewares=opencampuscodex-sepolia-nitro-archive-leveldb-hash-stripprefix, ipallowlist
volumes:
opencampuscodex-sepolia-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: open-campus-codex-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
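Open Campus Codex settles to Arbitrum Sepolia, which is why this file has no blob-client flag and why both the DAS and parent-chain URLs point at ${ARBITRUM_SEPOLIA_EXECUTION_RPC}. A quick sanity check that the configured parent-chain endpoint really is Arbitrum Sepolia (chain id 421614, i.e. 0x66eee), assuming the variable is exported in your shell or substituted with the URL:

curl -s -X POST "${ARBITRUM_SEPOLIA_EXECUTION_RPC}" \
  -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_chainId","params":[],"id":1}'
# -> {"jsonrpc":"2.0","id":1,"result":"0x66eee"}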


@@ -0,0 +1,120 @@
# use at your own risk
services:
opencampuscodex-sepolia:
image: ${OPENCAMPUSCODEX_NITRO_IMAGE:-offchainlabs/nitro-node}:${OPENCAMPUSCODEX_SEPOLIA_NITRO_VERSION:-v3.5.3-0a9c975}
user: root
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer (commented out as in the newer files; not a per-container sysctl)
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
expose:
- 8545
- 8551
ports:
- 10938:10938
- 10938:10938/udp
volumes:
- ${OPENCAMPUSCODEX_SEPOLIA_NITRO_PRUNED_PEBBLE_HASH_DATA:-opencampuscodex-sepolia-nitro-pruned-pebble-hash}:/root/.arbitrum
- /slowdisk:/slowdisk
- .jwtsecret:/jwtsecret:ro
- ./tmp/opencampuscodex-sepolia:/tmp
command:
- --datadir=/root/.arbitrum
- --port=10938
- --bind=0.0.0.0
- --nat=extip:${IP}
- --http
- --http.port=8545
- --http.vhosts=*
- --ws
- --ws.port=8545
- --ws.origins=*
- --ws.addr=0.0.0.0
- --http.addr=0.0.0.0
- --maxpeers=50
- --http.api=eth,net,web3,arb,txpool,debug
- --ws.api=eth,net,web3,arb,txpool,debug
- --rpc.gascap=600000000
- --rpc.returndatalimit=10000000
- --rpc.txfeecap=0
- --execution.caching.state-scheme=hash
- --execution.rpc.gas-cap=600000000
- --execution.caching.archive=false
- --execution.sequencer.enable=false
- --persistent.db-engine=pebble
- --persistent.chain=/root/.arbitrum/opencampuscodex-sepolia
- --conf.file=/config/baseConfig.json
- --node.sequencer=false
- --node.staker.enable=false
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.sequencer-inbox-address=0xe347C1223381b9Dcd6c0F61cf81c90175A7Bae77
- --node.data-availability.parent-chain-node-url=${ARBITRUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.open-campus-codex.gelato.digital
- --node.feed.input.url=wss://feed.open-campus-codex.gelato.digital
- --execution.forwarding-target=https://rpc.open-campus-codex.gelato.digital
- --parent-chain.connection.url=${ARBITRUM_SEPOLIA_EXECUTION_RPC}
networks:
- chains
restart: unless-stopped
stop_grace_period: 5m
labels:
- traefik.enable=true
- traefik.http.middlewares.opencampuscodex-sepolia-nitro-pruned-pebble-hash-stripprefix.stripprefix.prefixes=/opencampuscodex-sepolia
- traefik.http.services.opencampuscodex-sepolia-nitro-pruned-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-hash.rule=Host(`$DOMAIN`) && PathPrefix(`/opencampuscodex-sepolia`)}
- ${NO_SSL:+traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-hash.rule=PathPrefix(`/opencampuscodex-sepolia`)}
- traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-hash.middlewares=opencampuscodex-sepolia-nitro-pruned-pebble-hash-stripprefix, ipwhitelist
volumes:
opencampuscodex-sepolia-nitro-pruned-pebble-hash:
x-upstreams:
- chain: open-campus-codex-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex


@@ -0,0 +1,145 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/opencampuscodex-sepolia-nitro-pruned-pebble-path.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/opencampuscodex-sepolia \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
opencampuscodex-sepolia:
image: ${OPENCAMPUSCODEX_NITRO_IMAGE:-offchainlabs/nitro-node}:${OPENCAMPUSCODEX_SEPOLIA_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.forwarding-target=https://rpc.open-campus-codex.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ARBITRUM_SEPOLIA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.open-campus-codex.gelato.digital
- --node.data-availability.sequencer-inbox-address=0xe347C1223381b9Dcd6c0F61cf81c90175A7Bae77
- --node.feed.input.url=wss://feed.open-campus-codex.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.connection.url=${ARBITRUM_SEPOLIA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/opencampuscodex-sepolia
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${OPENCAMPUSCODEX_SEPOLIA_NITRO_PRUNED_PEBBLE_PATH_DATA:-opencampuscodex-sepolia-nitro-pruned-pebble-path}:/root/.arbitrum
- ./arb/opencampuscodex/arbitrum-sepolia:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.opencampuscodex-sepolia-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/opencampuscodex-sepolia
- traefik.http.services.opencampuscodex-sepolia-nitro-pruned-pebble-path.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-path.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-path.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && (Path(`/opencampuscodex-sepolia`) || Path(`/opencampuscodex-sepolia/`))}
- ${NO_SSL:+traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-path.rule=Path(`/opencampuscodex-sepolia`) || Path(`/opencampuscodex-sepolia/`)}
- traefik.http.routers.opencampuscodex-sepolia-nitro-pruned-pebble-path.middlewares=opencampuscodex-sepolia-nitro-pruned-pebble-path-stripprefix, ipallowlist
volumes:
opencampuscodex-sepolia-nitro-pruned-pebble-path:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: open-campus-codex-sepolia
method-groups:
enabled:
- debug
- filter
methods:
disabled:
# not compatible with path state scheme
- name: debug_traceBlockByHash
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
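Each of these files opens with an x-logging-defaults extension block that defines the &logging-defaults anchor; the per-service logging: *logging-defaults alias is plain YAML anchor reuse and is equivalent to writing the mapping out in full:

logging:
  driver: json-file
  options:
    max-size: "10m"
    max-file: "3"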


@@ -0,0 +1,141 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/playblock-mainnet-nitro-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/playblock-mainnet-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
playblock-mainnet-archive:
image: ${PLAYBLOCK_NITRO_IMAGE:-offchainlabs/nitro-node}:${PLAYBLOCK_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.forwarding-target=https://rpc.playblock.io
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ARBITRUM_NOVA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.playblock.io
- --node.data-availability.sequencer-inbox-address=0x1297541082195356755105700451499873350464260779639
- --node.feed.input.url=wss://feed.playblock.io
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.connection.url=${ARBITRUM_NOVA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/playblock-mainnet-archive
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${PLAYBLOCK_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-playblock-mainnet-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./arb/playblock/arbitrum-nova:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.playblock-mainnet-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/playblock-mainnet-archive
- traefik.http.services.playblock-mainnet-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.playblock-mainnet-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.playblock-mainnet-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.playblock-mainnet-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/playblock-mainnet-archive`) || Path(`/playblock-mainnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.playblock-mainnet-nitro-archive-leveldb-hash.rule=Path(`/playblock-mainnet-archive`) || Path(`/playblock-mainnet-archive/`)}
- traefik.http.routers.playblock-mainnet-nitro-archive-leveldb-hash.middlewares=playblock-mainnet-nitro-archive-leveldb-hash-stripprefix, ipallowlist
volumes:
playblock-mainnet-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: playnance
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
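Everything variable in these stacks is driven from .env: COMPOSE_FILE stacks the YAML files, the *_NITRO_IMAGE and *_NITRO_VERSION variables pin the image, and pointing a *_DATA variable at an absolute path turns the named volume into a bind mount. A sketch of a matching .env for this file (the domain and the /slowdisk path are placeholders):

# .env
COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/playblock-mainnet-nitro-archive-leveldb-hash.yml
DOMAIN=203.0.113.7.traefik.me
PLAYBLOCK_MAINNET_NITRO_VERSION=v3.6.4-28199cd
PLAYBLOCK_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_DATA=/slowdisk/playblock-mainnet-archive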


@@ -0,0 +1,145 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/playblock-mainnet-nitro-pruned-pebble-path.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/playblock-mainnet \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
playblock-mainnet:
image: ${PLAYBLOCK_NITRO_IMAGE:-offchainlabs/nitro-node}:${PLAYBLOCK_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.forwarding-target=https://rpc.playblock.io
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ARBITRUM_NOVA_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.playblock.io
- --node.data-availability.sequencer-inbox-address=0x1297541082195356755105700451499873350464260779639
- --node.feed.input.url=wss://feed.playblock.io
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.connection.url=${ARBITRUM_NOVA_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/playblock-mainnet
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${PLAYBLOCK_MAINNET_NITRO_PRUNED_PEBBLE_PATH_DATA:-playblock-mainnet-nitro-pruned-pebble-path}:/root/.arbitrum
- ./arb/playblock/arbitrum-nova:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.playblock-mainnet-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/playblock-mainnet
- traefik.http.services.playblock-mainnet-nitro-pruned-pebble-path.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.playblock-mainnet-nitro-pruned-pebble-path.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.playblock-mainnet-nitro-pruned-pebble-path.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.playblock-mainnet-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && (Path(`/playblock-mainnet`) || Path(`/playblock-mainnet/`))}
- ${NO_SSL:+traefik.http.routers.playblock-mainnet-nitro-pruned-pebble-path.rule=Path(`/playblock-mainnet`) || Path(`/playblock-mainnet/`)}
- traefik.http.routers.playblock-mainnet-nitro-pruned-pebble-path.middlewares=playblock-mainnet-nitro-pruned-pebble-path-stripprefix, ipallowlist
volumes:
playblock-mainnet-nitro-pruned-pebble-path:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: playnance
method-groups:
enabled:
- debug
- filter
methods:
disabled:
# not compatible with path state scheme
- name: debug_traceBlockByHash
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...


@@ -0,0 +1,142 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/real-mainnet-nitro-archive-leveldb-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/real-mainnet-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
real-mainnet-archive:
image: ${REAL_NITRO_IMAGE:-offchainlabs/nitro-node}:${REAL_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.forwarding-target=https://rpc.realforreal.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.realforreal.gelato.digital
- --node.data-availability.sequencer-inbox-address=0x466813324240923703236721233648302990016039913376
- --node.feed.input.url=wss://feed.realforreal.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/real-mainnet-archive
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${REAL_MAINNET_NITRO_ARCHIVE_LEVELDB_HASH_DATA:-real-mainnet-nitro-archive-leveldb-hash}:/root/.arbitrum
- ./arb/real/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.real-mainnet-nitro-archive-leveldb-hash-stripprefix.stripprefix.prefixes=/real-mainnet-archive
- traefik.http.services.real-mainnet-nitro-archive-leveldb-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.real-mainnet-nitro-archive-leveldb-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.real-mainnet-nitro-archive-leveldb-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.real-mainnet-nitro-archive-leveldb-hash.rule=Host(`$DOMAIN`) && (Path(`/real-mainnet-archive`) || Path(`/real-mainnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.real-mainnet-nitro-archive-leveldb-hash.rule=Path(`/real-mainnet-archive`) || Path(`/real-mainnet-archive/`)}
- traefik.http.routers.real-mainnet-nitro-archive-leveldb-hash.middlewares=real-mainnet-nitro-archive-leveldb-hash-stripprefix, ipallowlist
volumes:
real-mainnet-nitro-archive-leveldb-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: real
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
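The practical difference between the -archive and pruned variants is historical state: the archive node keeps every state root, so state queries pinned to old blocks succeed, while the pruned path-scheme variant further below only answers for recent blocks. A quick probe against block 1 (the zero address is used purely for illustration):

curl -s -X POST https://${IP}.traefik.me/real-mainnet-archive \
  -H "Content-Type: application/json" \
  --data '{"jsonrpc":"2.0","method":"eth_getBalance","params":["0x0000000000000000000000000000000000000000","0x1"],"id":1}'
# the archive node returns a balance; the pruned variant answers the same call with a missing-state error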


@@ -0,0 +1,143 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/real-mainnet-nitro-archive-pebble-hash.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/real-mainnet-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
real-mainnet-archive:
image: ${REAL_NITRO_IMAGE:-offchainlabs/nitro-node}:${REAL_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=true
- --execution.forwarding-target=https://rpc.realforreal.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.realforreal.gelato.digital
- --node.data-availability.sequencer-inbox-address=0x466813324240923703236721233648302990016039913376
- --node.feed.input.url=wss://feed.realforreal.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/real-mainnet-archive
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${REAL_MAINNET_NITRO_ARCHIVE_PEBBLE_HASH_DATA:-real-mainnet-nitro-archive-pebble-hash}:/root/.arbitrum
- ./arb/real/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.real-mainnet-nitro-archive-pebble-hash-stripprefix.stripprefix.prefixes=/real-mainnet-archive
- traefik.http.services.real-mainnet-nitro-archive-pebble-hash.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.real-mainnet-nitro-archive-pebble-hash.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.real-mainnet-nitro-archive-pebble-hash.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.real-mainnet-nitro-archive-pebble-hash.rule=Host(`$DOMAIN`) && (Path(`/real-mainnet-archive`) || Path(`/real-mainnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.real-mainnet-nitro-archive-pebble-hash.rule=Path(`/real-mainnet-archive`) || Path(`/real-mainnet-archive/`)}
- traefik.http.routers.real-mainnet-nitro-archive-pebble-hash.middlewares=real-mainnet-nitro-archive-pebble-hash-stripprefix, ipallowlist
volumes:
real-mainnet-nitro-archive-pebble-hash:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: real
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non standard geth only slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non standard geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...
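This file is the pebble-backed twin of the leveldb archive file above: apart from the volume, service and router names, the functional change is the added --persistent.db-engine=pebble flag. Comparing the two before picking one is straightforward:

diff arb/nitro/real-mainnet-nitro-archive-leveldb-hash.yml \
     arb/nitro/real-mainnet-nitro-archive-pebble-hash.yml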


@@ -0,0 +1,146 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:arb/nitro/real-mainnet-nitro-pruned-pebble-path.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/real-mainnet \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
real-mainnet:
image: ${REAL_NITRO_IMAGE:-offchainlabs/nitro-node}:${REAL_MAINNET_NITRO_VERSION:-v3.6.4-28199cd}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
expose:
- 8545
command:
- --conf.file=/config/baseConfig.json
- --execution.caching.archive=false
- --execution.caching.state-scheme=path
- --execution.forwarding-target=https://rpc.realforreal.gelato.digital
- --execution.rpc.gas-cap=600000000
- --execution.sequencer.enable=false
- --http.addr=0.0.0.0
- --http.api=eth,net,web3,arb,txpool,debug
- --http.corsdomain=*
- --http.port=8545
- --http.vhosts=*
- --init.download-path=/tmp
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=6070
- --node.batch-poster.enable=false
- --node.data-availability.enable=true
- --node.data-availability.parent-chain-node-url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --node.data-availability.rest-aggregator.enable=true
- --node.data-availability.rest-aggregator.urls=https://das.realforreal.gelato.digital
- --node.data-availability.sequencer-inbox-address=0x51C4a227D59E49E26Ea07D8e4E9Af163da4c87A0
- --node.feed.input.url=wss://feed.realforreal.gelato.digital
- --node.sequencer=false
- --node.staker.enable=false
- --parent-chain.blob-client.beacon-url=${ETHEREUM_MAINNET_BEACON_REST}
- --parent-chain.connection.url=${ETHEREUM_MAINNET_EXECUTION_RPC}
- --persistent.chain=/root/.arbitrum/real-mainnet
- --persistent.db-engine=pebble
- --ws.addr=0.0.0.0
- --ws.origins=*
- --ws.port=8545
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${REAL_MAINNET_NITRO_PRUNED_PEBBLE_PATH_DATA:-real-mainnet-nitro-pruned-pebble-path}:/root/.arbitrum
- ./arb/real/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=true
- prometheus-scrape.port=6070
- prometheus-scrape.path=/debug/metrics/prometheus
- traefik.enable=true
- traefik.http.middlewares.real-mainnet-nitro-pruned-pebble-path-stripprefix.stripprefix.prefixes=/real-mainnet
- traefik.http.services.real-mainnet-nitro-pruned-pebble-path.loadbalancer.server.port=8545
- ${NO_SSL:-traefik.http.routers.real-mainnet-nitro-pruned-pebble-path.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.real-mainnet-nitro-pruned-pebble-path.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.real-mainnet-nitro-pruned-pebble-path.rule=Host(`$DOMAIN`) && (Path(`/real-mainnet`) || Path(`/real-mainnet/`))}
- ${NO_SSL:+traefik.http.routers.real-mainnet-nitro-pruned-pebble-path.rule=Path(`/real-mainnet`) || Path(`/real-mainnet/`)}
- traefik.http.routers.real-mainnet-nitro-pruned-pebble-path.middlewares=real-mainnet-nitro-pruned-pebble-path-stripprefix, ipallowlist
volumes:
real-mainnet-nitro-pruned-pebble-path:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: real
method-groups:
enabled:
- debug
- filter
methods:
disabled:
# not compatible with path state scheme
- name: debug_traceBlockByHash
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
# standard geth only
- name: debug_getRawBlock
- name: debug_getRawTransaction
- name: debug_getRawReceipts
- name: debug_getRawHeader
- name: debug_getBadBlocks
# non-standard, geth only, slightly dangerous
- name: debug_intermediateRoots
- name: debug_dumpBlock
# standard geth and erigon
- name: debug_accountRange
- name: debug_getModifiedAccountsByNumber
- name: debug_getModifiedAccountsByHash
# non-standard, geth and erigon
- name: eth_getRawTransactionByHash
- name: eth_getRawTransactionByBlockHashAndIndex
...

View File

@@ -0,0 +1,6 @@
{
"chain": {
"info-json": "[{\"chain-id\":656476,\"parent-chain-id\":421614,\"parent-chain-is-arbitrum\":true,\"chain-name\":\"Codex\",\"chain-config\":{\"homesteadBlock\":0,\"daoForkBlock\":null,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"berlinBlock\":0,\"londonBlock\":0,\"clique\":{\"period\":0,\"epoch\":0},\"arbitrum\":{\"EnableArbOS\":true,\"AllowDebugPrecompiles\":false,\"DataAvailabilityCommittee\":true,\"InitialArbOSVersion\":20,\"GenesisBlockNum\":0,\"MaxCodeSize\":24576,\"MaxInitCodeSize\":49152,\"InitialChainOwner\":\"0xF46B08D9E85df74b6f24Ad85A6a655c02857D5b8\"},\"chainId\":656476},\"rollup\":{\"bridge\":\"0xbf3D64671154D1FB0b27Cb1decbE1094d7016448\",\"inbox\":\"0x67F231eDC83a66556148673863e73D705422A678\",\"sequencer-inbox\":\"0xd5131c1924f080D45CA3Ae97262c0015F675004b\",\"rollup\":\"0x0A94003d3482128c89395aBd94a41DA8eeBB59f7\",\"validator-utils\":\"0xB11EB62DD2B352886A4530A9106fE427844D515f\",\"validator-wallet-creator\":\"0xEb9885B6c0e117D339F47585cC06a2765AaE2E0b\",\"deployed-at\":41549214}}]",
"name": "Codex"
}
}

View File

@@ -0,0 +1,6 @@
{
"chain": {
"info-json": "[{\"chain-id\":1829,\"parent-chain-id\":42170,\"parent-chain-is-arbitrum\":true,\"chain-name\":\"Playblock\",\"chain-config\":{\"homesteadBlock\":0,\"daoForkBlock\":null,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"berlinBlock\":0,\"londonBlock\":0,\"clique\":{\"period\":0,\"epoch\":0},\"arbitrum\":{\"EnableArbOS\":true,\"AllowDebugPrecompiles\":false,\"DataAvailabilityCommittee\":true,\"InitialArbOSVersion\":11,\"GenesisBlockNum\":0,\"MaxCodeSize\":24576,\"MaxInitCodeSize\":49152,\"InitialChainOwner\":\"0x10Fe3cb853F7ef551E1598d91436e95d41Aea45a\"},\"chainId\":1829},\"rollup\":{\"bridge\":\"0xD4FE46D2533E7d03382ac6cACF0547F336e59DC0\",\"inbox\":\"0xFF55fB76F5671dD9eB6c62EffF8D693Bb161a3ad\",\"sequencer-inbox\":\"0xe347C1223381b9Dcd6c0F61cf81c90175A7Bae77\",\"rollup\":\"0x04ea347cC6A258A7F65D67aFb60B1d487062A1d0\",\"validator-utils\":\"0x6c21303F5986180B1394d2C89f3e883890E2867b\",\"validator-wallet-creator\":\"0x2b0E04Dc90e3fA58165CB41E2834B44A56E766aF\",\"deployed-at\":55663578}}]",
"name": "Playblock"
}
}

View File

@@ -0,0 +1,6 @@
{
"chain": {
"info-json": "[{\"chain-id\":111188,\"parent-chain-id\":1,\"parent-chain-is-arbitrum\":false,\"chain-name\":\"real\",\"chain-config\":{\"homesteadBlock\":0,\"daoForkBlock\":null,\"daoForkSupport\":true,\"eip150Block\":0,\"eip150Hash\":\"0x0000000000000000000000000000000000000000000000000000000000000000\",\"eip155Block\":0,\"eip158Block\":0,\"byzantiumBlock\":0,\"constantinopleBlock\":0,\"petersburgBlock\":0,\"istanbulBlock\":0,\"muirGlacierBlock\":0,\"berlinBlock\":0,\"londonBlock\":0,\"clique\":{\"period\":0,\"epoch\":0},\"arbitrum\":{\"EnableArbOS\":true,\"AllowDebugPrecompiles\":false,\"DataAvailabilityCommittee\":true,\"InitialArbOSVersion\":11,\"GenesisBlockNum\":0,\"MaxCodeSize\":24576,\"MaxInitCodeSize\":49152,\"InitialChainOwner\":\"0xbB0385FebfD25E01527617938129A34bD497331e\"},\"chainId\":111188},\"rollup\":{\"bridge\":\"0x39D2EEcC8B55f46aE64789E2494dE777cDDeED03\",\"inbox\":\"0xf538671ddd60eE54BdD6FBb0E309c491A7A2df11\",\"sequencer-inbox\":\"0x51C4a227D59E49E26Ea07D8e4E9Af163da4c87A0\",\"rollup\":\"0xc4F7B37bE2bBbcF07373F28c61b1A259dfe49d2a\",\"validator-utils\":\"0x2b0E04Dc90e3fA58165CB41E2834B44A56E766aF\",\"validator-wallet-creator\":\"0x9CAd81628aB7D8e239F1A5B497313341578c5F71\",\"deployed-at\":19446518}}]",
"name": "real"
}
}
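For readability, the escaped info-json above for the real chain expands to the structure below (abridged to the fields the compose files reference); in particular, rollup.sequencer-inbox is the address the --node.data-availability.sequencer-inbox-address flag should match:

{
  "chain-id": 111188,
  "parent-chain-id": 1,
  "chain-name": "real",
  "chain-config": { "chainId": 111188, "arbitrum": { "DataAvailabilityCommittee": true } },
  "rollup": {
    "sequencer-inbox": "0x51C4a227D59E49E26Ea07D8e4E9Af163da4c87A0",
    "deployed-at": 19446518
  }
}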

1
arbitrum-nova.yml Symbolic link
View File

@@ -0,0 +1 @@
arb/nitro/arbitrum-nova-nitro-pruned-pebble-hash.yml

View File

@@ -0,0 +1,45 @@
services:
arbitrum-classic:
image: 'offchainlabs/arb-node:v1.4.5-e97c1a4'
stop_grace_period: 30s
user: root
volumes:
- ${ARBITRUM_ONE_MAINNET_ARBNODE_ARCHIVE_TRACE_DATA:-arbitrum-one-mainnet-arbnode-archive-trace}:/data
- ./arbitrum/classic-entrypoint.sh:/entrypoint.sh
expose:
- 8547
- 8548
entrypoint: ["/home/user/go/bin/arb-node"]
command:
- --l1.url=http://eth.drpc.org
- --core.checkpoint-gas-frequency=156250000
- --node.rpc.enable-l1-calls
- --node.cache.allow-slow-lookup
- --node.rpc.tracing.enable
- --node.rpc.addr=0.0.0.0
- --node.rpc.port=8547
- --node.rpc.tracing.namespace=trace
- --node.chain-id=42161
- --node.ws.addr=0.0.0.0
- --node.ws.port=8548
- --metrics
- --metrics-server.addr=0.0.0.0
- --metrics-server.port=7070
- --l2.disable-upstream
- --persistent.chain=/data/datadir/
- --persistent.global-config=/data/
restart: unless-stopped
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.arbitrum-one-mainnet-arbnode-archive-trace-stripprefix.stripprefix.prefixes=/arbitrum-classic"
- "traefik.http.services.arbitrum-one-mainnet-arbnode-archive-trace.loadbalancer.server.port=8547"
- "${NO_SSL:-traefik.http.routers.arbitrum-one-mainnet-arbnode-archive-trace.entrypoints=websecure}"
- "${NO_SSL:-traefik.http.routers.arbitrum-one-mainnet-arbnode-archive-trace.tls.certresolver=myresolver}"
- "${NO_SSL:-traefik.http.routers.arbitrum-one-mainnet-arbnode-archive-trace.rule=Host(`$DOMAIN`) && PathPrefix(`/arbitrum-classic`)}"
- "${NO_SSL:+traefik.http.routers.arbitrum-one-mainnet-arbnode-archive-trace.rule=PathPrefix(`/arbitrum-classic`)}"
- "traefik.http.routers.arbitrum-one-mainnet-arbnode-archive-trace.middlewares=arbitrum-one-mainnet-arbnode-archive-trace-stripprefix, ipwhitelist"
networks:
- chains
volumes:
arbitrum-one-mainnet-arbnode-archive-trace:

View File

@@ -0,0 +1 @@
arb/nitro/alephzero-mainnet-nitro-archive-leveldb-hash.yml

1
arbitrum-one.yml Symbolic link
View File

@@ -0,0 +1 @@
arb/nitro/arbitrum-one-nitro-pruned-pebble-hash.yml

View File

@@ -0,0 +1 @@
arb/nitro/arbitrum-sepolia-nitro-archive-pebble-hash.yml

View File

@@ -0,0 +1 @@
arb/nitro/arbitrum-sepolia-nitro-pruned-pebble-hash.yml

1
arbitrum-sepolia.yml Symbolic link
View File

@@ -0,0 +1 @@
arb/nitro/arbitrum-sepolia-nitro-pruned-pebble-hash.yml

1
avalanche-fuji.yml Symbolic link
View File

@@ -0,0 +1 @@
avalanche/go/avalanche-fuji-go-pruned-pebbledb.yml

View File

@@ -0,0 +1 @@
avalanche/go/avalanche-mainnet-go-archive-leveldb.yml

View File

@@ -0,0 +1,51 @@
services:
avalanche-archive-client:
image: avaplatform/avalanchego:${AVALANCHEGO_VERSION:-v1.12.2}
ulimits:
nofile: 1048576
expose:
- "9650"
- "30720"
ports:
- "30720:30720/tcp"
- "30720:30720/udp"
volumes:
- ${AVALANCHE_MAINNET_GO_ARCHIVE_DATA:-avalanche-mainnet-go-archive}:/root/.avalanchego
- ./avalanche/configs/chains/C/archive-config.json:/root/.avalanchego/configs/chains/C/config.json
environment:
- "IP=${IP}"
networks:
- chains
command: "/avalanchego/build/avalanchego --http-host= --http-allowed-hosts=* --staking-port=30720 --public-ip=$IP"
restart: unless-stopped
avalanche-archive:
restart: unless-stopped
image: nginx
depends_on:
- avalanche-archive-client
expose:
- 80
environment:
PROXY_HOST: avalanche-archive-client
RPC_PORT: 9650
RPC_PATH: /ext/bc/C/rpc
WS_PORT: 9650
WS_PATH: /ext/bc/C/ws
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.avalanche-mainnet-go-archive-stripprefix.stripprefix.prefixes=/avalanche-archive"
- "traefik.http.services.avalanche-mainnet-go-archive.loadbalancer.server.port=80"
- "${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-archive.entrypoints=websecure}"
- "${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-archive.tls.certresolver=myresolver}"
- "${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-archive.rule=Host(`$DOMAIN`) && PathPrefix(`/avalanche-archive`)}"
- "${NO_SSL:+traefik.http.routers.avalanche-mainnet-go-archive.rule=PathPrefix(`/avalanche-archive`)}"
- "traefik.http.routers.avalanche-mainnet-go-archive.middlewares=avalanche-mainnet-go-archive-stripprefix, ipwhitelist"
volumes:
avalanche-mainnet-go-archive:

1
avalanche-mainnet.yml Symbolic link
View File

@@ -0,0 +1 @@
avalanche/go/avalanche-mainnet-go-pruned-pebbledb.yml

View File

@@ -0,0 +1,4 @@
{
"state-sync-enabled": false,
"pruning-enabled": false
}

View File

@@ -0,0 +1,43 @@
{
"snowman-api-enabled": false,
"coreth-admin-api-enabled": false,
"coreth-admin-api-dir": "",
"eth-apis": [
"public-eth",
"public-eth-filter",
"net",
"web3",
"internal-public-eth",
"internal-public-blockchain",
"internal-public-transaction-pool",
"internal-public-account"
],
"continuous-profiler-dir": "",
"continuous-profiler-frequency": 900000000000,
"continuous-profiler-max-files": 5,
"rpc-gas-cap": 50000000,
"rpc-tx-fee-cap": 100,
"preimages-enabled": false,
"pruning-enabled": true,
"snapshot-async": true,
"snapshot-verification-enabled": false,
"metrics-enabled": false,
"metrics-expensive-enabled": false,
"local-txs-enabled": false,
"api-max-duration": 0,
"ws-cpu-refill-rate": 0,
"ws-cpu-max-stored": 0,
"api-max-blocks-per-request": 0,
"allow-unfinalized-queries": false,
"allow-unprotected-txs": false,
"keystore-directory": "",
"keystore-external-signer": "",
"keystore-insecure-unlock-allowed": false,
"remote-tx-gossip-only-enabled": false,
"tx-regossip-frequency": 60000000000,
"tx-regossip-max-size": 15,
"log-level": "debug",
"offline-pruning-enabled": false,
"offline-pruning-bloom-filter-size": 512,
"offline-pruning-data-directory": ""
}

View File

@@ -0,0 +1,6 @@
{
"state-sync-enabled": true,
"pruning-enabled": true,
"offline-pruning-enabled": true,
"offline-pruning-data-directory": "/root/.avalanchego/offline-pruning"
}

View File

@@ -0,0 +1,7 @@
{
"state-sync-enabled": true,
"pruning-enabled": true,
"offline-pruning-enabled": false,
"offline-pruning-data-directory": "/root/.avalanchego/offline-pruning",
"rpc-gas-cap": 600000000
}

View File

@@ -0,0 +1,4 @@
{
"state-sync-enabled": false,
"pruning-enabled": false
}

View File

@@ -0,0 +1,128 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:avalanche/go/avalanche-fuji-go-archive-leveldb.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/avalanche-fuji-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
avalanche-fuji-archive-client:
image: ${AVALANCHE_GO_IMAGE:-avaplatform/avalanchego}:${AVALANCHE_FUJI_GO_VERSION:-v1.13.0-fuji}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
ports:
- 10046:10046
- 10046:10046/udp
expose:
- 9650
entrypoint: [/avalanchego/build/avalanchego]
command:
- --chain-config-dir=/config/archive
- --db-type=leveldb
- --http-allowed-hosts=*
- --http-host=
- --network-id=fuji
- --public-ip=${IP}
- --staking-port=10046
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${AVALANCHE_FUJI_GO_ARCHIVE_LEVELDB_DATA:-avalanche-fuji-go-archive-leveldb}:/root/.avalanchego
- ./avalanche/fuji:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
avalanche-fuji-archive:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: avalanche-fuji-archive-client
RPC_PATH: /ext/bc/C/rpc
RPC_PORT: 9650
WS_PATH: /ext/bc/C/ws
WS_PORT: 9650
restart: unless-stopped
depends_on:
- avalanche-fuji-archive-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.avalanche-fuji-go-archive-leveldb-stripprefix.stripprefix.prefixes=/avalanche-fuji-archive
- traefik.http.services.avalanche-fuji-go-archive-leveldb.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-archive-leveldb.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-archive-leveldb.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-archive-leveldb.rule=Host(`$DOMAIN`) && (Path(`/avalanche-fuji-archive`) || Path(`/avalanche-fuji-archive/`))}
- ${NO_SSL:+traefik.http.routers.avalanche-fuji-go-archive-leveldb.rule=Path(`/avalanche-fuji-archive`) || Path(`/avalanche-fuji-archive/`)}
- traefik.http.routers.avalanche-fuji-go-archive-leveldb.middlewares=avalanche-fuji-go-archive-leveldb-stripprefix, ipallowlist
volumes:
avalanche-fuji-go-archive-leveldb:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: avalanche
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...

View File

@@ -0,0 +1,128 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:avalanche/go/avalanche-fuji-go-pruned-pebbledb.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/avalanche-fuji \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
avalanche-fuji-client:
image: ${AVALANCHE_GO_IMAGE:-avaplatform/avalanchego}:${AVALANCHE_FUJI_GO_VERSION:-v1.13.0-fuji}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
ports:
- 10350:10350
- 10350:10350/udp
expose:
- 9650
entrypoint: [/avalanchego/build/avalanchego]
command:
- --chain-config-dir=/config/pruned
- --db-type=pebbledb
- --http-allowed-hosts=*
- --http-host=
- --network-id=fuji
- --public-ip=${IP}
- --staking-port=10350
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${AVALANCHE_FUJI_GO_PRUNED_PEBBLEDB_DATA:-avalanche-fuji-go-pruned-pebbledb}:/root/.avalanchego
- ./avalanche/fuji:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
avalanche-fuji:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: avalanche-fuji-client
RPC_PATH: /ext/bc/C/rpc
RPC_PORT: 9650
WS_PATH: /ext/bc/C/ws
WS_PORT: 9650
restart: unless-stopped
depends_on:
- avalanche-fuji-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.avalanche-fuji-go-pruned-pebbledb-stripprefix.stripprefix.prefixes=/avalanche-fuji
- traefik.http.services.avalanche-fuji-go-pruned-pebbledb.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-pruned-pebbledb.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-pruned-pebbledb.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.avalanche-fuji-go-pruned-pebbledb.rule=Host(`$DOMAIN`) && (Path(`/avalanche-fuji`) || Path(`/avalanche-fuji/`))}
- ${NO_SSL:+traefik.http.routers.avalanche-fuji-go-pruned-pebbledb.rule=Path(`/avalanche-fuji`) || Path(`/avalanche-fuji/`)}
- traefik.http.routers.avalanche-fuji-go-pruned-pebbledb.middlewares=avalanche-fuji-go-pruned-pebbledb-stripprefix, ipallowlist
volumes:
avalanche-fuji-go-pruned-pebbledb:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: avalanche
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...

View File

@@ -0,0 +1,128 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:avalanche/go/avalanche-mainnet-go-archive-leveldb.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/avalanche-mainnet-archive \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
avalanche-mainnet-archive-client:
image: ${AVALANCHE_GO_IMAGE:-avaplatform/avalanchego}:${AVALANCHE_MAINNET_GO_VERSION:-v1.13.0}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
ports:
- 12934:12934
- 12934:12934/udp
expose:
- 9650
entrypoint: [/avalanchego/build/avalanchego]
command:
- --chain-config-dir=/config/archive
- --db-type=leveldb
- --http-allowed-hosts=*
- --http-host=
- --network-id=mainnet
- --public-ip=${IP}
- --staking-port=12934
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${AVALANCHE_MAINNET_GO_ARCHIVE_LEVELDB_DATA:-avalanche-mainnet-go-archive-leveldb}:/root/.avalanchego
- ./avalanche/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
avalanche-mainnet-archive:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: avalanche-mainnet-archive-client
RPC_PATH: /ext/bc/C/rpc
RPC_PORT: 9650
WS_PATH: /ext/bc/C/ws
WS_PORT: 9650
restart: unless-stopped
depends_on:
- avalanche-mainnet-archive-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.avalanche-mainnet-go-archive-leveldb-stripprefix.stripprefix.prefixes=/avalanche-mainnet-archive
- traefik.http.services.avalanche-mainnet-go-archive-leveldb.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-archive-leveldb.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-archive-leveldb.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-archive-leveldb.rule=Host(`$DOMAIN`) && (Path(`/avalanche-mainnet-archive`) || Path(`/avalanche-mainnet-archive/`))}
- ${NO_SSL:+traefik.http.routers.avalanche-mainnet-go-archive-leveldb.rule=Path(`/avalanche-mainnet-archive`) || Path(`/avalanche-mainnet-archive/`)}
- traefik.http.routers.avalanche-mainnet-go-archive-leveldb.middlewares=avalanche-mainnet-go-archive-leveldb-stripprefix, ipallowlist
volumes:
avalanche-mainnet-go-archive-leveldb:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: avalanche
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...

View File

@@ -0,0 +1,128 @@
---
x-logging-defaults: &logging-defaults
driver: json-file
options:
max-size: "10m"
max-file: "3"
# Usage:
#
# mkdir rpc && cd rpc
#
# git init
# git remote add origin https://github.com/StakeSquid/ethereum-rpc-docker.git
# git fetch origin vibe
# git checkout origin/vibe
#
# docker run --rm alpine sh -c "printf '0x'; head -c32 /dev/urandom | xxd -p -c 64" > .jwtsecret
#
# env
# ...
# IP=$(curl ipinfo.io/ip)
# DOMAIN=${IP}.traefik.me
# COMPOSE_FILE=base.yml:rpc.yml:avalanche/go/avalanche-mainnet-go-pruned-pebbledb.yml
#
# docker compose up -d
#
# curl -X POST https://${IP}.traefik.me/avalanche-mainnet \
# -H "Content-Type: application/json" \
# --data '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}'
services:
avalanche-mainnet-client:
image: ${AVALANCHE_GO_IMAGE:-avaplatform/avalanchego}:${AVALANCHE_MAINNET_GO_VERSION:-v1.13.0}
sysctls:
# TCP Performance
net.ipv4.tcp_slow_start_after_idle: 0 # Disable slow start after idle
net.ipv4.tcp_no_metrics_save: 1 # Disable metrics cache
net.ipv4.tcp_rmem: 4096 87380 16777216 # Increase TCP read buffers
net.ipv4.tcp_wmem: 4096 87380 16777216 # Increase TCP write buffers
net.core.somaxconn: 32768 # Higher connection queue
# Memory/Connection Management
# net.core.netdev_max_backlog: 50000 # Increase network buffer
net.ipv4.tcp_max_syn_backlog: 30000 # More SYN requests
net.ipv4.tcp_max_tw_buckets: 2000000 # Allow more TIME_WAIT sockets
ulimits:
nofile: 1048576 # Max open files (for RPC/WS connections)
user: root
ports:
- 11929:11929
- 11929:11929/udp
expose:
- 9650
entrypoint: [/avalanchego/build/avalanchego]
command:
- --chain-config-dir=/config/pruned
- --db-type=pebbledb
- --http-allowed-hosts=*
- --http-host=
- --network-id=mainnet
- --public-ip=${IP}
- --staking-port=11929
restart: unless-stopped
stop_grace_period: 5m
networks:
- chains
volumes:
- ${AVALANCHE_MAINNET_GO_PRUNED_PEBBLEDB_DATA:-avalanche-mainnet-go-pruned-pebbledb}:/root/.avalanchego
- ./avalanche/mainnet:/config
- /slowdisk:/slowdisk
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
avalanche-mainnet:
image: nginx
expose:
- '80'
environment:
PROXY_HOST: avalanche-mainnet-client
RPC_PATH: /ext/bc/C/rpc
RPC_PORT: 9650
WS_PATH: /ext/bc/C/ws
WS_PORT: 9650
restart: unless-stopped
depends_on:
- avalanche-mainnet-client
networks:
- chains
volumes:
- ./nginx-proxy:/etc/nginx/templates
logging: *logging-defaults
labels:
- prometheus-scrape.enabled=false
- traefik.enable=true
- traefik.http.middlewares.avalanche-mainnet-go-pruned-pebbledb-stripprefix.stripprefix.prefixes=/avalanche-mainnet
- traefik.http.services.avalanche-mainnet-go-pruned-pebbledb.loadbalancer.server.port=80
- ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb.entrypoints=websecure}
- ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb.tls.certresolver=myresolver}
- ${NO_SSL:-traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb.rule=Host(`$DOMAIN`) && (Path(`/avalanche-mainnet`) || Path(`/avalanche-mainnet/`))}
- ${NO_SSL:+traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb.rule=Path(`/avalanche-mainnet`) || Path(`/avalanche-mainnet/`)}
- traefik.http.routers.avalanche-mainnet-go-pruned-pebbledb.middlewares=avalanche-mainnet-go-pruned-pebbledb-stripprefix, ipallowlist
volumes:
avalanche-mainnet-go-pruned-pebbledb:
x-upstreams:
- id: $${ID}
labels:
provider: $${PROVIDER}
connection:
generic:
rpc:
url: $${RPC_URL}
ws:
frameSize: 20Mb
msgSize: 50Mb
url: $${WS_URL}
chain: avalanche
method-groups:
enabled:
- debug
- filter
methods:
disabled:
enabled:
- name: txpool_content # TODO: should be disabled for rollup nodes
...

View File

@@ -0,0 +1,4 @@
{
"state-sync-enabled": false,
"pruning-enabled": false
}

View File

35
backup-http.yml Normal file
View File

@@ -0,0 +1,35 @@
services:
backup-http:
image: abassi/node-http-server:latest
restart: unless-stopped
volumes:
- /backup:/dir_to_serve
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.backup-server-stripprefix.stripprefix.prefixes=/backup"
- "traefik.http.services.backup-server.loadbalancer.server.port=8080"
- "traefik.http.routers.backup-server.entrypoints=websecure"
- "traefik.http.routers.backup-server.tls.certresolver=myresolver"
- "traefik.http.routers.backup-server.rule=Host(`$DOMAIN`) && PathPrefix(`/backup`)"
- "traefik.http.routers.backup-server.middlewares=backup-server-stripprefix"
networks:
- chains
backup-dav:
image: 117503445/go_webdav:latest
restart: unless-stopped
environment:
- "dav=/null,/webdav,null,null,false"
volumes:
- /backup:/webdav
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.backup-storage-stripprefix.stripprefix.prefixes=/dav"
- "traefik.http.services.backup-storage.loadbalancer.server.port=80"
- "traefik.http.routers.backup-storage.entrypoints=websecure"
- "traefik.http.routers.backup-storage.tls.certresolver=myresolver"
- "traefik.http.routers.backup-storage.rule=Host(`$DOMAIN`) && PathPrefix(`/dav`)"
- "traefik.http.routers.backup-storage.middlewares=backup-storage-stripprefix"
networks:
- chains
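With this stack running, archives written to /backup can be fetched straight over HTTPS; the filename below is only a placeholder following the naming scheme used by the backup script shown next (backup-node.sh):

curl -O https://$DOMAIN/backup/rpc_arbitrum-one-mainnet-arbnode-archive-trace-2025-05-29-00-00-00-100G.tar.zst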

51
backup-node.sh Executable file
View File

@@ -0,0 +1,51 @@
#!/bin/bash
backup_dir="/backup"
if [[ -n $2 ]]; then
echo "upload backup via webdav to $2"
else
if [ ! -d "$backup_dir" ]; then
echo "Error: /backup directory does not exist"
exit 1
fi
fi
# Read the JSON input and extract the list of keys
keys=$(cat /root/rpc/$1.yml | yaml2json - | jq '.volumes' | jq -r 'keys[]')
# Iterate over the list of keys
for key in $keys; do
echo "Executing command with key: /var/lib/docker/volumes/rpc_$key/_data"
source_folder="/var/lib/docker/volumes/rpc_$key/_data"
folder_size=$(du -shL "$source_folder" | awk '{
size = $1
sub(/[Kk]$/, "", size) # Remove 'K' suffix if present
sub(/[Mm]$/, "", size) # Remove 'M' suffix if present
sub(/[Gg]$/, "", size) # Remove 'G' suffix if present
sub(/[Tt]$/, "", size) # Remove 'T' suffix if present
if ($1 ~ /[Kk]$/) {
size *= 0.000001 # Convert kilobytes to gigabytes
} else if ($1 ~ /[Mm]$/) {
size *= 0.001 # Convert megabytes to gigabytes
} else if ($1 ~ /[Tt]$/) {
size *= 1000 # convert terabytes to gigabytes
}
print size
}')
folder_size_gb=$(printf "%.0f" "$folder_size")
target_file="rpc_$key-$(date +'%Y-%m-%d-%H-%M-%S')-${folder_size_gb}G.tar.zst"
#echo "$target_file"
if [[ -n $2 ]]; then
tar -cf - --dereference "$source_folder" | pv -pterb -s $(du -sb "$source_folder" | awk '{print $1}') | zstd | curl -X PUT --upload-file - "$2/null/uploading-$target_file"
curl -X MOVE -H "Destination: /null/$target_file" "$2/null/uploading-$target_file"
else
tar -cf - --dereference "$source_folder" | pv -pterb -s $(du -sb "$source_folder" | awk '{print $1}') | zstd -o "/backup/uploading-$target_file"
mv "/backup/uploading-$target_file" "/backup/$target_file"
fi
done
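The script expects the compose file's base name (a .yml under /root/rpc) as its first argument and an optional WebDAV base URL as the second; with the backup-http.yml stack above running on the receiving host, invocations could look like this (host name and target are illustrative):

# archive every volume of arbitrum-one.yml into /backup on this machine
./backup-node.sh arbitrum-one

# stream the archive to a remote host that runs backup-http.yml
./backup-node.sh arbitrum-one https://1.2.3.4.traefik.me/dav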

View File

@@ -0,0 +1 @@
op/erigon/base-mainnet-op-erigon-archive-trace.yml

View File

@@ -0,0 +1 @@
op/reth/base-mainnet-op-reth-archive-trace.yml

View File

@@ -0,0 +1 @@
op/reth/base-mainnet-op-reth-pruned-trace.yml

1
base-mainnet.yml Symbolic link
View File

@@ -0,0 +1 @@
op/geth/base-mainnet-op-geth-pruned-pebble-path.yml

View File

@@ -0,0 +1 @@
op/reth/base-sepolia-op-reth-pruned-trace.yml

1
base-sepolia.yml Symbolic link
View File

@@ -0,0 +1 @@
op/geth/base-sepolia-op-geth-pruned-pebble-path.yml

6
base.yml Normal file
View File

@@ -0,0 +1,6 @@
networks:
chains:
driver: bridge
ipam:
config:
- subnet: ${CHAINS_SUBNET:-192.168.0.0/26}

24
benchmark-proxy.yml Normal file
View File

@@ -0,0 +1,24 @@
services:
benchmark-proxy:
build:
context: ./benchmark-proxy
dockerfile: Dockerfile
expose:
- "8080"
environment:
- LISTEN_ADDR=:8080
- SUMMARY_INTERVAL=60
- PRIMARY_BACKEND=${BENCHMARK_PROXY_PRIMARY_BACKEND}
- SECONDARY_BACKENDS=${BENCHMARK_PROXY_SECONDARY_BACKENDS}
restart: unless-stopped
networks:
- chains
labels:
- "traefik.enable=true"
- "traefik.http.middlewares.benchmark-proxy-stripprefix.stripprefix.prefixes=/benchmark"
- "traefik.http.services.benchmark-proxy.loadbalancer.server.port=8080"
- "${NO_SSL:-traefik.http.routers.benchmark-proxy.entrypoints=websecure}"
- "${NO_SSL:-traefik.http.routers.benchmark-proxy.tls.certresolver=myresolver}"
- "${NO_SSL:-traefik.http.routers.benchmark-proxy.rule=Host(`$DOMAIN`) && PathPrefix(`/benchmark`)}"
- "${NO_SSL:+traefik.http.routers.benchmark-proxy.rule=PathPrefix(`/benchmark`)}"
- "traefik.http.routers.benchmark-proxy.middlewares=benchmark-proxy-stripprefix, ipwhitelist"

View File

@@ -0,0 +1,27 @@
# Build stage
FROM golang:1.21-alpine AS builder
WORKDIR /app
# Initialize Go modules if not already done
RUN go mod init benchmark-proxy
# Add the dependency before building
RUN go get github.com/gorilla/websocket
# Copy source code
COPY . .
# Build the application with CGO disabled for a static binary
RUN CGO_ENABLED=0 go build -ldflags="-s -w" -o benchmark-proxy main.go
# Runtime stage
FROM alpine:latest
WORKDIR /app
# Copy the binary from the build stage
COPY --from=builder /app/benchmark-proxy .
# Run the application
ENTRYPOINT ["./benchmark-proxy"]

7
benchmark-proxy/go.mod Normal file
View File

@@ -0,0 +1,7 @@
module benchmark-proxy
go 1.21
require github.com/gorilla/websocket v1.5.1
require golang.org/x/net v0.17.0 // indirect

4
benchmark-proxy/go.sum Normal file
View File

@@ -0,0 +1,4 @@
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=

3235
benchmark-proxy/main.go Normal file

File diff suppressed because it is too large

View File

@@ -0,0 +1,22 @@
[package]
name = "benchmark_proxy_rust"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
tokio = { version = "1", features = ["full"] }
hyper = { version = "0.14", features = ["full"] }
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
tokio-tungstenite = { version = "0.21", features = ["native-tls"] }
log = "0.4"
env_logger = "0.10"
dashmap = "5.5"
reqwest = { version = "0.11", features = ["json", "rustls-tls"], default-features = false }
thiserror = "1.0"
futures-util = "0.3"
http = "0.2"
url = "2.5"
lazy_static = "1.4.0"

View File

@@ -0,0 +1,343 @@
use crate::{config::AppConfig, structures::Backend};
use dashmap::DashMap;
use futures_util::{stream::SplitSink, SinkExt, StreamExt};
use log::{debug, error, info, warn};
use serde_json::json;
use std::{
collections::HashMap,
sync::{Arc, Mutex},
time::{Duration, SystemTime},
};
use tokio::{
net::TcpStream,
sync::watch,
task::JoinHandle,
time::sleep,
};
use tokio_tungstenite::{
connect_async,
tungstenite::{protocol::Message as TungsteniteMessage, Error as TungsteniteError},
MaybeTlsStream, WebSocketStream,
};
use url::Url;
const RECONNECT_DELAY: Duration = Duration::from_secs(10);
#[derive(serde::Deserialize, Debug)]
struct SubscriptionMessage {
#[allow(dead_code)] // May not be used if only checking method
jsonrpc: Option<String>,
method: Option<String>,
params: Option<SubscriptionParams>,
result: Option<serde_json::Value>, // For subscription ID confirmation
id: Option<serde_json::Value>, // For request echo
}
#[derive(serde::Deserialize, Debug)]
struct SubscriptionParams {
subscription: String,
result: HeaderData,
}
#[derive(serde::Deserialize, Debug)]
struct HeaderData {
number: String, // Hex string like "0x123"
// Add other fields like "hash" if ever needed for more advanced logic
}
pub struct BlockHeightTracker {
config: Arc<AppConfig>,
backends: Vec<Backend>,
block_heights: Arc<DashMap<String, u64>>,
last_update_times: Arc<DashMap<String, SystemTime>>,
shutdown_tx: watch::Sender<bool>,
tasks: Arc<Mutex<Vec<JoinHandle<()>>>>,
enable_detailed_logs: bool,
}
impl BlockHeightTracker {
pub fn new(
config: Arc<AppConfig>,
all_backends: &[Backend],
) -> Option<Arc<Self>> {
if !config.enable_block_height_tracking {
info!("BlockHeightTracker disabled by configuration.");
return None;
}
info!("Initializing BlockHeightTracker for {} backends.", all_backends.len());
let (shutdown_tx, _shutdown_rx) = watch::channel(false); // _shutdown_rx cloned by tasks
Some(Arc::new(Self {
config: config.clone(),
backends: all_backends.to_vec(), // Clones the slice into a Vec
block_heights: Arc::new(DashMap::new()),
last_update_times: Arc::new(DashMap::new()),
shutdown_tx,
tasks: Arc::new(Mutex::new(Vec::new())),
enable_detailed_logs: config.enable_detailed_logs,
}))
}
pub fn start_monitoring(self: Arc<Self>) {
if self.backends.is_empty() {
info!("BHT: No backends configured for monitoring.");
return;
}
info!("BHT: Starting block height monitoring for {} backends.", self.backends.len());
let mut tasks_guard = self.tasks.lock().unwrap();
for backend in self.backends.clone() {
// Only monitor if backend has a URL, primarily for non-primary roles or specific needs
// For this implementation, we assume all backends in the list are candidates.
let task_self = self.clone();
let task_backend = backend.clone(); // Clone backend for the task
let task_shutdown_rx = self.shutdown_tx.subscribe();
let task = tokio::spawn(async move {
task_self
.monitor_backend_connection(task_backend, task_shutdown_rx)
.await;
});
tasks_guard.push(task);
}
}
async fn monitor_backend_connection(
self: Arc<Self>,
backend: Backend,
mut shutdown_rx: watch::Receiver<bool>,
) {
info!("BHT: Starting monitoring for backend: {}", backend.name);
loop { // Outer reconnect loop
tokio::select! {
biased;
_ = shutdown_rx.changed() => {
if *shutdown_rx.borrow() {
info!("BHT: Shutdown signal received for {}, terminating monitoring.", backend.name);
break; // Break outer reconnect loop
}
}
_ = tokio::time::sleep(Duration::from_millis(10)) => { // Give a chance for shutdown signal before attempting connection
// Proceed to connection attempt
}
}
if *shutdown_rx.borrow() { break; }
let mut ws_url = backend.url.clone();
let scheme = if backend.url.scheme() == "https" { "wss" } else { "ws" };
if let Err(_e) = ws_url.set_scheme(scheme) {
error!("BHT: Failed to set scheme to {} for backend {}: {}", scheme, backend.name, backend.url);
sleep(RECONNECT_DELAY).await;
continue;
}
if self.enable_detailed_logs {
debug!("BHT: Attempting to connect to {} for backend {}", ws_url, backend.name);
}
match connect_async(ws_url.clone()).await {
Ok((ws_stream, _response)) => {
if self.enable_detailed_logs {
info!("BHT: Successfully connected to WebSocket for backend: {}", backend.name);
}
let (mut write, mut read) = ws_stream.split();
let subscribe_payload = json!({
"jsonrpc": "2.0",
"method": "eth_subscribe",
"params": ["newHeads"],
"id": 1 // Static ID for this subscription
});
if let Err(e) = write.send(TungsteniteMessage::Text(subscribe_payload.to_string())).await {
error!("BHT: Failed to send eth_subscribe to {}: {}. Retrying connection.", backend.name, e);
// Connection will be retried by the outer loop after delay
sleep(RECONNECT_DELAY).await;
continue;
}
if self.enable_detailed_logs {
debug!("BHT: Sent eth_subscribe payload to {}", backend.name);
}
// Inner message reading loop
loop {
tokio::select! {
biased;
_ = shutdown_rx.changed() => {
if *shutdown_rx.borrow() {
info!("BHT: Shutdown signal for {}, closing WebSocket and stopping.", backend.name);
// Attempt to close the WebSocket gracefully
let _ = write.send(TungsteniteMessage::Close(None)).await;
break; // Break inner message_read_loop
}
}
maybe_message = read.next() => {
match maybe_message {
Some(Ok(message)) => {
match message {
TungsteniteMessage::Text(text_msg) => {
if self.enable_detailed_logs {
debug!("BHT: Received text from {}: {}", backend.name, text_msg);
}
match serde_json::from_str::<SubscriptionMessage>(&text_msg) {
Ok(parsed_msg) => {
if parsed_msg.method.as_deref() == Some("eth_subscription") {
if let Some(params) = parsed_msg.params {
let block_num_str = params.result.number;
match u64::from_str_radix(block_num_str.trim_start_matches("0x"), 16) {
Ok(block_num) => {
self.block_heights.insert(backend.name.clone(), block_num);
self.last_update_times.insert(backend.name.clone(), SystemTime::now());
if self.enable_detailed_logs {
debug!("BHT: Updated block height for {}: {} (raw: {})", backend.name, block_num, block_num_str);
}
}
Err(e) => error!("BHT: Failed to parse block number hex '{}' for {}: {}", block_num_str, backend.name, e),
}
}
} else if parsed_msg.id == Some(json!(1)) && parsed_msg.result.is_some() {
if self.enable_detailed_logs {
info!("BHT: Received subscription confirmation from {}: {:?}", backend.name, parsed_msg.result);
}
} else {
if self.enable_detailed_logs {
debug!("BHT: Received other JSON message from {}: {:?}", backend.name, parsed_msg);
}
}
}
Err(e) => {
if self.enable_detailed_logs {
warn!("BHT: Failed to parse JSON from {}: {}. Message: {}", backend.name, e, text_msg);
}
}
}
}
TungsteniteMessage::Binary(bin_msg) => {
if self.enable_detailed_logs {
debug!("BHT: Received binary message from {} ({} bytes), ignoring.", backend.name, bin_msg.len());
}
}
TungsteniteMessage::Ping(ping_data) => {
if self.enable_detailed_logs { debug!("BHT: Received Ping from {}, sending Pong.", backend.name); }
// tokio-tungstenite handles Pongs automatically by default if feature "rustls-pong" or "native-tls-pong" is enabled.
// If not, manual send:
// if let Err(e) = write.send(TungsteniteMessage::Pong(ping_data)).await {
// error!("BHT: Failed to send Pong to {}: {}", backend.name, e);
// break; // Break inner loop, connection might be unstable
// }
}
TungsteniteMessage::Pong(_) => { /* Usually no action needed */ }
TungsteniteMessage::Close(_) => {
if self.enable_detailed_logs { info!("BHT: WebSocket closed by server for {}.", backend.name); }
break; // Break inner loop
}
TungsteniteMessage::Frame(_) => { /* Raw frame, usually not handled directly */ }
}
}
Some(Err(e)) => {
match e {
TungsteniteError::ConnectionClosed | TungsteniteError::AlreadyClosed => {
if self.enable_detailed_logs { info!("BHT: WebSocket connection closed for {}.", backend.name); }
}
_ => {
error!("BHT: Error reading from WebSocket for {}: {:?}. Attempting reconnect.", backend.name, e);
}
}
break; // Break inner loop, will trigger reconnect
}
None => {
if self.enable_detailed_logs { info!("BHT: WebSocket stream ended for {}. Attempting reconnect.", backend.name); }
break; // Break inner loop, will trigger reconnect
}
}
}
} // End of inner select
if *shutdown_rx.borrow() { break; } // Ensure inner loop breaks if shutdown occurred
} // End of inner message reading loop
}
Err(e) => {
warn!("BHT: Failed to connect to WebSocket for backend {}: {:?}. Retrying after delay.", backend.name, e);
}
}
// If we are here, it means the connection was dropped or failed. Wait before retrying.
if !*shutdown_rx.borrow() { // Don't sleep if shutting down
sleep(RECONNECT_DELAY).await;
}
} // End of outer reconnect loop
info!("BHT: Stopped monitoring backend {}.", backend.name);
}
pub fn is_secondary_behind(&self, secondary_name: &str) -> bool {
if !self.config.enable_block_height_tracking { return false; } // If tracking is off, assume not behind
let primary_info = self.backends.iter().find(|b| b.role == "primary");
let primary_name = match primary_info {
Some(b) => b.name.clone(),
None => {
if self.enable_detailed_logs {
warn!("BHT: No primary backend configured for is_secondary_behind check.");
}
return false;
}
};
let primary_height_opt = self.block_heights.get(&primary_name).map(|h_ref| *h_ref.value());
let primary_height = match primary_height_opt {
Some(h) => h,
None => {
if self.enable_detailed_logs {
debug!("BHT: Primary '{}' height unknown for is_secondary_behind check with {}.", primary_name, secondary_name);
}
return false; // Primary height unknown, can't reliably determine if secondary is behind
}
};
let secondary_height_opt = self.block_heights.get(secondary_name).map(|h_ref| *h_ref.value());
match secondary_height_opt {
Some(secondary_height_val) => {
if primary_height > secondary_height_val {
let diff = primary_height - secondary_height_val;
let is_behind = diff > self.config.max_blocks_behind;
if self.enable_detailed_logs && is_behind {
debug!("BHT: Secondary '{}' (height {}) is behind primary '{}' (height {}). Diff: {}, Max allowed: {}",
secondary_name, secondary_height_val, primary_name, primary_height, diff, self.config.max_blocks_behind);
}
return is_behind;
}
false // Secondary is not behind or is ahead
}
None => {
if self.enable_detailed_logs {
debug!("BHT: Secondary '{}' height unknown, considering it behind primary '{}' (height {}).", secondary_name, primary_name, primary_height);
}
true // Secondary height unknown, assume it's behind if primary height is known
}
}
}
pub fn get_block_height_status(&self) -> HashMap<String, u64> {
self.block_heights
.iter()
.map(|entry| (entry.key().clone(), *entry.value()))
.collect()
}
pub async fn stop(&self) {
info!("BHT: Sending shutdown signal to all monitoring tasks...");
if self.shutdown_tx.send(true).is_err() {
error!("BHT: Failed to send shutdown signal. Tasks might not terminate gracefully.");
}
let mut tasks_guard = self.tasks.lock().unwrap();
info!("BHT: Awaiting termination of {} monitoring tasks...", tasks_guard.len());
for task in tasks_guard.drain(..) {
if let Err(e) = task.await {
error!("BHT: Error awaiting task termination: {:?}", e);
}
}
info!("BHT: All monitoring tasks terminated.");
}
}
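The tracker's public surface is small (new, start_monitoring, is_secondary_behind, get_block_height_status, stop); a sketch of the presumed wiring, assuming config and backends are built elsewhere:

// assumes `config: Arc<AppConfig>` and `backends: Vec<Backend>` already exist
if let Some(tracker) = BlockHeightTracker::new(config.clone(), &backends) {
    // one WebSocket monitor task per backend, each subscribing to newHeads
    tracker.clone().start_monitoring();

    // when selecting secondaries for a request:
    if tracker.is_secondary_behind("secondary-1") {
        // skip this backend; it lags the primary by more than max_blocks_behind
    }

    // on shutdown: tracker.stop().await;
}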

View File

@@ -0,0 +1,169 @@
use std::env;
use std::str::FromStr;
use std::time::Duration;
use url::Url;
use thiserror::Error;
use log::{warn, info};
#[derive(Debug, Error)]
pub enum ConfigError {
#[error("Failed to parse environment variable '{var_name}': {source}")]
ParseError {
var_name: String,
source: Box<dyn std::error::Error + Send + Sync>,
},
#[error("Missing required environment variable: {var_name}")]
MissingVariable { var_name: String },
#[error("Invalid URL format for '{var_name}': {url_str} - {source}")]
UrlParseError {
var_name: String,
url_str: String,
source: url::ParseError,
},
}
#[derive(Debug, Clone)]
pub struct AppConfig {
pub listen_addr: String,
pub primary_backend_url: Url,
pub secondary_backend_urls: Vec<Url>,
pub summary_interval_secs: u64,
pub enable_detailed_logs: bool,
pub enable_secondary_probing: bool,
pub probe_interval_secs: u64,
pub min_delay_buffer_ms: u64,
pub probe_methods: Vec<String>,
pub enable_block_height_tracking: bool,
pub max_blocks_behind: u64,
pub enable_expensive_method_routing: bool,
pub max_body_size_bytes: usize,
pub http_client_timeout_secs: u64,
pub request_context_timeout_secs: u64,
}
// Helper function to get and parse environment variables
fn get_env_var<T: FromStr + std::fmt::Debug>(key: &str, default_value: T) -> T
where
<T as FromStr>::Err: std::fmt::Display,
{
match env::var(key) {
Ok(val_str) => match val_str.parse::<T>() {
Ok(val) => val,
Err(e) => {
warn!(
"Failed to parse environment variable '{}' with value '{}': {}. Using default: {:?}",
key, val_str, e, default_value
);
default_value
}
},
Err(_) => default_value,
}
}
// Helper function for boolean environment variables
fn get_env_var_bool(key: &str, default_value: bool) -> bool {
match env::var(key) {
Ok(val_str) => val_str.to_lowercase() == "true",
Err(_) => default_value,
}
}
// Helper function for Vec<String> from comma-separated string
fn get_env_var_vec_string(key: &str, default_value: Vec<String>) -> Vec<String> {
match env::var(key) {
Ok(val_str) => {
if val_str.is_empty() {
default_value
} else {
val_str.split(',').map(|s| s.trim().to_string()).collect()
}
}
Err(_) => default_value,
}
}
// Helper function for Vec<Url> from comma-separated string
fn get_env_var_vec_url(key: &str, default_value: Vec<Url>) -> Result<Vec<Url>, ConfigError> {
match env::var(key) {
Ok(val_str) => {
if val_str.is_empty() {
return Ok(default_value);
}
val_str
.split(',')
.map(|s| s.trim())
.filter(|s| !s.is_empty())
.map(|url_str| {
Url::parse(url_str).map_err(|e| ConfigError::UrlParseError {
var_name: key.to_string(),
url_str: url_str.to_string(),
source: e,
})
})
.collect()
}
Err(_) => Ok(default_value),
}
}
pub fn load_from_env() -> Result<AppConfig, ConfigError> {
info!("Loading configuration from environment variables...");
let primary_backend_url_str = env::var("PRIMARY_BACKEND_URL").map_err(|_| {
ConfigError::MissingVariable {
var_name: "PRIMARY_BACKEND_URL".to_string(),
}
})?;
let primary_backend_url =
Url::parse(&primary_backend_url_str).map_err(|e| ConfigError::UrlParseError {
var_name: "PRIMARY_BACKEND_URL".to_string(),
url_str: primary_backend_url_str,
source: e,
})?;
let secondary_backend_urls = get_env_var_vec_url("SECONDARY_BACKEND_URLS", Vec::new())?;
let config = AppConfig {
listen_addr: get_env_var("LISTEN_ADDR", "127.0.0.1:8080".to_string()),
primary_backend_url,
secondary_backend_urls,
summary_interval_secs: get_env_var("SUMMARY_INTERVAL_SECS", 60),
enable_detailed_logs: get_env_var_bool("ENABLE_DETAILED_LOGS", false),
enable_secondary_probing: get_env_var_bool("ENABLE_SECONDARY_PROBING", true),
probe_interval_secs: get_env_var("PROBE_INTERVAL_SECS", 10),
min_delay_buffer_ms: get_env_var("MIN_DELAY_BUFFER_MS", 500),
probe_methods: get_env_var_vec_string(
"PROBE_METHODS",
vec!["eth_blockNumber".to_string(), "net_version".to_string()],
),
enable_block_height_tracking: get_env_var_bool("ENABLE_BLOCK_HEIGHT_TRACKING", true),
max_blocks_behind: get_env_var("MAX_BLOCKS_BEHIND", 5),
enable_expensive_method_routing: get_env_var_bool("ENABLE_EXPENSIVE_METHOD_ROUTING", false),
max_body_size_bytes: get_env_var("MAX_BODY_SIZE_BYTES", 10 * 1024 * 1024), // 10MB
http_client_timeout_secs: get_env_var("HTTP_CLIENT_TIMEOUT_SECS", 30),
request_context_timeout_secs: get_env_var("REQUEST_CONTEXT_TIMEOUT_SECS", 35),
};
info!("Configuration loaded successfully: {:?}", config);
Ok(config)
}
impl AppConfig {
pub fn http_client_timeout(&self) -> Duration {
Duration::from_secs(self.http_client_timeout_secs)
}
pub fn request_context_timeout(&self) -> Duration {
Duration::from_secs(self.request_context_timeout_secs)
}
pub fn summary_interval(&self) -> Duration {
Duration::from_secs(self.summary_interval_secs)
}
pub fn probe_interval(&self) -> Duration {
Duration::from_secs(self.probe_interval_secs)
}
}
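Since every field falls back to a default, only the primary URL is strictly required; a representative environment for the Rust port could be (URLs are placeholders):

export PRIMARY_BACKEND_URL=http://primary-node:8545
export SECONDARY_BACKEND_URLS=http://secondary-a:8545,http://secondary-b:8545
export LISTEN_ADDR=0.0.0.0:8080
export ENABLE_DETAILED_LOGS=true
export MAX_BLOCKS_BEHIND=5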

View File

@@ -0,0 +1,12 @@
pub mod structures;
pub mod config;
pub mod stats_collector;
pub mod secondary_probe;
pub mod block_height_tracker;
pub mod rpc_utils;
pub mod request_handler;
pub mod websocket_handler;
fn main() {
println!("Hello, world!");
}

View File

@@ -0,0 +1,272 @@
use bytes::Bytes;
use hyper::{Body, Request, Response, StatusCode};
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::mpsc;
use log;
use crate::config::AppConfig;
use crate::stats_collector::StatsCollector;
use crate::secondary_probe::SecondaryProbe;
use crate::block_height_tracker::BlockHeightTracker;
use crate::structures::{Backend, BatchInfo};
use crate::rpc_utils;
#[derive(Debug)]
pub enum BackendResult {
Success {
backend_name: String,
response: reqwest::Response, // Send the whole reqwest::Response
duration: std::time::Duration,
},
Error {
backend_name: String,
error: reqwest::Error, // Send the reqwest::Error
duration: std::time::Duration,
},
}
fn calculate_secondary_delay(
batch_info: &crate::structures::BatchInfo,
probe: &Option<Arc<crate::secondary_probe::SecondaryProbe>>,
stats: &Arc<crate::stats_collector::StatsCollector>,
_config: &Arc<crate::config::AppConfig>, // _config might be used later for more complex logic
) -> std::time::Duration {
let mut max_delay = std::time::Duration::from_millis(0);
let default_delay = std::time::Duration::from_millis(25); // Default from Go
if batch_info.methods.is_empty() {
return default_delay;
}
for method_name in &batch_info.methods {
let current_method_delay = if let Some(p) = probe {
p.get_delay_for_method(method_name)
} else {
// This will use the stubbed method from StatsCollector which currently returns 25ms
stats.get_primary_p75_for_method(method_name)
};
if current_method_delay > max_delay {
max_delay = current_method_delay;
}
}
if max_delay == std::time::Duration::from_millis(0) { // if all methods were unknown or had 0 delay
if let Some(p) = probe {
// Go code uses: probe.minResponseTime + probe.minDelayBuffer
// probe.get_delay_for_method("") would approximate this if it falls back to min_response_time + buffer
return p.get_delay_for_method(""); // Assuming empty method falls back to base delay
}
return default_delay;
}
max_delay
}
pub async fn handle_http_request(
req: Request<Body>,
config: Arc<AppConfig>,
stats_collector: Arc<StatsCollector>,
http_client: Arc<reqwest::Client>,
secondary_probe: Option<Arc<SecondaryProbe>>,
block_height_tracker: Option<Arc<BlockHeightTracker>>,
all_backends: Arc<Vec<Backend>>,
) -> Result<Response<Body>, Box<dyn std::error::Error + Send + Sync>> {
let _overall_start_time = std::time::Instant::now(); // To be used later with request_context_timeout
// 1. Read and limit request body
let limited_body = hyper::body::Limited::new(req.into_body(), config.max_body_size_bytes);
let body_bytes = match hyper::body::to_bytes(limited_body).await {
Ok(bytes) => bytes,
Err(e) => {
log::error!("Failed to read request body or limit exceeded: {}", e);
let mut err_resp = Response::new(Body::from(format!("Request body error: {}", e)));
*err_resp.status_mut() = if e.is::<hyper::Error>() && e.downcast_ref::<hyper::Error>().map_or(false, |he| he.is_body_write_aborted() || format!("{}", he).contains("Too Large")) { // A bit heuristic for "Too Large"
StatusCode::PAYLOAD_TOO_LARGE
} else {
StatusCode::BAD_REQUEST
};
return Ok(err_resp);
}
};
// 2. Parse Batch Info
let batch_info = match rpc_utils::parse_batch_info(&body_bytes) {
Ok(info) => info,
Err(e) => {
log::error!("Invalid JSON-RPC request: {}", e);
let mut err_resp = Response::new(Body::from(format!("Invalid JSON-RPC: {}", e)));
*err_resp.status_mut() = StatusCode::BAD_REQUEST;
return Ok(err_resp);
}
};
let display_method = if batch_info.is_batch {
format!("batch[{}]", batch_info.request_count)
} else {
batch_info.methods.get(0).cloned().unwrap_or_else(|| "unknown".to_string())
};
log::info!("Received request: Method: {}, IsBatch: {}, NumMethods: {}", display_method, batch_info.is_batch, batch_info.methods.len());
// 3. Calculate Secondary Delay
let secondary_delay = calculate_secondary_delay(&batch_info, &secondary_probe, &stats_collector, &config);
if config.enable_detailed_logs {
log::debug!("Method: {}, Calculated secondary delay: {:?}", display_method, secondary_delay);
}
// 4. Backend Filtering & Expensive Method Routing
let mut target_backends: Vec<Backend> = (*all_backends).clone();
if batch_info.has_stateful {
log::debug!("Stateful method detected in request '{}', targeting primary only.", display_method);
target_backends.retain(|b| b.role == "primary");
} else {
// Filter by block height
if let Some(bht) = &block_height_tracker {
if config.enable_block_height_tracking { // Check if feature is enabled
target_backends.retain(|b| {
if b.role != "primary" && bht.is_secondary_behind(&b.name) {
if config.enable_detailed_logs { log::info!("Skipping secondary {}: behind in block height for request {}", b.name, display_method); }
// TODO: Add stat for skipped due to block height
false
} else { true }
});
}
}
// Filter by probe availability
if let Some(sp) = &secondary_probe {
if config.enable_secondary_probing { // Check if feature is enabled
target_backends.retain(|b| {
if b.role != "primary" && !sp.is_backend_available(&b.name) {
if config.enable_detailed_logs { log::info!("Skipping secondary {}: not available via probe for request {}", b.name, display_method); }
// TODO: Add stat for skipped due to probe unavailable
false
} else { true }
});
}
}
}
let is_req_expensive = batch_info.methods.iter().any(|m| rpc_utils::is_expensive_method(m)) ||
batch_info.methods.iter().any(|m| stats_collector.is_expensive_method_by_stats(m)); // Stubbed
if config.enable_expensive_method_routing && is_req_expensive && !batch_info.has_stateful {
log::debug!("Expensive method detected in request {}. Attempting to route to a secondary.", display_method);
// TODO: Complex expensive method routing logic.
// For now, this placeholder doesn't change target_backends.
// A real implementation would try to find the best secondary or stick to primary if none are suitable.
}
// 5. Concurrent Request Dispatch
let (response_tx, mut response_rx) = mpsc::channel::<BackendResult>(target_backends.len().max(1));
let mut dispatched_count = 0;
for backend in target_backends { // target_backends is now filtered
dispatched_count += 1;
let task_body_bytes = body_bytes.clone();
let task_http_client = http_client.clone();
let task_response_tx = response_tx.clone();
// task_backend_name, task_backend_url, task_backend_role are cloned from 'backend'
let task_backend_name = backend.name.clone();
let task_backend_url = backend.url.clone();
let task_backend_role = backend.role.clone();
let task_secondary_delay = secondary_delay;
let task_display_method = display_method.clone(); // clone per task so each spawned future owns its copy and the original stays usable after the loop
let task_config_detailed_logs = config.enable_detailed_logs;
let task_http_timeout = config.http_client_timeout(); // Get Duration from config
tokio::spawn(async move {
let backend_req_start_time = std::time::Instant::now();
if task_backend_role != "primary" {
if task_config_detailed_logs {
log::debug!("Secondary backend {} for request {} delaying for {:?}", task_backend_name, display_method, task_secondary_delay);
}
tokio::time::sleep(task_secondary_delay).await;
}
let result = task_http_client
.post(task_backend_url)
.header("Content-Type", "application/json")
// TODO: Copy relevant headers from original request 'req.headers()'
.body(task_body_bytes)
.timeout(task_http_timeout)
.send()
.await;
let duration = backend_req_start_time.elapsed();
match result {
Ok(resp) => {
if task_config_detailed_logs {
log::debug!("Backend {} for request {} responded with status {}", task_backend_name, display_method, resp.status());
}
if task_response_tx.send(BackendResult::Success {
backend_name: task_backend_name,
response: resp,
duration,
}).await.is_err() {
log::error!("Failed to send success to channel for request {}: receiver dropped", display_method);
}
}
Err(err) => {
if task_config_detailed_logs {
log::error!("Backend {} for request {} request failed: {}", task_backend_name, display_method, err);
}
if task_response_tx.send(BackendResult::Error {
backend_name: task_backend_name,
error: err,
duration,
}).await.is_err() {
log::error!("Failed to send error to channel for request {}: receiver dropped", display_method);
}
}
}
});
}
drop(response_tx);
if dispatched_count == 0 {
log::warn!("No backends available to dispatch request for method {}", display_method);
// TODO: Add stat for no backend available
let mut err_resp = Response::new(Body::from("No available backends for this request type."));
*err_resp.status_mut() = StatusCode::SERVICE_UNAVAILABLE;
return Ok(err_resp);
}
// Placeholder: return the first received response
if let Some(first_result) = response_rx.recv().await {
if config.enable_detailed_logs {
log::info!("First backend response for request {}: {:?}", display_method, first_result);
}
match first_result {
BackendResult::Success { backend_name: _, response: reqwest_resp, duration: _ } => {
let mut hyper_resp_builder = Response::builder().status(reqwest_resp.status());
for (name, value) in reqwest_resp.headers().iter() {
hyper_resp_builder = hyper_resp_builder.header(name.clone(), value.clone());
}
let hyper_resp = hyper_resp_builder
.body(Body::wrap_stream(reqwest_resp.bytes_stream()))
.unwrap_or_else(|e| {
log::error!("Error building response from backend for request {}: {}", display_method, e);
let mut err_resp = Response::new(Body::from("Error processing backend response"));
*err_resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
err_resp
});
return Ok(hyper_resp);
}
BackendResult::Error { backend_name, error, duration: _ } => {
log::error!("First response for request {} was an error from {}: {}", display_method, backend_name, error);
let mut err_resp = Response::new(Body::from(format!("Error from backend {}: {}", backend_name, error)));
*err_resp.status_mut() = StatusCode::BAD_GATEWAY;
return Ok(err_resp);
}
}
} else {
log::error!("No responses received from any dispatched backend for method {}", display_method);
// TODO: Add stat for no response received
let mut err_resp = Response::new(Body::from("No response from any backend."));
*err_resp.status_mut() = StatusCode::GATEWAY_TIMEOUT;
return Ok(err_resp);
}
// Note: Overall request context timeout and full response aggregation logic are still TODOs.
}
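The closing comment above leaves the overall request deadline as a TODO. A minimal standalone sketch, assuming a tokio::time::timeout wrapper around the wait for the first backend result; the function name, the simplified String payload, and the overall_timeout parameter are illustrative and not part of this diff.

use std::time::Duration;
use tokio::sync::mpsc;
use tokio::time::timeout;

// Wait for the first backend result, but never longer than the overall deadline.
async fn first_response_or_timeout(
    mut rx: mpsc::Receiver<String>,
    overall_timeout: Duration,
) -> Option<String> {
    match timeout(overall_timeout, rx.recv()).await {
        Ok(Some(result)) => Some(result), // a backend answered in time
        Ok(None) => None,                 // every sender dropped without responding
        Err(_) => None,                   // the deadline elapsed first
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(4);
    tokio::spawn(async move {
        tokio::time::sleep(Duration::from_millis(20)).await;
        let _ = tx.send("backend-1 response".to_string()).await;
    });
    let answer = first_response_or_timeout(rx, Duration::from_millis(100)).await;
    assert_eq!(answer.as_deref(), Some("backend-1 response"));
}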

View File

@@ -0,0 +1,92 @@
use crate::structures::{BatchInfo, JsonRpcRequest};
use std::collections::HashSet;
use log;
use serde_json; // Added for parsing
fn get_stateful_methods() -> HashSet<&'static str> {
[
"eth_newFilter", "eth_newBlockFilter", "eth_newPendingTransactionFilter",
"eth_getFilterChanges", "eth_getFilterLogs", "eth_uninstallFilter",
"eth_subscribe", "eth_unsubscribe", "eth_subscription", // "eth_subscription" is a notification, not a method client calls.
// But if it appears in a batch for some reason, it's state-related.
]
.iter()
.cloned()
.collect()
}
fn get_expensive_methods() -> HashSet<&'static str> {
[
// Ethereum Debug API (typically Geth-specific)
"debug_traceBlockByHash", "debug_traceBlockByNumber", "debug_traceCall", "debug_traceTransaction",
"debug_storageRangeAt", "debug_getModifiedAccountsByHash", "debug_getModifiedAccountsByNumber",
// Erigon/OpenEthereum Trace Module (more standard)
"trace_block", "trace_call", "trace_callMany", "trace_filter", "trace_get", "trace_rawTransaction",
"trace_replayBlockTransactions", "trace_replayTransaction", "trace_transaction",
// Specific combinations that might be considered extra expensive
"trace_replayBlockTransactions#vmTrace", // Example, depends on actual usage if # is method part
"trace_replayTransaction#vmTrace",
]
.iter()
.cloned()
.collect()
}
lazy_static::lazy_static! {
static ref STATEFUL_METHODS: HashSet<&'static str> = get_stateful_methods();
static ref EXPENSIVE_METHODS: HashSet<&'static str> = get_expensive_methods();
}
pub fn is_stateful_method(method: &str) -> bool {
STATEFUL_METHODS.contains(method)
}
pub fn is_expensive_method(method: &str) -> bool {
EXPENSIVE_METHODS.contains(method)
}
pub fn parse_batch_info(body_bytes: &[u8]) -> Result<BatchInfo, String> {
if body_bytes.is_empty() {
return Err("Empty request body".to_string());
}
// Try parsing as a batch (array) first
match serde_json::from_slice::<Vec<JsonRpcRequest>>(body_bytes) {
Ok(batch_reqs) => {
if batch_reqs.is_empty() {
return Err("Empty batch request".to_string());
}
let mut methods = Vec::new();
let mut has_stateful = false;
for req in &batch_reqs {
methods.push(req.method.clone());
if is_stateful_method(&req.method) {
has_stateful = true;
}
}
Ok(BatchInfo {
is_batch: true,
methods,
request_count: batch_reqs.len(),
has_stateful,
})
}
Err(e_batch) => {
// If not a batch, try parsing as a single request
match serde_json::from_slice::<JsonRpcRequest>(body_bytes) {
Ok(single_req) => Ok(BatchInfo {
is_batch: false,
methods: vec![single_req.method.clone()],
request_count: 1,
has_stateful: is_stateful_method(&single_req.method),
}),
Err(e_single) => {
// Log the underlying parse errors for debugging, but return a generic message.
log::debug!("Failed to parse as batch: {}", e_batch);
log::debug!("Failed to parse as single: {}", e_single);
Err("Invalid JSON-RPC request format. Not a valid single request or batch.".to_string())
}
}
}
}
}
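An illustrative test sketch (not from the diff) of how parse_batch_info above is expected to classify a single request versus a batch that includes a stateful filter method; the test bodies are assumptions shaped after the JsonRpcRequest structure.

#[cfg(test)]
mod tests {
    use super::parse_batch_info;

    #[test]
    fn single_request_is_not_a_batch() {
        let body = br#"{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}"#;
        let info = parse_batch_info(body).expect("valid single request");
        assert!(!info.is_batch);
        assert_eq!(info.request_count, 1);
        assert_eq!(info.methods, vec!["eth_blockNumber".to_string()]);
        assert!(!info.has_stateful);
    }

    #[test]
    fn batch_with_stateful_method_is_flagged() {
        let body = br#"[{"jsonrpc":"2.0","id":1,"method":"eth_call","params":[]},{"jsonrpc":"2.0","id":2,"method":"eth_newFilter","params":[]}]"#;
        let info = parse_batch_info(body).expect("valid batch request");
        assert!(info.is_batch);
        assert_eq!(info.request_count, 2);
        assert!(info.has_stateful);
    }
}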

View File

@@ -0,0 +1,383 @@
use crate::{
config::AppConfig,
structures::{Backend, JsonRpcRequest},
};
use chrono::Utc;
use dashmap::DashMap;
use log::{debug, error, info, warn};
use reqwest::Client;
use serde_json::json;
use std::{
cmp::min,
sync::{
atomic::{AtomicU32, Ordering},
Arc, Mutex, RwLock,
},
time::{Duration, SystemTime},
};
use tokio::sync::watch;
const PROBE_REQUEST_COUNT: usize = 10;
const DEFAULT_MIN_RESPONSE_TIME_MS: u64 = 15;
const PROBE_CYCLE_DELAY_MS: u64 = 10;
pub struct SecondaryProbe {
config: Arc<AppConfig>,
backends: Vec<Backend>, // Only secondary backends
client: Client,
min_response_time: Arc<RwLock<Duration>>,
method_timings: Arc<DashMap<String, Duration>>, // method_name -> min_duration
backend_timings: Arc<DashMap<String, Duration>>, // backend_name -> min_duration
// Health state per backend
backend_available: Arc<DashMap<String, bool>>,
backend_error_count: Arc<DashMap<String, AtomicU32>>,
backend_consecutive_success_count: Arc<DashMap<String, AtomicU32>>, // For recovery
backend_last_success: Arc<DashMap<String, Mutex<SystemTime>>>,
last_probe_time: Arc<Mutex<SystemTime>>,
failure_count: Arc<AtomicU32>, // Consecutive overall probe cycle failures
last_success_time: Arc<Mutex<SystemTime>>, // Last time any probe in an overall cycle succeeded
shutdown_tx: watch::Sender<bool>,
shutdown_rx: watch::Receiver<bool>,
enable_detailed_logs: bool,
}
impl SecondaryProbe {
pub fn new(
config: Arc<AppConfig>,
all_backends: &[Backend],
client: Client,
) -> Option<Arc<Self>> {
let secondary_backends: Vec<Backend> = all_backends
.iter()
.filter(|b| b.role.to_lowercase() == "secondary")
.cloned()
.collect();
if secondary_backends.is_empty() {
info!("No secondary backends configured. SecondaryProbe will not be initialized.");
return None;
}
info!(
"Initializing SecondaryProbe for {} secondary backends.",
secondary_backends.len()
);
let backend_available = Arc::new(DashMap::new());
let backend_error_count = Arc::new(DashMap::new());
let backend_consecutive_success_count = Arc::new(DashMap::new());
let backend_last_success = Arc::new(DashMap::new());
for backend in &secondary_backends {
backend_available.insert(backend.name.clone(), true);
backend_error_count.insert(backend.name.clone(), AtomicU32::new(0));
backend_consecutive_success_count.insert(backend.name.clone(), AtomicU32::new(0));
backend_last_success.insert(backend.name.clone(), Mutex::new(SystemTime::now()));
info!(" - Backend '{}' ({}) initialized as available.", backend.name, backend.url);
}
let (shutdown_tx, shutdown_rx) = watch::channel(false);
Some(Arc::new(Self {
config: config.clone(),
backends: secondary_backends,
client,
min_response_time: Arc::new(RwLock::new(Duration::from_millis(
DEFAULT_MIN_RESPONSE_TIME_MS, // Or load from config if needed
))),
method_timings: Arc::new(DashMap::new()),
backend_timings: Arc::new(DashMap::new()),
backend_available,
backend_error_count,
backend_consecutive_success_count,
backend_last_success,
last_probe_time: Arc::new(Mutex::new(SystemTime::now())),
failure_count: Arc::new(AtomicU32::new(0)),
last_success_time: Arc::new(Mutex::new(SystemTime::now())),
shutdown_tx,
shutdown_rx, // Receiver is cloneable
enable_detailed_logs: config.enable_detailed_logs,
}))
}
pub fn start_periodic_probing(self: Arc<Self>) {
if self.backends.is_empty() {
info!("No secondary backends to probe. Periodic probing will not start.");
return;
}
info!(
"Starting periodic probing for {} secondary backends. Probe interval: {}s. Probe methods: {:?}. Max errors: {}, Recovery threshold: {}.",
self.backends.len(),
self.config.probe_interval_secs,
self.config.probe_methods,
self.config.max_error_threshold,
self.config.recovery_threshold
);
// Run initial probe
let initial_probe_self = self.clone();
tokio::spawn(async move {
if initial_probe_self.enable_detailed_logs {
debug!("Running initial probe...");
}
initial_probe_self.run_probe().await;
if initial_probe_self.enable_detailed_logs {
debug!("Initial probe finished.");
}
});
// Start periodic probing task
let mut interval = tokio::time::interval(self.config.probe_interval());
let mut shutdown_rx_clone = self.shutdown_rx.clone();
tokio::spawn(async move {
loop {
tokio::select! {
_ = interval.tick() => {
if self.enable_detailed_logs {
debug!("Running periodic probe cycle...");
}
self.run_probe().await;
if self.enable_detailed_logs {
debug!("Periodic probe cycle finished.");
}
}
res = shutdown_rx_clone.changed() => {
if res.is_err() || *shutdown_rx_clone.borrow() {
info!("SecondaryProbe: Shutdown signal received or channel closed, stopping periodic probing.");
break;
}
}
}
}
info!("SecondaryProbe: Periodic probing task has stopped.");
});
}
async fn run_probe(&self) {
let mut successful_probes_in_overall_cycle = 0;
let temp_method_timings: DashMap<String, Duration> = DashMap::new(); // method_name -> min_duration for this cycle
let temp_backend_timings: DashMap<String, Duration> = DashMap::new(); // backend_name -> min_duration for this cycle
let mut temp_overall_min_response_time = Duration::MAX;
for backend in &self.backends {
let mut backend_cycle_successful_probes = 0;
let mut backend_cycle_min_duration = Duration::MAX;
for method_name in &self.config.probe_methods {
let mut method_min_duration_for_backend_this_cycle = Duration::MAX;
for i in 0..PROBE_REQUEST_COUNT {
let probe_id = format!(
"probe-{}-{}-{}-{}",
backend.name,
method_name,
Utc::now().timestamp_nanos_opt().unwrap_or_else(|| SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap_or_default().as_nanos() as i64),
i
);
let request_body = JsonRpcRequest {
method: method_name.clone(),
params: Some(json!([])),
id: Some(json!(probe_id)),
jsonrpc: Some("2.0".to_string()),
};
let start_time = SystemTime::now();
match self.client.post(backend.url.clone()).json(&request_body).timeout(self.config.http_client_timeout()).send().await {
Ok(response) => {
let duration = start_time.elapsed().unwrap_or_default();
if response.status().is_success() {
// TODO: Optionally parse JSON RPC response for error field
backend_cycle_successful_probes += 1;
successful_probes_in_overall_cycle += 1;
method_min_duration_for_backend_this_cycle = min(method_min_duration_for_backend_this_cycle, duration);
backend_cycle_min_duration = min(backend_cycle_min_duration, duration);
temp_overall_min_response_time = min(temp_overall_min_response_time, duration);
if self.enable_detailed_logs {
debug!("Probe success: {} method {} ID {} took {:?}.", backend.name, method_name, probe_id, duration);
}
} else {
if self.enable_detailed_logs {
warn!("Probe failed (HTTP status {}): {} method {} ID {}. Body: {:?}", response.status(), backend.name, method_name, probe_id, response.text().await.unwrap_or_default());
}
}
}
Err(e) => {
if self.enable_detailed_logs {
warn!("Probe error (request failed): {} method {} ID {}: {:?}", backend.name, method_name, probe_id, e);
}
}
}
tokio::time::sleep(Duration::from_millis(PROBE_CYCLE_DELAY_MS)).await;
} // End of PROBE_REQUEST_COUNT loop
if method_min_duration_for_backend_this_cycle != Duration::MAX {
temp_method_timings
.entry(method_name.clone())
.and_modify(|current_min| *current_min = min(*current_min, method_min_duration_for_backend_this_cycle))
.or_insert(method_min_duration_for_backend_this_cycle);
}
} // End of probe_methods loop
if backend_cycle_min_duration != Duration::MAX {
temp_backend_timings.insert(backend.name.clone(), backend_cycle_min_duration);
}
self.update_backend_health(&backend.name, backend_cycle_successful_probes > 0);
if self.enable_detailed_logs {
debug!(
"Probe sub-cycle for backend {}: {} successful probes. Min duration for this backend this cycle: {:?}. Current health: available={}",
backend.name,
backend_cycle_successful_probes,
if backend_cycle_min_duration == Duration::MAX { None } else { Some(backend_cycle_min_duration) },
self.is_backend_available(&backend.name)
);
}
} // End of backends loop
// Update overall timings if any probe in the cycle was successful
if successful_probes_in_overall_cycle > 0 {
if temp_overall_min_response_time != Duration::MAX {
let mut min_resp_time_guard = self.min_response_time.write().unwrap();
*min_resp_time_guard = min(*min_resp_time_guard, temp_overall_min_response_time);
if self.enable_detailed_logs {
debug!("Global min_response_time updated to: {:?}", *min_resp_time_guard);
}
}
for entry in temp_method_timings.iter() {
self.method_timings
.entry(entry.key().clone())
.and_modify(|current_min| *current_min = min(*current_min, *entry.value()))
.or_insert(*entry.value());
if self.enable_detailed_logs {
debug!("Global method_timing for {} updated/set to: {:?}", entry.key(), *entry.value());
}
}
for entry in temp_backend_timings.iter() {
self.backend_timings
.entry(entry.key().clone())
.and_modify(|current_min| *current_min = min(*current_min, *entry.value()))
.or_insert(*entry.value());
if self.enable_detailed_logs {
debug!("Global backend_timing for {} updated/set to: {:?}", entry.key(), *entry.value());
}
}
self.failure_count.store(0, Ordering::Relaxed);
*self.last_success_time.lock().unwrap() = SystemTime::now();
if self.enable_detailed_logs {
info!("Overall probe cycle completed with {} successes. Overall failure count reset.", successful_probes_in_overall_cycle);
}
} else {
let prev_failures = self.failure_count.fetch_add(1, Ordering::Relaxed);
warn!(
"Overall probe cycle completed with NO successful probes. Overall failure count incremented to {}.",
prev_failures + 1
);
}
*self.last_probe_time.lock().unwrap() = SystemTime::now();
}
fn update_backend_health(&self, backend_name: &str, is_cycle_success: bool) {
let current_availability = self.is_backend_available(backend_name);
let error_count_entry = self.backend_error_count.entry(backend_name.to_string()).or_insert_with(|| AtomicU32::new(0));
let consecutive_success_entry = self.backend_consecutive_success_count.entry(backend_name.to_string()).or_insert_with(|| AtomicU32::new(0));
if is_cycle_success {
error_count_entry.store(0, Ordering::Relaxed);
consecutive_success_entry.fetch_add(1, Ordering::Relaxed);
if let Some(mut last_success_guard) = self.backend_last_success.get_mut(backend_name) {
*last_success_guard.lock().unwrap() = SystemTime::now();
}
if !current_availability {
let successes = consecutive_success_entry.load(Ordering::Relaxed);
if successes >= self.config.recovery_threshold {
self.backend_available.insert(backend_name.to_string(), true);
info!("Backend {} recovered and is now AVAILABLE ({} consecutive successes met threshold {}).", backend_name, successes, self.config.recovery_threshold);
consecutive_success_entry.store(0, Ordering::Relaxed); // Reset after recovery
} else {
if self.enable_detailed_logs {
debug!("Backend {} had a successful probe cycle. Consecutive successes: {}. Needs {} for recovery.", backend_name, successes, self.config.recovery_threshold);
}
}
} else {
if self.enable_detailed_logs {
debug!("Backend {} remains available, successful probe cycle.", backend_name);
}
}
} else { // Probe cycle failed for this backend
consecutive_success_entry.store(0, Ordering::Relaxed); // Reset consecutive successes on any failure
let current_errors = error_count_entry.fetch_add(1, Ordering::Relaxed) + 1; // +1 because fetch_add returns previous value
if current_availability && current_errors >= self.config.max_error_threshold {
self.backend_available.insert(backend_name.to_string(), false);
warn!(
"Backend {} has become UNAVAILABLE due to {} errors (threshold {}).",
backend_name, current_errors, self.config.max_error_threshold
);
} else {
if self.enable_detailed_logs {
if current_availability {
debug!("Backend {} is still available but error count increased to {}. Max errors before unavailable: {}", backend_name, current_errors, self.config.max_error_threshold);
} else {
debug!("Backend {} remains UNAVAILABLE, error count now {}.", backend_name, current_errors);
}
}
}
}
}
pub fn get_delay_for_method(&self, method_name: &str) -> Duration {
let base_delay = self
.method_timings
.get(method_name)
.map(|timing_ref| *timing_ref.value())
.unwrap_or_else(|| *self.min_response_time.read().unwrap()); // Read lock
let buffer = Duration::from_millis(self.config.min_delay_buffer_ms);
let calculated_delay = base_delay.saturating_add(buffer);
let overall_failures = self.failure_count.load(Ordering::Relaxed);
// Consider last_success_time to see if failures are recent and persistent
let time_since_last_overall_success = SystemTime::now()
.duration_since(*self.last_success_time.lock().unwrap()) // Lock for last_success_time
.unwrap_or_default();
// Fallback logic: if many consecutive failures AND last success was long ago
if overall_failures >= 3 && time_since_last_overall_success > self.config.probe_interval().saturating_mul(3) {
warn!(
"Probes failing ({} consecutive, last overall success {:?} ago). Using conservative fixed delay for method {}.",
overall_failures, time_since_last_overall_success, method_name
);
return Duration::from_millis(self.config.min_delay_buffer_ms.saturating_mul(3));
}
if self.enable_detailed_logs {
debug!("Delay for method '{}': base {:?}, buffer {:?}, final {:?}", method_name, base_delay, buffer, calculated_delay);
}
calculated_delay
}
pub fn is_backend_available(&self, backend_name: &str) -> bool {
self.backend_available
.get(backend_name)
.map_or(false, |entry| *entry.value())
}
pub fn stop(&self) {
info!("SecondaryProbe: Sending shutdown signal...");
if self.shutdown_tx.send(true).is_err() {
error!("Failed to send shutdown signal to SecondaryProbe task. It might have already stopped or had no receiver.");
}
}
}
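The probing loop above couples an interval tick with a watch-channel shutdown signal inside select!. A self-contained sketch of that shutdown pattern in isolation; the names and timings here are illustrative, not taken from the diff.

use std::time::Duration;
use tokio::sync::watch;

#[tokio::main]
async fn main() {
    let (shutdown_tx, mut shutdown_rx) = watch::channel(false);
    let worker = tokio::spawn(async move {
        let mut interval = tokio::time::interval(Duration::from_millis(50));
        loop {
            tokio::select! {
                _ = interval.tick() => {
                    // a probe cycle would run here
                }
                res = shutdown_rx.changed() => {
                    if res.is_err() || *shutdown_rx.borrow() {
                        break; // shutdown requested or sender dropped
                    }
                }
            }
        }
    });
    tokio::time::sleep(Duration::from_millis(120)).await;
    shutdown_tx.send(true).expect("worker still listening");
    worker.await.unwrap();
}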

View File

@@ -0,0 +1,290 @@
use crate::structures::{ResponseStats, WebSocketStats, CuDataPoint, Backend};
use crate::block_height_tracker::BlockHeightTracker;
use crate::secondary_probe::SecondaryProbe;
use std::time::{Duration, SystemTime};
use std::sync::{Arc, Mutex, atomic::{AtomicU64, Ordering}};
use dashmap::DashMap;
use log::{debug, error, info, warn};
pub struct StatsCollector {
pub request_stats: Arc<Mutex<Vec<ResponseStats>>>,
pub method_stats: Arc<DashMap<String, Mutex<Vec<Duration>>>>, // method_name -> list of durations for primary
pub backend_method_stats: Arc<DashMap<String, DashMap<String, Mutex<Vec<Duration>>>>>, // backend_name -> method_name -> list of durations
pub backend_wins: Arc<DashMap<String, AtomicU64>>, // backend_name -> count
pub method_backend_wins: Arc<DashMap<String, DashMap<String, AtomicU64>>>, // method_name -> backend_name -> count
pub first_response_durations: Arc<Mutex<Vec<Duration>>>,
pub actual_first_response_durations: Arc<Mutex<Vec<Duration>>>,
pub method_first_response_durations: Arc<DashMap<String, Mutex<Vec<Duration>>>>,
pub method_actual_first_response_durations: Arc<DashMap<String, Mutex<Vec<Duration>>>>,
pub total_requests: Arc<AtomicU64>,
pub error_count: Arc<AtomicU64>,
pub skipped_secondary_requests: Arc<AtomicU64>,
pub ws_stats: Arc<Mutex<Vec<WebSocketStats>>>,
pub total_ws_connections: Arc<AtomicU64>,
pub app_start_time: SystemTime,
pub interval_start_time: Arc<Mutex<SystemTime>>,
pub summary_interval: Duration,
pub method_cu_prices: Arc<DashMap<String, u64>>,
pub total_cu: Arc<AtomicU64>,
pub method_cu: Arc<DashMap<String, AtomicU64>>, // method_name -> total CU for this method in interval
pub historical_cu: Arc<Mutex<Vec<CuDataPoint>>>,
pub has_secondary_backends: bool,
// Placeholders for probe and tracker - actual types will be defined later
// pub secondary_probe: Option<Arc<SecondaryProbe>>,
// pub block_height_tracker: Option<Arc<BlockHeightTracker>>,
}
impl StatsCollector {
pub fn new(summary_interval: Duration, has_secondary_backends: bool) -> Self {
let method_cu_prices = Arc::new(DashMap::new());
Self::init_cu_prices(&method_cu_prices);
StatsCollector {
request_stats: Arc::new(Mutex::new(Vec::new())),
method_stats: Arc::new(DashMap::new()),
backend_method_stats: Arc::new(DashMap::new()),
backend_wins: Arc::new(DashMap::new()),
method_backend_wins: Arc::new(DashMap::new()),
first_response_durations: Arc::new(Mutex::new(Vec::new())),
actual_first_response_durations: Arc::new(Mutex::new(Vec::new())),
method_first_response_durations: Arc::new(DashMap::new()),
method_actual_first_response_durations: Arc::new(DashMap::new()),
total_requests: Arc::new(AtomicU64::new(0)),
error_count: Arc::new(AtomicU64::new(0)),
skipped_secondary_requests: Arc::new(AtomicU64::new(0)),
ws_stats: Arc::new(Mutex::new(Vec::new())),
total_ws_connections: Arc::new(AtomicU64::new(0)),
app_start_time: SystemTime::now(),
interval_start_time: Arc::new(Mutex::new(SystemTime::now())),
summary_interval,
method_cu_prices,
total_cu: Arc::new(AtomicU64::new(0)),
method_cu: Arc::new(DashMap::new()),
historical_cu: Arc::new(Mutex::new(Vec::new())),
has_secondary_backends,
}
}
fn init_cu_prices(prices_map: &DashMap<String, u64>) {
// Base CU
prices_map.insert("eth_call".to_string(), 100);
prices_map.insert("eth_estimateGas".to_string(), 150);
prices_map.insert("eth_getLogs".to_string(), 200);
prices_map.insert("eth_sendRawTransaction".to_string(), 250);
prices_map.insert("trace_call".to_string(), 300);
prices_map.insert("trace_replayBlockTransactions".to_string(), 500);
// Default for unknown methods
prices_map.insert("default".to_string(), 50);
}
pub fn add_stats(&self, stats_vec: Vec<ResponseStats>) {
if stats_vec.is_empty() {
warn!("add_stats called with empty stats_vec");
return;
}
self.total_requests.fetch_add(1, Ordering::Relaxed);
let mut primary_stats: Option<&ResponseStats> = None;
let mut winning_backend_name: Option<String> = None;
let mut actual_first_response_duration: Option<Duration> = None;
let mut first_response_duration_from_primary_or_fastest_secondary: Option<Duration> = None;
// Find the 'actual-first-response' if present and the primary response
for stat in &stats_vec {
if stat.backend_name == "actual-first-response" {
actual_first_response_duration = Some(stat.duration);
} else if stat.backend_name.contains("-primary") { // Assuming primary name contains "-primary"
primary_stats = Some(stat);
}
}
let method_name = primary_stats.map_or_else(
|| stats_vec.first().map_or_else(|| "unknown".to_string(), |s| s.method.clone()),
|ps| ps.method.clone()
);
// Determine winning backend and first_response_duration_from_primary_or_fastest_secondary
if self.has_secondary_backends {
let mut fastest_duration = Duration::MAX;
for stat in stats_vec.iter().filter(|s| s.backend_name != "actual-first-response" && s.error.is_none()) {
if stat.duration < fastest_duration {
fastest_duration = stat.duration;
winning_backend_name = Some(stat.backend_name.clone());
}
}
if fastest_duration != Duration::MAX {
first_response_duration_from_primary_or_fastest_secondary = Some(fastest_duration);
}
} else {
// If no secondary backends, primary is the winner if no error
if let Some(ps) = primary_stats {
if ps.error.is_none() {
winning_backend_name = Some(ps.backend_name.clone());
first_response_duration_from_primary_or_fastest_secondary = Some(ps.duration);
}
}
}
// If no winner determined yet (e.g. all errored, or no secondary and primary errored),
// and if primary_stats exists, consider it as the "winner" for error tracking purposes.
if winning_backend_name.is_none() && primary_stats.is_some() {
winning_backend_name = Some(primary_stats.unwrap().backend_name.clone());
}
// Update backend_wins and method_backend_wins
if let Some(ref winner_name) = winning_backend_name {
self.backend_wins.entry(winner_name.clone()).or_insert_with(|| AtomicU64::new(0)).fetch_add(1, Ordering::Relaxed);
self.method_backend_wins.entry(method_name.clone()).or_default().entry(winner_name.clone()).or_insert_with(|| AtomicU64::new(0)).fetch_add(1, Ordering::Relaxed);
}
// Update first_response_durations and actual_first_response_durations
if let Some(duration) = first_response_duration_from_primary_or_fastest_secondary {
self.first_response_durations.lock().unwrap().push(duration);
self.method_first_response_durations.entry(method_name.clone()).or_insert_with(|| Mutex::new(Vec::new())).lock().unwrap().push(duration);
}
if let Some(duration) = actual_first_response_duration {
self.actual_first_response_durations.lock().unwrap().push(duration);
self.method_actual_first_response_durations.entry(method_name.clone()).or_insert_with(|| Mutex::new(Vec::new())).lock().unwrap().push(duration);
}
let mut request_stats_guard = self.request_stats.lock().unwrap();
for stat in stats_vec {
if stat.backend_name == "actual-first-response" { // Already handled
continue;
}
request_stats_guard.push(stat.clone());
if stat.error.is_some() {
if stat.error.as_deref() == Some("skipped by primary due to min_delay_buffer") {
self.skipped_secondary_requests.fetch_add(1, Ordering::Relaxed);
} else {
self.error_count.fetch_add(1, Ordering::Relaxed);
}
}
// Update backend_method_stats for all backends
self.backend_method_stats
.entry(stat.backend_name.clone())
.or_default()
.entry(stat.method.clone())
.or_insert_with(|| Mutex::new(Vec::new()))
.lock()
.unwrap()
.push(stat.duration);
// If the winning backend is primary and it's not a batch (batch handled separately), update method_stats and CUs
// Assuming primary_stats contains the correct method name for CU calculation
if let Some(ref winner_name_val) = winning_backend_name {
if &stat.backend_name == winner_name_val && stat.backend_name.contains("-primary") && stat.error.is_none() {
// Update method_stats (for primary)
self.method_stats
.entry(stat.method.clone())
.or_insert_with(|| Mutex::new(Vec::new()))
.lock()
.unwrap()
.push(stat.duration);
// Update CU
let cu_price = self.method_cu_prices.get(&stat.method).map_or_else(
|| self.method_cu_prices.get("default").map_or(0, |p| *p.value()),
|p| *p.value()
);
if cu_price > 0 {
self.total_cu.fetch_add(cu_price, Ordering::Relaxed);
self.method_cu.entry(stat.method.clone()).or_insert_with(|| AtomicU64::new(0)).fetch_add(cu_price, Ordering::Relaxed);
}
}
}
}
}
pub fn add_batch_stats(&self, methods: &[String], duration: Duration, backend_name: &str) {
if !backend_name.contains("-primary") { // Only primary processes batches directly for now
warn!("add_batch_stats called for non-primary backend: {}", backend_name);
return;
}
let mut batch_cu: u64 = 0;
for method_name in methods {
let cu_price = self.method_cu_prices.get(method_name).map_or_else(
|| self.method_cu_prices.get("default").map_or(0, |p| *p.value()),
|p| *p.value()
);
batch_cu += cu_price;
if cu_price > 0 {
self.method_cu.entry(method_name.clone()).or_insert_with(|| AtomicU64::new(0)).fetch_add(cu_price, Ordering::Relaxed);
}
// Update method_stats for each method in the batch on the primary
self.method_stats
.entry(method_name.clone())
.or_insert_with(|| Mutex::new(Vec::new()))
.lock()
.unwrap()
.push(duration); // Using the same duration for all methods in the batch as an approximation
// Update backend_method_stats
self.backend_method_stats
.entry(backend_name.to_string())
.or_default()
.entry(method_name.clone())
.or_insert_with(|| Mutex::new(Vec::new()))
.lock()
.unwrap()
.push(duration);
}
if batch_cu > 0 {
self.total_cu.fetch_add(batch_cu, Ordering::Relaxed);
}
// Note: total_requests is incremented by add_stats which should be called for the overall batch request
}
pub fn add_websocket_stats(&self, ws_stat: WebSocketStats) {
if ws_stat.error.is_some() {
self.error_count.fetch_add(1, Ordering::Relaxed);
}
self.ws_stats.lock().unwrap().push(ws_stat);
self.total_ws_connections.fetch_add(1, Ordering::Relaxed);
}
// STUBBED METHODS - to be implemented later
pub fn get_primary_p75_for_method(&self, _method: &str) -> std::time::Duration {
// Placeholder: return a default fixed duration
log::debug!("StatsCollector::get_primary_p75_for_method called (stub)");
std::time::Duration::from_millis(25) // Default from Go's calculateBatchDelay fallback
}
pub fn get_primary_p50_for_method(&self, _method: &str) -> std::time::Duration {
// Placeholder: return a default fixed duration
log::debug!("StatsCollector::get_primary_p50_for_method called (stub)");
std::time::Duration::from_millis(15)
}
pub fn is_expensive_method_by_stats(&self, _method: &str) -> bool {
// Placeholder: always return false
log::debug!("StatsCollector::is_expensive_method_by_stats called (stub)");
false
}
pub fn select_best_secondary_for_expensive_method(
&self,
_method: &str,
_backends: &[Backend],
_block_height_tracker: &Option<Arc<BlockHeightTracker>>,
_secondary_probe: &Option<Arc<SecondaryProbe>>,
) -> Option<Backend> {
// Placeholder: always return None
log::debug!("StatsCollector::select_best_secondary_for_expensive_method called (stub)");
None
}
}
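get_primary_p75_for_method and get_primary_p50_for_method above are stubs returning fixed values. A standalone sketch of the nearest-rank percentile arithmetic a real implementation could apply to the recorded durations; the helper name and selection rule are assumptions, not taken from the diff.

use std::time::Duration;

// Nearest-rank percentile over a set of observed durations.
fn percentile(samples: &[Duration], pct: f64) -> Option<Duration> {
    if samples.is_empty() {
        return None;
    }
    let mut sorted: Vec<Duration> = samples.to_vec();
    sorted.sort();
    // ceil(pct/100 * n), converted to a zero-based index and clamped.
    let rank = ((pct / 100.0) * sorted.len() as f64).ceil() as usize;
    let idx = rank.saturating_sub(1).min(sorted.len() - 1);
    Some(sorted[idx])
}

fn main() {
    let samples: Vec<Duration> = (1..=100).map(Duration::from_millis).collect();
    assert_eq!(percentile(&samples, 75.0), Some(Duration::from_millis(75)));
    assert_eq!(percentile(&samples, 50.0), Some(Duration::from_millis(50)));
}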

View File

@@ -0,0 +1,107 @@
use serde::{Serialize, Deserialize};
use url::Url;
use http::StatusCode;
use std::time::{Duration, SystemTime};
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct JsonRpcRequest {
pub method: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub id: Option<serde_json::Value>,
#[serde(skip_serializing_if = "Option::is_none")]
pub jsonrpc: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub params: Option<serde_json::Value>,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct BatchInfo {
pub is_batch: bool,
pub methods: Vec<String>,
pub request_count: usize,
pub has_stateful: bool,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct Backend {
#[serde(with = "url_serde")]
pub url: Url,
pub name: String,
pub role: String, // Consider an enum BackendRole { Primary, Secondary } later
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct ResponseStats {
pub backend_name: String,
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(with = "http_serde_status_code_option", default)]
pub status_code: Option<StatusCode>,
#[serde(with = "humantime_serde")]
pub duration: Duration,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<String>,
pub method: String,
}
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct WebSocketStats {
pub backend_name: String,
pub error: Option<String>, // Default Option<String> serde is fine
pub connect_time: std::time::Duration, // Default Duration serde (secs/nanos struct)
pub is_active: bool,
pub client_to_backend_messages: u64,
pub backend_to_client_messages: u64,
}
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct CuDataPoint {
pub timestamp: SystemTime,
pub cu: u64,
}
// Helper module for serializing/deserializing Option<http::StatusCode>
mod http_serde_status_code_option {
use http::StatusCode;
use serde::{self, Deserialize, Deserializer, Serializer};
pub fn serialize<S>(status_code: &Option<StatusCode>, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
match status_code {
Some(sc) => serializer.serialize_some(&sc.as_u16()),
None => serializer.serialize_none(),
}
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<Option<StatusCode>, D::Error>
where
D: Deserializer<'de>,
{
Option::<u16>::deserialize(deserializer)?
.map(|code| StatusCode::from_u16(code).map_err(serde::de::Error::custom))
.transpose()
}
}
// Helper module for serializing/deserializing url::Url
mod url_serde {
use url::Url;
use serde::{self, Deserialize, Deserializer, Serializer};
pub fn serialize<S>(url: &Url, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(url.as_str())
}
pub fn deserialize<'de, D>(deserializer: D) -> Result<Url, D::Error>
where
D: Deserializer<'de>,
{
String::deserialize(deserializer)?
.parse()
.map_err(serde::de::Error::custom)
}
}
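A small round-trip sketch (not from the diff) showing the effect of the url_serde helper above: Backend deserializes from a plain string URL. The trailing slash in the assertion comes from url's normalization; serde_json as a dev-dependency is an assumption.

#[cfg(test)]
mod tests {
    use super::Backend;

    #[test]
    fn backend_deserializes_from_plain_string_url() {
        let raw = r#"{"url":"https://rpc.example.org","name":"primary-1","role":"primary"}"#;
        let backend: Backend = serde_json::from_str(raw).expect("valid backend config");
        assert_eq!(backend.name, "primary-1");
        assert_eq!(backend.role, "primary");
        assert_eq!(backend.url.as_str(), "https://rpc.example.org/");
    }
}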

View File

@@ -0,0 +1,228 @@
use std::sync::Arc;
use std::time::{Duration, Instant};
use hyper::{Body, Request, Response, StatusCode};
use hyper_tungstenite::HyperWebsocket;
use log;
use tokio_tungstenite::tungstenite::protocol::Message;
use futures_util::{stream::StreamExt, sink::SinkExt};
use crate::config::AppConfig;
use crate::stats_collector::StatsCollector;
use crate::structures::{Backend, WebSocketStats}; // Ensure WebSocketStats has new fields
pub async fn handle_websocket_request(
mut req: Request<Body>,
app_config: Arc<AppConfig>,
stats_collector: Arc<StatsCollector>,
all_backends: Arc<Vec<Backend>>,
) -> Result<Response<Body>, Box<dyn std::error::Error + Send + Sync + 'static>> {
let upgrade_start_time = Instant::now();
// Check for upgrade request
if !hyper_tungstenite::is_upgrade_request(&req) {
log::warn!("Not a WebSocket upgrade request");
let mut resp = Response::new(Body::from("Not a WebSocket upgrade request"));
*resp.status_mut() = StatusCode::BAD_REQUEST;
return Ok(resp);
}
// Attempt to upgrade the connection
let (response, websocket) = match hyper_tungstenite::upgrade(&mut req, None) {
Ok((resp, ws)) => (resp, ws),
Err(e) => {
log::error!("WebSocket upgrade failed: {}", e);
let mut resp = Response::new(Body::from(format!("WebSocket upgrade failed: {}", e)));
*resp.status_mut() = StatusCode::INTERNAL_SERVER_ERROR; // Or BAD_REQUEST
return Ok(resp);
}
};
// Spawn a task to handle the WebSocket connection after sending 101
tokio::spawn(async move {
match websocket.await {
Ok(ws_stream) => {
let client_ws_stream = ws_stream;
if app_config.enable_detailed_logs {
log::info!("Client WebSocket connection established.");
}
// Successfully upgraded client connection, now connect to primary backend
proxy_websocket_to_primary(client_ws_stream, app_config, stats_collector, all_backends).await;
}
Err(e) => {
log::error!("Error awaiting client WebSocket upgrade: {}", e);
// No actual client WS connection to record stats against other than the failed upgrade attempt
let stats = WebSocketStats {
backend_name: "client_upgrade_failed".to_string(),
error: Some(format!("Client WS upgrade await error: {}", e)),
connect_time: upgrade_start_time.elapsed(),
is_active: false,
client_to_backend_messages: 0,
backend_to_client_messages: 0,
};
stats_collector.add_websocket_stats(stats);
}
}
});
// Return the 101 Switching Protocols response to the client
Ok(response)
}
async fn proxy_websocket_to_primary(
mut client_ws_stream: tokio_tungstenite::WebSocketStream<hyper::upgrade::Upgraded>, // the resolved stream (HyperWebsocket is only the upgrade future); mutable for close()/split()
app_config: Arc<AppConfig>,
stats_collector: Arc<StatsCollector>,
all_backends: Arc<Vec<Backend>>,
) {
let connect_to_primary_start_time = Instant::now();
let mut client_to_backend_msg_count: u64 = 0;
let mut backend_to_client_msg_count: u64 = 0;
let mut ws_stats_error: Option<String> = None;
let mut backend_name_for_stats = "primary_unknown".to_string();
// 1. Find Primary Backend
let primary_backend = match all_backends.iter().find(|b| b.role == "primary") {
Some(pb) => {
backend_name_for_stats = pb.name.clone();
pb
}
None => {
log::error!("No primary backend configured for WebSocket proxy.");
ws_stats_error = Some("No primary backend configured".to_string());
// Close client connection gracefully if possible
let _ = client_ws_stream.close(None).await; // HyperWebsocket uses close method
// Record stats and return
let stats = WebSocketStats {
backend_name: backend_name_for_stats,
error: ws_stats_error,
connect_time: connect_to_primary_start_time.elapsed(),
is_active: false,
client_to_backend_messages: client_to_backend_msg_count,
backend_to_client_messages: backend_to_client_msg_count,
};
stats_collector.add_websocket_stats(stats);
return;
}
};
backend_name_for_stats = primary_backend.name.clone(); // Ensure it's set if primary_backend was found
// 2. Connect to Primary Backend's WebSocket
let mut ws_url = primary_backend.url.clone();
let scheme = if ws_url.scheme() == "https" { "wss" } else { "ws" };
if ws_url.set_scheme(scheme).is_err() {
log::error!("Failed to set WebSocket scheme for backend URL: {}", primary_backend.url);
ws_stats_error = Some(format!("Invalid backend URL scheme for {}", primary_backend.url));
let _ = client_ws_stream.close(None).await;
let stats = WebSocketStats {
backend_name: backend_name_for_stats,
error: ws_stats_error,
connect_time: connect_to_primary_start_time.elapsed(),
is_active: false,
client_to_backend_messages: client_to_backend_msg_count,
backend_to_client_messages: backend_to_client_msg_count,
};
stats_collector.add_websocket_stats(stats);
return;
}
let backend_connect_attempt_time = Instant::now();
let backend_ws_result = tokio_tungstenite::connect_async(ws_url.clone()).await;
let connect_duration = backend_connect_attempt_time.elapsed(); // This is backend connection time
let backend_ws_stream_conn = match backend_ws_result {
Ok((stream, _response)) => {
if app_config.enable_detailed_logs {
log::info!("Successfully connected to primary backend WebSocket: {}", primary_backend.name);
}
stream
}
Err(e) => {
log::error!("Failed to connect to primary backend {} WebSocket: {}", primary_backend.name, e);
ws_stats_error = Some(format!("Primary backend connect error: {}", e));
let _ = client_ws_stream.close(None).await; // Close client connection
let stats = WebSocketStats {
backend_name: backend_name_for_stats,
error: ws_stats_error,
connect_time: connect_duration,
is_active: false,
client_to_backend_messages: client_to_backend_msg_count,
backend_to_client_messages: backend_to_client_msg_count,
};
stats_collector.add_websocket_stats(stats);
return;
}
};
// 3. Proxying Logic
let (mut client_ws_tx, mut client_ws_rx) = client_ws_stream.split();
let (mut backend_ws_tx, mut backend_ws_rx) = backend_ws_stream_conn.split();
// Each direction records its own first error; they are merged after the join so the
// two futures below do not both need a mutable borrow of ws_stats_error.
let mut client_read_error: Option<String> = None;
let mut backend_read_error: Option<String> = None;
let client_to_backend_task = async {
while let Some(msg_result) = client_ws_rx.next().await {
match msg_result {
Ok(msg) => {
if app_config.enable_detailed_logs { log::trace!("C->B: {:?}", msg); }
if backend_ws_tx.send(msg).await.is_err() {
if app_config.enable_detailed_logs { log::debug!("Error sending to backend, C->B loop breaking."); }
break;
}
client_to_backend_msg_count += 1;
}
Err(e) => {
log::warn!("Error reading from client WebSocket: {}", e);
client_read_error.get_or_insert_with(|| format!("Client read error: {}", e));
break;
}
}
}
// Try to close the backend sink gracefully if client read loop ends
if app_config.enable_detailed_logs { log::debug!("C->B proxy loop finished. Closing backend_ws_tx.");}
let _ = backend_ws_tx.close().await;
};
let backend_to_client_task = async {
while let Some(msg_result) = backend_ws_rx.next().await {
match msg_result {
Ok(msg) => {
if app_config.enable_detailed_logs { log::trace!("B->C: {:?}", msg); }
if client_ws_tx.send(msg).await.is_err() {
if app_config.enable_detailed_logs { log::debug!("Error sending to client, B->C loop breaking."); }
break;
}
backend_to_client_msg_count += 1;
}
Err(e) => {
log::warn!("Error reading from backend WebSocket: {}", e);
backend_read_error.get_or_insert_with(|| format!("Backend read error: {}", e));
break;
}
}
}
// Try to close the client sink gracefully if backend read loop ends
if app_config.enable_detailed_logs { log::debug!("B->C proxy loop finished. Closing client_ws_tx.");}
let _ = client_ws_tx.close().await;
};
// Run both proxy tasks concurrently
tokio::join!(client_to_backend_task, backend_to_client_task);
// Prefer any pre-existing error, then the client-side error, then the backend-side one.
ws_stats_error = ws_stats_error.or(client_read_error).or(backend_read_error);
if app_config.enable_detailed_logs {
log::info!("WebSocket proxying ended for {}. Client->Backend: {}, Backend->Client: {}. Error: {:?}",
backend_name_for_stats, client_to_backend_msg_count, backend_to_client_msg_count, ws_stats_error);
}
let final_session_duration = connect_to_primary_start_time.elapsed();
let final_stats = WebSocketStats {
backend_name: backend_name_for_stats,
error: ws_stats_error,
connect_time: final_session_duration,
is_active: false, // Session is now over
client_to_backend_messages: client_to_backend_msg_count,
backend_to_client_messages: backend_to_client_msg_count,
};
stats_collector.add_websocket_stats(final_stats);
}

berachain-bartio.yml Symbolic link
View File

@@ -0,0 +1 @@
berachain/reth/berachain-bartio-reth-archive-trace.yml

View File

@@ -0,0 +1 @@
berachain/reth/berachain-bepolia-reth-archive-trace.yml

View File

@@ -0,0 +1 @@
berachain/reth/berachain-mainnet-reth-archive-trace.yml

Some files were not shown because too many files have changed in this diff.