diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..1bebb339 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,4 @@ +### v0.9.4-rc1 +- Unifies the `pbs`, `signer`, and `cli` binaries into one: `commit-boost`. This change changes the CLI, notably the `init` command is now invoked as `commit-boost init --config `. +- Includes new quality of life testing improvements in the Justfile: unit test coverage tooling, local Kurtosis testnet, and microbenchmark diffing. +- Robustifies the release process to ensure no compromised maintainer can unilaterally cut a release. Additionally all binaries are now signed during CI and can easily be verified before use. diff --git a/Cargo.lock b/Cargo.lock index 2de971f8..aab44380 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1880,7 +1880,6 @@ name = "cb-pbs" version = "0.9.3" dependencies = [ "alloy", - "async-trait", "axum 0.8.8", "axum-extra", "cb-common", @@ -6652,23 +6651,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" -[[package]] -name = "status_api" -version = "0.9.3" -dependencies = [ - "async-trait", - "axum 0.8.8", - "color-eyre", - "commit-boost", - "eyre", - "lazy_static", - "prometheus", - "reqwest 0.12.28", - "serde", - "tokio", - "tracing", -] - [[package]] name = "strsim" version = "0.11.1" diff --git a/Cargo.toml b/Cargo.toml index 83a27e93..4419c9bc 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,5 +1,5 @@ [workspace] -members = ["benches/*", "bin", "crates/*", "examples/da_commit", "examples/status_api", "tests"] +members = ["benches/*", "bin", "crates/*", "examples/da_commit", "tests"] resolver = "2" [workspace.package] diff --git a/api/signer-api.yml b/api/signer-api.yml index 95897ecd..be44f8fd 100644 --- a/api/signer-api.yml +++ b/api/signer-api.yml @@ -86,7 +86,7 @@ paths: application/json: schema: type: object - required: [pubkey, object_root] + required: [pubkey, object_root, 
nonce] properties: pubkey: description: The 48-byte BLS public key, with optional `0x` prefix, of the proposer key that you want to request a signature from. @@ -234,7 +234,7 @@ paths: application/json: schema: type: object - required: [proxy, object_root] + required: [proxy, object_root, nonce] properties: proxy: description: The 48-byte BLS public key (for `proxy_bls` mode) or the 20-byte Ethereum address (for `proxy_ecdsa` mode), with optional `0x` prefix, of the proxy key that you want to request a signature from. @@ -382,7 +382,7 @@ paths: application/json: schema: type: object - required: [proxy, object_root] + required: [proxy, object_root, nonce] properties: proxy: description: The 20-byte Ethereum address, with optional `0x` prefix, of the proxy key that you want to request a signature from. @@ -695,7 +695,12 @@ components: $ref: "#/components/schemas/EcdsaSignature" Nonce: type: integer - description: If your module tracks nonces per signature (e.g., to prevent replay attacks), this is the unique nonce to use for the signature. It should be an unsigned 64-bit integer in big-endian format. It must be between 0 and 2^64-2, inclusive. If your module doesn't use nonces, we suggest setting this to 2^64-1 instead of 0 because 0 is a legal nonce and will cause complications with your module if you ever want to use a nonce in the future. + description: | + Replay-protection nonce, always mixed into the signing root via `PropCommitSigningInfo`. It + must be an unsigned 64-bit integer between 0 and 2^64-2 (18446744073709551614), inclusive. + + Modules that track nonces for replay protection should use a monotonically increasing value + per key. Modules that do not use replay protection should always send `0`. 
minimum: 0 - maximum: 18446744073709551614 // 2^64-2 + maximum: 18446744073709551614 example: 1 diff --git a/benches/microbench/src/get_header.rs b/benches/microbench/src/get_header.rs index 44eff329..4059242e 100644 --- a/benches/microbench/src/get_header.rs +++ b/benches/microbench/src/get_header.rs @@ -36,20 +36,18 @@ //! - `HeaderMap` allocation (created once in setup, cloned cheaply per //! iteration) -use std::{path::PathBuf, sync::Arc, time::Duration}; +use std::{collections::HashSet, path::PathBuf, sync::Arc}; use alloy::primitives::B256; use axum::http::HeaderMap; -use cb_common::{pbs::GetHeaderParams, signer::random_secret, types::Chain}; +use cb_common::{pbs::GetHeaderParams, signer::random_secret, types::Chain, utils::EncodingType}; use cb_pbs::{PbsState, get_header}; use cb_tests::{ - mock_relay::{MockRelayState, start_mock_relay_service}, - utils::{generate_mock_relay, get_pbs_static_config, to_pbs_config}, + mock_relay::{MockRelayState, start_mock_relay_service_with_listener}, + utils::{generate_mock_relay, get_free_listener, get_pbs_config, to_pbs_config}, }; use criterion::{Criterion, black_box, criterion_group, criterion_main}; -// Ports 19201–19205 are reserved for the microbenchmark mock relays. -const BASE_PORT: u16 = 19200; const CHAIN: Chain = Chain::Hoodi; const MAX_RELAYS: usize = 5; const RELAY_COUNTS: [usize; 3] = [1, 3, MAX_RELAYS]; @@ -77,34 +75,34 @@ fn bench_get_header(c: &mut Criterion) { // Start all mock relays once and build one PbsState per relay-count variant. // All relays share the same MockRelayState (and therefore the same signing - // key). + // key). Each relay gets its own OS-assigned port via get_free_listener() so + // there is no TOCTOU race and no hardcoded port reservations. 
let (states, params) = rt.block_on(async { let signer = random_secret(); let pubkey = signer.public_key(); let mock_state = Arc::new(MockRelayState::new(CHAIN, signer)); - let relay_clients: Vec<_> = (0..MAX_RELAYS) - .map(|i| { - let port = BASE_PORT + 1 + i as u16; - tokio::spawn(start_mock_relay_service(mock_state.clone(), port)); - generate_mock_relay(port, pubkey.clone()).expect("relay client") - }) - .collect(); + let mut relay_clients = Vec::with_capacity(MAX_RELAYS); + for _ in 0..MAX_RELAYS { + let listener = get_free_listener().await; + let port = listener.local_addr().unwrap().port(); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), listener)); + relay_clients.push(generate_mock_relay(port, pubkey.clone()).expect("relay client")); + } - // Give all servers time to bind before benchmarking starts. - tokio::time::sleep(Duration::from_millis(200)).await; + // Give all servers time to start accepting before benchmarking begins. + tokio::time::sleep(std::time::Duration::from_millis(200)).await; let params = GetHeaderParams { slot: 0, parent_hash: B256::ZERO, pubkey }; // Port 0 here is the port the PBS service itself would bind to for incoming // validator requests. We call get_header() as a function directly, so no // PBS server is started and this port is never used. The actual relay - // endpoints are carried inside the RelayClient objects (ports 19201–19205). + // endpoints are carried inside the RelayClient objects. 
let states: Vec = RELAY_COUNTS .iter() .map(|&n| { - let config = - to_pbs_config(CHAIN, get_pbs_static_config(0), relay_clients[..n].to_vec()); + let config = to_pbs_config(CHAIN, get_pbs_config(0), relay_clients[..n].to_vec()); PbsState::new(config, PathBuf::new()) }) .collect(); @@ -138,6 +136,7 @@ fn bench_get_header(c: &mut Criterion) { black_box(params.clone()), black_box(headers.clone()), black_box(state.clone()), + black_box(HashSet::from([EncodingType::Json, EncodingType::Ssz])), )) .expect("get_header failed") }) diff --git a/bin/commit-boost.rs b/bin/commit-boost.rs index e424d144..3994ef90 100644 --- a/bin/commit-boost.rs +++ b/bin/commit-boost.rs @@ -7,7 +7,7 @@ use cb_common::{ }, utils::{initialize_tracing_log, print_logo, wait_for_signal}, }; -use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; +use cb_pbs::{PbsService, PbsState}; use cb_signer::service::SigningService; use clap::{Parser, Subcommand}; use eyre::Result; @@ -68,7 +68,7 @@ async fn run_pbs_service() -> Result<()> { PbsService::init_metrics(pbs_config.chain)?; let state = PbsState::new(pbs_config, config_path); - let server = PbsService::run::<_, DefaultBuilderApi>(state); + let server = PbsService::run(state); tokio::select! 
{ maybe_err = server => { diff --git a/bin/src/lib.rs b/bin/src/lib.rs index 0897aa34..365815a6 100644 --- a/bin/src/lib.rs +++ b/bin/src/lib.rs @@ -17,10 +17,7 @@ pub mod prelude { utils::{initialize_tracing_log, utcnow_ms, utcnow_ns, utcnow_sec, utcnow_us}, }; pub use cb_metrics::provider::MetricsProvider; - pub use cb_pbs::{ - BuilderApi, BuilderApiState, DefaultBuilderApi, PbsService, PbsState, PbsStateGuard, - get_header, get_status, register_validator, submit_block, - }; + pub use cb_pbs::{PbsService, PbsState, PbsStateGuard}; // The TreeHash derive macro requires tree_hash as import pub mod tree_hash { pub use tree_hash::*; diff --git a/config.example.toml b/config.example.toml index f7745df4..0e23a6df 100644 --- a/config.example.toml +++ b/config.example.toml @@ -12,9 +12,6 @@ chain = "Holesky" # Docker image to use for the PBS module. # OPTIONAL, DEFAULT: ghcr.io/commit-boost/pbs:latest docker_image = "ghcr.io/commit-boost/pbs:latest" -# Whether to enable the PBS module to request signatures from the Signer module (not used in the default PBS image) -# OPTIONAL, DEFAULT: false -with_signer = false # Host to receive BuilderAPI calls from beacon node # OPTIONAL, DEFAULT: 127.0.0.1 host = "127.0.0.1" @@ -174,7 +171,7 @@ timeout_get_header_ms = 900 id = "mux-relay-1" url = "http://0xa119589bb33ef52acbb8116832bec2b58fca590fe5c85eac5d3230b44d5bc09fe73ccd21f88eab31d6de16194d17782e@def.xyz" -# Configuration for the Signer Module, only required if any `commit` module is present, or if `pbs.with_signer = true` +# Configuration for the Signer Module, only required if any `commit` module is present # Currently three types of Signer modules are supported (only one can be used at a time): # - Remote: a remote Web3Signer instance # - Dirk: a remote Dirk instance diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index 7976ce17..ac451341 100644 --- a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -497,6 +497,8 @@ fn 
create_signer_service_dirk( let mut envs = IndexMap::from([ get_env_val(CONFIG_ENV, CONFIG_DEFAULT), get_env_same(JWTS_ENV), + get_env_same(ADMIN_JWT_ENV), + get_env_val(SIGNER_TLS_CERTIFICATES_PATH_ENV, SIGNER_TLS_CERTIFICATES_PATH_DEFAULT), get_env_val(DIRK_CERT_ENV, DIRK_CERT_DEFAULT), get_env_val(DIRK_KEY_ENV, DIRK_KEY_DEFAULT), get_env_val(DIRK_DIR_SECRETS_ENV, DIRK_DIR_SECRETS_DEFAULT), @@ -548,6 +550,7 @@ fn create_signer_service_dirk( // write jwts to env service_config.envs.insert(JWTS_ENV.into(), format_comma_separated(&service_config.jwts)); + service_config.envs.insert(ADMIN_JWT_ENV.into(), random_jwt_secret()); // CA cert volume and env if let Some(ca_cert_path) = ca_cert_path { @@ -589,8 +592,8 @@ fn create_signer_service_dirk( environment: Environment::KvPair(envs), healthcheck: Some(Healthcheck { test: Some(HealthcheckTest::Single(format!( - "curl -f http://localhost:{}/status", - signer_config.port, + "curl -k -f {}/status", + cb_config.signer_server_url(SIGNER_PORT_DEFAULT), ))), interval: Some("30s".into()), timeout: Some("5s".into()), @@ -932,6 +935,13 @@ mod tests { service.volumes.iter().any(|v| matches!(v, Volumes::Simple(s) if s.contains(substr))) } + fn get_healthcheck_cmd(service: &Service) -> Option { + service.healthcheck.as_ref().and_then(|hc| match &hc.test { + Some(HealthcheckTest::Single(cmd)) => Some(cmd.clone()), + _ => None, + }) + } + fn has_port(service: &Service, substr: &str) -> bool { match &service.ports { Ports::Short(ports) => ports.iter().any(|p| p.contains(substr)), @@ -1309,12 +1319,33 @@ mod tests { assert!(env_str(&service, DIRK_CERT_ENV).is_some()); assert!(env_str(&service, DIRK_KEY_ENV).is_some()); assert!(env_str(&service, DIRK_DIR_SECRETS_ENV).is_some()); + assert!(has_env_key(&service, ADMIN_JWT_ENV)); + assert!(has_env_key(&service, SIGNER_TLS_CERTIFICATES_PATH_ENV)); assert!(has_volume(&service, "client.crt")); assert!(has_volume(&service, "client.key")); assert!(has_volume(&service, "dirk_secrets")); Ok(()) } 
+ #[test] + fn test_create_signer_service_dirk_generates_admin_jwt() -> eyre::Result<()> { + let mut sc = minimal_service_config(); + let signer_config = dirk_signer_config(); + create_signer_service_dirk( + &mut sc, + &signer_config, + Path::new("/certs/client.crt"), + Path::new("/certs/client.key"), + Path::new("/dirk_secrets"), + &None, + &None, + )?; + + let admin_jwt = sc.envs.get(ADMIN_JWT_ENV).expect("ADMIN_JWT_ENV must be set"); + assert!(!admin_jwt.is_empty(), "admin JWT secret must not be empty"); + Ok(()) + } + #[test] fn test_create_signer_service_dirk_with_ca_cert() -> eyre::Result<()> { let mut sc = minimal_service_config(); @@ -1489,11 +1520,10 @@ mod tests { config } - /// Returns a `ServiceCreationInfo` whose CB config has `pbs.with_signer = - /// true` and a local signer with `TlsMode::Certificate(certs_path)`. + /// Returns a `ServiceCreationInfo` whose CB config has a local signer with + /// `TlsMode::Certificate(certs_path)`. fn service_config_with_tls(certs_path: PathBuf) -> ServiceCreationInfo { let mut sc = minimal_service_config(); - sc.config_info.cb_config.pbs.with_signer = true; sc.config_info.cb_config.signer = Some(local_signer_config_with_tls(certs_path)); sc } @@ -1589,12 +1619,15 @@ mod tests { // ------------------------------------------------------------------------- #[test] - fn test_create_pbs_service_with_tls_adds_cert_env_and_volume() -> eyre::Result<()> { + fn test_create_pbs_service_with_tls_but_no_commit_module_no_cert() -> eyre::Result<()> { + // PBS no longer connects to the signer directly; only commit modules do. + // Even when the signer is configured with TLS, the cert env/volume must + // NOT be injected into the PBS container unless a Commit module is present. 
let mut sc = service_config_with_tls(PathBuf::from("/my/certs")); let service = create_pbs_service(&mut sc)?; - assert!(has_env_key(&service, SIGNER_TLS_CERTIFICATES_PATH_ENV)); - assert!(has_volume(&service, SIGNER_TLS_CERTIFICATE_NAME)); + assert!(!has_env_key(&service, SIGNER_TLS_CERTIFICATES_PATH_ENV)); + assert!(!has_volume(&service, SIGNER_TLS_CERTIFICATE_NAME)); Ok(()) } @@ -1690,6 +1723,50 @@ mod tests { Ok(()) } + #[test] + fn test_create_signer_service_dirk_healthcheck_uses_https_with_tls() -> eyre::Result<()> { + let dir = tempfile::tempdir()?; + let certs_path = dir.path().to_path_buf(); + std::fs::write(certs_path.join(SIGNER_TLS_CERTIFICATE_NAME), b"cert")?; + std::fs::write(certs_path.join(SIGNER_TLS_KEY_NAME), b"key")?; + + let mut sc = service_config_with_tls(certs_path); + let signer_config = dirk_signer_config(); + let service = create_signer_service_dirk( + &mut sc, + &signer_config, + Path::new("/certs/client.crt"), + Path::new("/certs/client.key"), + Path::new("/dirk_secrets"), + &None, + &None, + )?; + + let cmd = get_healthcheck_cmd(&service).expect("healthcheck must be set"); + assert!(cmd.contains("https://"), "healthcheck must use https with TLS: {cmd}"); + assert!(cmd.contains("-k"), "healthcheck must use -k flag for self-signed certs: {cmd}"); + Ok(()) + } + + #[test] + fn test_create_signer_service_dirk_healthcheck_uses_http_without_tls() -> eyre::Result<()> { + let mut sc = minimal_service_config(); + let signer_config = dirk_signer_config(); + let service = create_signer_service_dirk( + &mut sc, + &signer_config, + Path::new("/certs/client.crt"), + Path::new("/certs/client.key"), + Path::new("/dirk_secrets"), + &None, + &None, + )?; + + let cmd = get_healthcheck_cmd(&service).expect("healthcheck must be set"); + assert!(cmd.contains("http://"), "healthcheck must use http without TLS: {cmd}"); + Ok(()) + } + // ------------------------------------------------------------------------- // create_module_service – TLS cert env/volume // 
------------------------------------------------------------------------- diff --git a/crates/common/src/commit/request.rs b/crates/common/src/commit/request.rs index a64e9a67..cd780446 100644 --- a/crates/common/src/commit/request.rs +++ b/crates/common/src/commit/request.rs @@ -84,6 +84,10 @@ impl fmt::Display for SignedProxyDelegation { pub struct SignConsensusRequest { pub pubkey: BlsPublicKey, pub object_root: B256, + /// Replay-protection nonce mixed into the signing root via + /// `PropCommitSigningInfo`. Modules that do not track nonces should + /// send `0`. Modules that do track nonces should use a monotonically + /// increasing value per key to prevent signature reuse. pub nonce: u64, } @@ -93,7 +97,7 @@ impl SignConsensusRequest { } pub fn builder(pubkey: BlsPublicKey) -> Self { - Self::new(pubkey, B256::ZERO, u64::MAX - 1) + Self::new(pubkey, B256::ZERO, 0) } pub fn with_root>(self, object_root: R) -> Self { @@ -125,6 +129,10 @@ impl Display for SignConsensusRequest { pub struct SignProxyRequest { pub proxy: T, pub object_root: B256, + /// Replay-protection nonce mixed into the signing root via + /// `PropCommitSigningInfo`. Modules that do not track nonces should + /// send `0`. Modules that do track nonces should use a monotonically + /// increasing value per key to prevent signature reuse. 
pub nonce: u64, } @@ -134,7 +142,7 @@ impl SignProxyRequest { } pub fn builder(proxy: T) -> Self { - Self::new(proxy, B256::ZERO, u64::MAX - 1) + Self::new(proxy, B256::ZERO, 0) } pub fn with_root>(self, object_root: R) -> Self { diff --git a/crates/common/src/config/mod.rs b/crates/common/src/config/mod.rs index e0958342..1955ad42 100644 --- a/crates/common/src/config/mod.rs +++ b/crates/common/src/config/mod.rs @@ -131,10 +131,9 @@ impl CommitBoostConfig { /// Helper to return if the signer module is needed based on the config pub fn needs_signer_module(&self) -> bool { - self.pbs.with_signer || - self.modules.as_ref().is_some_and(|modules| { - modules.iter().any(|module| matches!(module.kind, ModuleKind::Commit)) - }) + self.modules.as_ref().is_some_and(|modules| { + modules.iter().any(|module| matches!(module.kind, ModuleKind::Commit)) + }) } pub fn signer_uses_tls(&self) -> bool { diff --git a/crates/common/src/config/module.rs b/crates/common/src/config/module.rs index 22884551..aec45289 100644 --- a/crates/common/src/config/module.rs +++ b/crates/common/src/config/module.rs @@ -83,7 +83,7 @@ pub fn load_commit_module_config() -> Result { chain: Chain, modules: Vec>, - signer: SignerConfig, + signer: Option, } // load module config including the extra data (if any) @@ -106,7 +106,7 @@ pub fn load_commit_module_config() -> Result None, TlsMode::Certificate(path) => Some( load_env_var(SIGNER_TLS_CERTIFICATES_PATH_ENV) diff --git a/crates/common/src/config/pbs.rs b/crates/common/src/config/pbs.rs index 3fb49ee6..c9525b23 100644 --- a/crates/common/src/config/pbs.rs +++ b/crates/common/src/config/pbs.rs @@ -21,17 +21,12 @@ use super::{ load_optional_env_var, }; use crate::{ - commit::client::SignerClient, - config::{ - CONFIG_ENV, MODULE_JWT_ENV, MuxKeysLoader, PBS_IMAGE_DEFAULT, PBS_SERVICE_NAME, PbsMuxes, - SIGNER_TLS_CERTIFICATE_NAME, SIGNER_TLS_CERTIFICATES_PATH_ENV, SIGNER_URL_ENV, - SignerConfig, TlsMode, load_env_var, load_file_from_env, - }, + 
config::{CONFIG_ENV, MuxKeysLoader, PBS_IMAGE_DEFAULT, PbsMuxes, load_file_from_env}, pbs::{ DEFAULT_PBS_PORT, DEFAULT_REGISTRY_REFRESH_SECONDS, DefaultTimeout, LATE_IN_SLOT_TIME_MS, REGISTER_VALIDATOR_RETRY_LIMIT, RelayClient, RelayEntry, }, - types::{BlsPublicKey, Chain, Jwt, ModuleId}, + types::{BlsPublicKey, Chain}, utils::{ WEI_PER_ETH, as_eth_str, default_bool, default_host, default_u16, default_u32, default_u64, default_u256, @@ -244,9 +239,6 @@ pub struct StaticPbsConfig { /// Config of pbs module #[serde(flatten)] pub pbs_config: PbsConfig, - /// Whether to enable the signer client - #[serde(default = "default_bool::")] - pub with_signer: bool, } impl StaticPbsConfig { @@ -279,8 +271,6 @@ pub struct PbsModuleConfig { /// URL) DO NOT use this for get_header calls, use `relays` or `mux_lookup` /// instead pub all_relays: Vec, - /// Signer client to call Signer API - pub signer_client: Option, /// List of raw mux details configured, if any pub registry_muxes: Option>, /// Lookup of pubkey to mux config @@ -355,7 +345,6 @@ pub async fn load_pbs_config(config_path: Option) -> Result<(PbsModuleC pbs_config: Arc::new(config.pbs.pbs_config), relays: relay_clients, all_relays, - signer_client: None, registry_muxes, mux_lookup, }, @@ -378,7 +367,6 @@ pub async fn load_pbs_custom_config() -> Result<(PbsModuleC chain: Chain, relays: Vec, pbs: CustomPbsConfig, - signer: SignerConfig, muxes: Option, } @@ -431,29 +419,6 @@ pub async fn load_pbs_custom_config() -> Result<(PbsModuleC let all_relays = all_relays.into_values().collect(); - let signer_client = if cb_config.pbs.static_config.with_signer { - // if custom pbs requires a signer client, load jwt - let module_jwt = Jwt(load_env_var(MODULE_JWT_ENV)?); - let signer_server_url = load_env_var(SIGNER_URL_ENV)?.parse()?; - let certs_path = match cb_config.signer.tls_mode { - TlsMode::Insecure => None, - TlsMode::Certificate(path) => Some( - load_env_var(SIGNER_TLS_CERTIFICATES_PATH_ENV) - .map(PathBuf::from) - 
.unwrap_or(path) - .join(SIGNER_TLS_CERTIFICATE_NAME), - ), - }; - Some(SignerClient::new( - signer_server_url, - certs_path, - module_jwt, - ModuleId(PBS_SERVICE_NAME.to_string()), - )?) - } else { - None - }; - Ok(( PbsModuleConfig { chain: cb_config.chain, @@ -461,7 +426,6 @@ pub async fn load_pbs_custom_config() -> Result<(PbsModuleC pbs_config: Arc::new(cb_config.pbs.static_config.pbs_config), relays: relay_clients, all_relays, - signer_client, registry_muxes, mux_lookup, }, diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index 343ec213..13575c8c 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -92,10 +92,12 @@ impl Display for ReverseProxyHeaderSetup { write!(f, "\"{header} (unique)\"") } ReverseProxyHeaderSetup::Rightmost { header, trusted_count } => { - let suffix = match trusted_count.get() % 10 { - 1 => "st", - 2 => "nd", - 3 => "rd", + let n = trusted_count.get(); + let suffix = match (n % 100, n % 10) { + (11..=13, _) => "th", + (_, 1) => "st", + (_, 2) => "nd", + (_, 3) => "rd", _ => "th", }; write!(f, "\"{header} ({trusted_count}{suffix} from the right)\"") @@ -483,7 +485,6 @@ mod tests { ssv_node_api_url: Url::parse("https://example.net").unwrap(), ssv_public_api_url: Url::parse("https://example.net").unwrap(), }, - with_signer: true, }, muxes: None, modules: Some(vec![]), diff --git a/crates/common/src/signature.rs b/crates/common/src/signature.rs index 18c10d4a..41631e33 100644 --- a/crates/common/src/signature.rs +++ b/crates/common/src/signature.rs @@ -165,10 +165,18 @@ pub fn verify_proposer_commitment_signature_ecdsa( #[cfg(test)] mod tests { - use alloy::primitives::aliases::B32; - - use super::compute_domain; - use crate::{constants::APPLICATION_BUILDER_DOMAIN, types::Chain}; + use alloy::primitives::{U256, aliases::B32}; + + use super::{compute_domain, sign_builder_message, verify_signed_message}; + use crate::{ + constants::APPLICATION_BUILDER_DOMAIN, + pbs::{ + 
BlindedBeaconBlockElectra, BuilderBid, BuilderBidElectra, + ExecutionPayloadHeaderElectra, ExecutionRequests, + }, + types::{BlsSecretKey, Chain}, + utils::TestRandomSeed, + }; #[test] fn test_builder_domains() { @@ -178,4 +186,48 @@ mod tests { assert_eq!(compute_domain(Chain::Sepolia, domain), Chain::Sepolia.builder_domain()); assert_eq!(compute_domain(Chain::Hoodi, domain), Chain::Hoodi.builder_domain()); } + + #[test] + fn test_builder_bid_sign_and_verify() { + let secret_key = BlsSecretKey::test_random(); + let pubkey = secret_key.public_key(); + + let message = BuilderBid::Electra(BuilderBidElectra { + header: ExecutionPayloadHeaderElectra::test_random(), + blob_kzg_commitments: Default::default(), + execution_requests: ExecutionRequests::default(), + value: U256::from(10), + pubkey: pubkey.clone().into(), + }); + + let sig = sign_builder_message(Chain::Mainnet, &secret_key, &message); + + assert!(verify_signed_message( + Chain::Mainnet, + &pubkey, + &message, + &sig, + None, + &B32::from(APPLICATION_BUILDER_DOMAIN), + )); + } + + #[test] + fn test_blinded_block_sign_and_verify() { + let secret_key = BlsSecretKey::test_random(); + let pubkey = secret_key.public_key(); + + let block = BlindedBeaconBlockElectra::test_random(); + + let sig = sign_builder_message(Chain::Mainnet, &secret_key, &block); + + assert!(verify_signed_message( + Chain::Mainnet, + &pubkey, + &block, + &sig, + None, + &B32::from(APPLICATION_BUILDER_DOMAIN), + )); + } } diff --git a/crates/pbs/Cargo.toml b/crates/pbs/Cargo.toml index 9d9df214..9cfe9bc9 100644 --- a/crates/pbs/Cargo.toml +++ b/crates/pbs/Cargo.toml @@ -7,7 +7,6 @@ version.workspace = true [dependencies] alloy.workspace = true -async-trait.workspace = true axum.workspace = true axum-extra.workspace = true cb-common.workspace = true diff --git a/crates/pbs/src/api.rs b/crates/pbs/src/api.rs deleted file mode 100644 index 74d92fb2..00000000 --- a/crates/pbs/src/api.rs +++ /dev/null @@ -1,71 +0,0 @@ -use 
std::{collections::HashSet, sync::Arc}; - -use async_trait::async_trait; -use axum::{Router, http::HeaderMap}; -use cb_common::{ - pbs::{BuilderApiVersion, GetHeaderParams, SignedBlindedBeaconBlock}, - utils::EncodingType, -}; - -use crate::{ - CompoundGetHeaderResponse, CompoundSubmitBlockResponse, mev_boost, - state::{BuilderApiState, PbsState, PbsStateGuard}, -}; - -#[async_trait] -pub trait BuilderApi: 'static { - /// Use to extend the BuilderApi - fn extra_routes() -> Option>> { - None - } - - /// https://ethereum.github.io/builder-specs/#/Builder/getHeader - async fn get_header( - params: GetHeaderParams, - req_headers: HeaderMap, - state: PbsState, - accepted_types: HashSet, - ) -> eyre::Result> { - mev_boost::get_header(params, req_headers, state, accepted_types).await - } - - /// https://ethereum.github.io/builder-specs/#/Builder/status - async fn get_status(req_headers: HeaderMap, state: PbsState) -> eyre::Result<()> { - mev_boost::get_status(req_headers, state).await - } - - /// https://ethereum.github.io/builder-specs/#/Builder/submitBlindedBlock and - /// https://ethereum.github.io/builder-specs/#/Builder/submitBlindedBlockV2 - async fn submit_block( - signed_blinded_block: Arc, - req_headers: HeaderMap, - state: PbsState, - api_version: BuilderApiVersion, - accepted_types: HashSet, - ) -> eyre::Result { - mev_boost::submit_block( - signed_blinded_block, - req_headers, - state, - api_version, - accepted_types, - ) - .await - } - - /// https://ethereum.github.io/builder-specs/#/Builder/registerValidator - async fn register_validator( - registrations: Vec, - req_headers: HeaderMap, - state: PbsState, - ) -> eyre::Result<()> { - mev_boost::register_validator(registrations, req_headers, state).await - } - - async fn reload(state: PbsState) -> eyre::Result> { - mev_boost::reload(state).await - } -} - -pub struct DefaultBuilderApi; -impl BuilderApi<()> for DefaultBuilderApi {} diff --git a/crates/pbs/src/lib.rs b/crates/pbs/src/lib.rs index 
8b4afdcf..92d026c3 100644 --- a/crates/pbs/src/lib.rs +++ b/crates/pbs/src/lib.rs @@ -1,15 +1,15 @@ -mod api; mod constants; mod error; mod metrics; -mod mev_boost; mod routes; mod service; mod state; mod utils; -pub use api::*; pub use constants::*; -pub use mev_boost::*; +pub use routes::{ + CompoundGetHeaderResponse, CompoundSubmitBlockResponse, LightGetHeaderResponse, + LightSubmitBlockResponse, get_header, +}; pub use service::PbsService; -pub use state::{BuilderApiState, PbsState, PbsStateGuard}; +pub use state::{PbsState, PbsStateGuard}; diff --git a/crates/pbs/src/mev_boost/mod.rs b/crates/pbs/src/mev_boost/mod.rs deleted file mode 100644 index 81dc4bf6..00000000 --- a/crates/pbs/src/mev_boost/mod.rs +++ /dev/null @@ -1,76 +0,0 @@ -mod get_header; -mod register_validator; -mod reload; -mod status; -mod submit_block; - -use alloy::primitives::U256; -use cb_common::{ - pbs::{GetHeaderResponse, SubmitBlindedBlockResponse}, - utils::EncodingType, -}; -pub use get_header::get_header; -use lh_types::ForkName; -pub use register_validator::register_validator; -pub use reload::reload; -pub use status::get_status; -pub use submit_block::submit_block; - -/// Enum that handles different GetHeader response types based on the level of -/// validation required -pub enum CompoundGetHeaderResponse { - /// Standard response type, fully parsing the response from a relay into a - /// complete response struct - Full(Box), - - /// Light response type, only extracting the fork and value from the builder - /// bid with the entire (undecoded) payload for forwarding - Light(LightGetHeaderResponse), -} - -/// Core details of a GetHeaderResponse, used for light processing when -/// validation mode is set to none. 
-#[derive(Clone)] -pub struct LightGetHeaderResponse { - /// The fork name for the bid - pub version: ForkName, - - /// The bid value in wei - pub value: U256, - - /// The raw bytes of the response, for forwarding to the caller - pub raw_bytes: Vec, - - /// The format the response bytes are encoded with - pub encoding_type: EncodingType, -} - -/// Enum that handles different SubmitBlock response types based on the level of -/// validation required -pub enum CompoundSubmitBlockResponse { - /// Standard response type, fully parsing the response from a relay into a - /// complete response struct - Full(Box), - - /// Light response type, only extracting the fork from the response with the - /// entire (undecoded) payload for forwarding - Light(LightSubmitBlockResponse), - - /// Response with no body, used for v2 requests when the relay does not - /// return any content intentionally - EmptyBody, -} - -/// Core details of a SubmitBlockResponse, used for light processing when -/// validation mode is set to none. 
-#[derive(Clone, Debug)] -pub struct LightSubmitBlockResponse { - /// The fork name for the bid - pub version: ForkName, - - /// The raw bytes of the response, for forwarding to the caller - pub raw_bytes: Vec, - - /// The format the response bytes are encoded with - pub encoding_type: EncodingType, -} diff --git a/crates/pbs/src/mev_boost/register_validator.rs b/crates/pbs/src/mev_boost/register_validator.rs deleted file mode 100644 index 15f68416..00000000 --- a/crates/pbs/src/mev_boost/register_validator.rs +++ /dev/null @@ -1,211 +0,0 @@ -use std::time::{Duration, Instant}; - -use alloy::primitives::Bytes; -use axum::http::{HeaderMap, HeaderValue}; -use cb_common::{ - pbs::{HEADER_START_TIME_UNIX_MS, RelayClient, error::PbsError}, - utils::{get_user_agent_with_version, read_chunked_body_with_max, utcnow_ms}, -}; -use eyre::bail; -use futures::{ - FutureExt, - future::{join_all, select_ok}, -}; -use reqwest::header::{CONTENT_TYPE, USER_AGENT}; -use tracing::{Instrument, debug, error}; -use url::Url; - -use crate::{ - constants::{MAX_SIZE_DEFAULT, REGISTER_VALIDATOR_ENDPOINT_TAG, TIMEOUT_ERROR_CODE_STR}, - metrics::{RELAY_LATENCY, RELAY_STATUS_CODE}, - state::{BuilderApiState, PbsState}, -}; - -/// Implements https://ethereum.github.io/builder-specs/#/Builder/registerValidator -/// Returns 200 if at least one relay returns 200, else 503 -pub async fn register_validator( - registrations: Vec, - req_headers: HeaderMap, - state: PbsState, -) -> eyre::Result<()> { - // prepare headers - let mut send_headers = HeaderMap::new(); - send_headers - .insert(HEADER_START_TIME_UNIX_MS, HeaderValue::from_str(&utcnow_ms().to_string())?); - send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); - - // prepare the body in advance, ugly dyn - let bodies: Box> = - if let Some(batch_size) = state.config.pbs_config.validator_registration_batch_size { - Box::new(registrations.chunks(batch_size).map(|batch| { - // SAFETY: unwrap is ok because we're serializing a 
&[serde_json::Value] - let body = serde_json::to_vec(batch).unwrap(); - (batch.len(), Bytes::from(body)) - })) - } else { - let body = serde_json::to_vec(®istrations).unwrap(); - Box::new(std::iter::once((registrations.len(), Bytes::from(body)))) - }; - send_headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); - - let mut handles = Vec::with_capacity(state.all_relays().len()); - - for (n_regs, body) in bodies { - for relay in state.all_relays().iter().cloned() { - handles.push( - tokio::spawn( - send_register_validator_with_timeout( - n_regs, - body.clone(), - relay, - send_headers.clone(), - state.pbs_config().timeout_register_validator_ms, - state.pbs_config().register_validator_retry_limit, - ) - .in_current_span(), - ) - .map(|join_result| match join_result { - Ok(res) => res, - Err(err) => Err(PbsError::TokioJoinError(err)), - }), - ); - } - } - - if state.pbs_config().wait_all_registrations { - // wait for all relays registrations to complete - let results = join_all(handles).await; - if results.into_iter().any(|res| res.is_ok()) { - Ok(()) - } else { - bail!("No relay passed register_validator successfully") - } - } else { - // return once first completes, others proceed in background - let result = select_ok(handles).await; - match result { - Ok(_) => Ok(()), - Err(_) => bail!("No relay passed register_validator successfully"), - } - } -} - -/// Register validator to relay, retry connection errors until the -/// given timeout has passed -async fn send_register_validator_with_timeout( - n_regs: usize, - body: Bytes, - relay: RelayClient, - headers: HeaderMap, - timeout_ms: u64, - retry_limit: u32, -) -> Result<(), PbsError> { - let url = relay.register_validator_url()?; - let mut remaining_timeout_ms = timeout_ms; - let mut retry = 0; - let mut backoff = Duration::from_millis(250); - - loop { - let start_request = Instant::now(); - match send_register_validator( - url.clone(), - n_regs, - body.clone(), - &relay, - headers.clone(), - 
remaining_timeout_ms, - retry, - ) - .await - { - Ok(_) => return Ok(()), - - Err(err) if err.should_retry() => { - retry += 1; - if retry >= retry_limit { - error!( - relay_id = relay.id.as_str(), - retry, "reached retry limit for validator registration" - ); - return Err(err); - } - tokio::time::sleep(backoff).await; - backoff += Duration::from_millis(250); - - remaining_timeout_ms = - timeout_ms.saturating_sub(start_request.elapsed().as_millis() as u64); - - if remaining_timeout_ms == 0 { - return Err(err); - } - } - - Err(err) => return Err(err), - }; - } -} - -async fn send_register_validator( - url: Url, - n_regs: usize, - body: Bytes, - relay: &RelayClient, - headers: HeaderMap, - timeout_ms: u64, - retry: u32, -) -> Result<(), PbsError> { - let start_request = Instant::now(); - let res = match relay - .client - .post(url) - .timeout(Duration::from_millis(timeout_ms)) - .headers(headers) - .body(body.0) - .send() - .await - { - Ok(res) => res, - Err(err) => { - RELAY_STATUS_CODE - .with_label_values(&[ - TIMEOUT_ERROR_CODE_STR, - REGISTER_VALIDATOR_ENDPOINT_TAG, - &relay.id, - ]) - .inc(); - return Err(err.into()); - } - }; - let request_latency = start_request.elapsed(); - RELAY_LATENCY - .with_label_values(&[REGISTER_VALIDATOR_ENDPOINT_TAG, &relay.id]) - .observe(request_latency.as_secs_f64()); - - let code = res.status(); - RELAY_STATUS_CODE - .with_label_values(&[code.as_str(), REGISTER_VALIDATOR_ENDPOINT_TAG, &relay.id]) - .inc(); - - if !code.is_success() { - let response_bytes = read_chunked_body_with_max(res, MAX_SIZE_DEFAULT).await?; - let err = PbsError::RelayResponse { - error_msg: String::from_utf8_lossy(&response_bytes).into_owned(), - code: code.as_u16(), - }; - - // error here since we check if any success above - error!(relay_id = relay.id.as_ref(), retry, %err, "failed registration"); - return Err(err); - }; - - debug!( - relay_id = relay.id.as_ref(), - retry, - ?code, - latency = ?request_latency, - num_registrations = n_regs, - 
"registration successful" - ); - - Ok(()) -} diff --git a/crates/pbs/src/mev_boost/reload.rs b/crates/pbs/src/mev_boost/reload.rs deleted file mode 100644 index adfab89f..00000000 --- a/crates/pbs/src/mev_boost/reload.rs +++ /dev/null @@ -1,27 +0,0 @@ -use cb_common::config::load_pbs_config; -use tracing::warn; - -use crate::{BuilderApiState, PbsState}; - -/// Reload the PBS state with the latest configuration in the config file -/// Returns 200 if successful or 500 if failed -pub async fn reload(state: PbsState) -> eyre::Result> { - let (pbs_config, config_path) = load_pbs_config(None).await?; - let new_state = PbsState::new(pbs_config, config_path).with_data(state.data); - - if state.config.pbs_config.host != new_state.config.pbs_config.host { - warn!( - "Host change for PBS module require a full restart. Old: {}, New: {}", - state.config.pbs_config.host, new_state.config.pbs_config.host - ); - } - - if state.config.pbs_config.port != new_state.config.pbs_config.port { - warn!( - "Port change for PBS module require a full restart. 
Old: {}, New: {}", - state.config.pbs_config.port, new_state.config.pbs_config.port - ); - } - - Ok(new_state) -} diff --git a/crates/pbs/src/mev_boost/status.rs b/crates/pbs/src/mev_boost/status.rs deleted file mode 100644 index c4a8cfed..00000000 --- a/crates/pbs/src/mev_boost/status.rs +++ /dev/null @@ -1,90 +0,0 @@ -use std::time::{Duration, Instant}; - -use axum::http::HeaderMap; -use cb_common::{ - pbs::{RelayClient, error::PbsError}, - utils::{get_user_agent_with_version, read_chunked_body_with_max}, -}; -use futures::future::select_ok; -use reqwest::header::USER_AGENT; -use tracing::{debug, error}; - -use crate::{ - constants::{MAX_SIZE_DEFAULT, STATUS_ENDPOINT_TAG, TIMEOUT_ERROR_CODE_STR}, - metrics::{RELAY_LATENCY, RELAY_STATUS_CODE}, - state::{BuilderApiState, PbsState}, -}; - -/// Implements https://ethereum.github.io/builder-specs/#/Builder/status -/// Broadcasts a status check to all relays and returns 200 if at least one -/// relay returns 200 -pub async fn get_status( - req_headers: HeaderMap, - state: PbsState, -) -> eyre::Result<()> { - // If no relay check, return early - if !state.config.pbs_config.relay_check { - Ok(()) - } else { - // prepare headers - let mut send_headers = HeaderMap::new(); - send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); - - let relays = state.all_relays(); - let mut handles = Vec::with_capacity(relays.len()); - for relay in relays { - handles.push(Box::pin(send_relay_check(relay, send_headers.clone()))); - } - - // return ok if at least one relay returns 200 - let results = select_ok(handles).await; - match results { - Ok(_) => Ok(()), - Err(err) => Err(err.into()), - } - } -} - -async fn send_relay_check(relay: &RelayClient, headers: HeaderMap) -> Result<(), PbsError> { - let url = relay.get_status_url()?; - - let start_request = Instant::now(); - let res = match relay - .client - .get(url) - .timeout(Duration::from_secs(30)) - .headers(headers) - .send() - .await - { - Ok(res) => res, - 
Err(err) => { - RELAY_STATUS_CODE - .with_label_values(&[TIMEOUT_ERROR_CODE_STR, STATUS_ENDPOINT_TAG, &relay.id]) - .inc(); - return Err(err.into()); - } - }; - let request_latency = start_request.elapsed(); - RELAY_LATENCY - .with_label_values(&[STATUS_ENDPOINT_TAG, &relay.id]) - .observe(request_latency.as_secs_f64()); - - let code = res.status(); - RELAY_STATUS_CODE.with_label_values(&[code.as_str(), STATUS_ENDPOINT_TAG, &relay.id]).inc(); - - if !code.is_success() { - let response_bytes = read_chunked_body_with_max(res, MAX_SIZE_DEFAULT).await?; - let err = PbsError::RelayResponse { - error_msg: String::from_utf8_lossy(&response_bytes).into_owned(), - code: code.as_u16(), - }; - - error!(relay_id = relay.id.as_ref(),%err, "status failed"); - return Err(err); - }; - - debug!(relay_id = relay.id.as_ref(),?code, latency = ?request_latency, "status passed"); - - Ok(()) -} diff --git a/crates/pbs/src/routes/get_header.rs b/crates/pbs/src/routes/get_header.rs deleted file mode 100644 index c550d92f..00000000 --- a/crates/pbs/src/routes/get_header.rs +++ /dev/null @@ -1,150 +0,0 @@ -use alloy::primitives::utils::format_ether; -use axum::{ - extract::{Path, State}, - http::{HeaderMap, HeaderValue}, - response::IntoResponse, -}; -use cb_common::{ - pbs::{GetHeaderInfo, GetHeaderParams}, - utils::{ - CONSENSUS_VERSION_HEADER, EncodingType, get_accept_types, get_user_agent, ms_into_slot, - }, -}; -use reqwest::{StatusCode, header::CONTENT_TYPE}; -use ssz::Encode; -use tracing::{error, info}; - -use crate::{ - CompoundGetHeaderResponse, - api::BuilderApi, - constants::GET_HEADER_ENDPOINT_TAG, - error::PbsClientError, - metrics::BEACON_NODE_STATUS, - state::{BuilderApiState, PbsStateGuard}, -}; - -pub async fn handle_get_header>( - State(state): State>, - req_headers: HeaderMap, - Path(params): Path, -) -> Result { - tracing::Span::current().record("slot", params.slot); - tracing::Span::current().record("parent_hash", tracing::field::debug(params.parent_hash)); - 
tracing::Span::current().record("validator", tracing::field::debug(¶ms.pubkey)); - - let state = state.read().clone(); - - let ua = get_user_agent(&req_headers); - let ms_into_slot = ms_into_slot(params.slot, state.config.chain); - let accept_types = get_accept_types(&req_headers).map_err(|e| { - error!(%e, "error parsing accept header"); - PbsClientError::DecodeError(format!("error parsing accept header: {e}")) - })?; - let accepts_ssz = accept_types.contains(&EncodingType::Ssz); - let accepts_json = accept_types.contains(&EncodingType::Json); - - info!(ua, ms_into_slot, "new request"); - - match A::get_header(params, req_headers, state, accept_types).await { - Ok(res) => { - if let Some(max_bid) = res { - BEACON_NODE_STATUS.with_label_values(&["200", GET_HEADER_ENDPOINT_TAG]).inc(); - match max_bid { - CompoundGetHeaderResponse::Light(light_bid) => { - // Light validation mode, so just forward the raw response - info!( - value_eth = format_ether(light_bid.value), - "received header (unvalidated)" - ); - - // Create the headers - let consensus_version_header = - match HeaderValue::from_str(&light_bid.version.to_string()) { - Ok(consensus_version_header) => { - Ok::(consensus_version_header) - } - Err(e) => { - return Err(PbsClientError::RelayError(format!( - "error decoding consensus version from relay payload: {e}" - ))); - } - }?; - let content_type = light_bid.encoding_type.content_type(); - let content_type_header = HeaderValue::from_str(content_type).unwrap(); - - // Build response - let mut res = light_bid.raw_bytes.into_response(); - res.headers_mut() - .insert(CONSENSUS_VERSION_HEADER, consensus_version_header); - res.headers_mut().insert(CONTENT_TYPE, content_type_header); - info!("sending response as {} (light)", content_type); - Ok(res) - } - CompoundGetHeaderResponse::Full(max_bid) => { - // Full validation mode, so respond based on requester accept types - info!(value_eth = format_ether(*max_bid.data.message.value()), block_hash =% 
max_bid.block_hash(), "received header"); - - // Handle SSZ - if accepts_ssz { - let mut res = max_bid.data.as_ssz_bytes().into_response(); - let consensus_version_header = match HeaderValue::from_str( - &max_bid.version.to_string(), - ) { - Ok(consensus_version_header) => { - Ok::(consensus_version_header) - } - Err(e) => { - if accepts_json { - info!("sending response as JSON"); - return Ok( - (StatusCode::OK, axum::Json(max_bid)).into_response() - ); - } else { - return Err(PbsClientError::RelayError(format!( - "error decoding consensus version from relay payload: {e}" - ))); - } - } - }?; - - // This won't actually fail since the string is a const - let content_type_header = - HeaderValue::from_str(EncodingType::Ssz.content_type()).unwrap(); - - res.headers_mut() - .insert(CONSENSUS_VERSION_HEADER, consensus_version_header); - res.headers_mut().insert(CONTENT_TYPE, content_type_header); - info!("sending response as SSZ"); - return Ok(res); - } - - // Handle JSON - if accepts_json { - Ok((StatusCode::OK, axum::Json(max_bid)).into_response()) - } else { - // This shouldn't ever happen but the compiler needs it - Err(PbsClientError::DecodeError( - "no viable accept types in request".to_string(), - )) - } - } - } - } else { - // spec: return 204 if request is valid but no bid available - info!("no header available for slot"); - - BEACON_NODE_STATUS.with_label_values(&["204", GET_HEADER_ENDPOINT_TAG]).inc(); - Ok(StatusCode::NO_CONTENT.into_response()) - } - } - Err(err) => { - error!(%err, "no header available from relays"); - - let err = PbsClientError::NoPayload; - BEACON_NODE_STATUS - .with_label_values(&[err.status_code().as_str(), GET_HEADER_ENDPOINT_TAG]) - .inc(); - Err(err) - } - } -} diff --git a/crates/pbs/src/routes/get_header/mod.rs b/crates/pbs/src/routes/get_header/mod.rs new file mode 100644 index 00000000..47c5a068 --- /dev/null +++ b/crates/pbs/src/routes/get_header/mod.rs @@ -0,0 +1,321 @@ +mod relay; +mod validation; + +use 
std::{collections::HashSet, sync::Arc}; + +use alloy::primitives::{U256, utils::format_ether}; +use axum::{ + extract::{Path, State}, + http::{HeaderMap, HeaderValue}, + response::IntoResponse, +}; +use cb_common::{ + config::HeaderValidationMode, + pbs::{GetHeaderInfo, GetHeaderParams, HEADER_TIMEOUT_MS, error::PbsError}, + utils::{ + CONSENSUS_VERSION_HEADER, EncodingType, get_accept_types, get_user_agent, + get_user_agent_with_version, ms_into_slot, + }, +}; +use futures::future::join_all; +use parking_lot::RwLock; +use relay::{RequestInfo, ValidationContext, send_timed_get_header}; +use reqwest::{ + StatusCode, + header::{ACCEPT, CONTENT_TYPE, USER_AGENT}, +}; +use tracing::{Instrument, debug, error, info, warn}; + +use super::CompoundGetHeaderResponse; +use crate::{ + error::PbsClientError, + metrics::{BEACON_NODE_STATUS, RELAY_HEADER_VALUE, RELAY_LAST_SLOT}, + state::{PbsState, PbsStateGuard}, +}; + +pub async fn handle_get_header( + State(state): State, + req_headers: HeaderMap, + Path(params): Path, +) -> Result { + tracing::Span::current().record("slot", params.slot); + tracing::Span::current().record("parent_hash", tracing::field::debug(params.parent_hash)); + tracing::Span::current().record("validator", tracing::field::debug(¶ms.pubkey)); + + let state = state.read().clone(); + + let ua = get_user_agent(&req_headers); + let ms_into_slot = ms_into_slot(params.slot, state.config.chain); + let accept_types = get_accept_types(&req_headers).map_err(|e| { + error!(%e, "error parsing accept header"); + PbsClientError::DecodeError(format!("error parsing accept header: {e}")) + })?; + let accepts_ssz = accept_types.contains(&EncodingType::Ssz); + let accepts_json = accept_types.contains(&EncodingType::Json); + + info!(ua, ms_into_slot, "new request"); + + match get_header(params, req_headers, state, accept_types).await { + Ok(res) => { + if let Some(max_bid) = res { + BEACON_NODE_STATUS + .with_label_values(&["200", crate::constants::GET_HEADER_ENDPOINT_TAG]) + 
.inc(); + match max_bid { + CompoundGetHeaderResponse::Light(light_bid) => { + // Light validation mode, so just forward the raw response + info!( + value_eth = format_ether(light_bid.value), + "received header (unvalidated)" + ); + + // Create the headers + let consensus_version_header = + match HeaderValue::from_str(&light_bid.version.to_string()) { + Ok(consensus_version_header) => { + Ok::(consensus_version_header) + } + Err(e) => { + return Err(PbsClientError::RelayError(format!( + "error decoding consensus version from relay payload: {e}" + ))); + } + }?; + let content_type = light_bid.encoding_type.content_type(); + let content_type_header = HeaderValue::from_str(content_type).unwrap(); + + // Build response + let mut res = light_bid.raw_bytes.into_response(); + res.headers_mut() + .insert(CONSENSUS_VERSION_HEADER, consensus_version_header); + res.headers_mut().insert(CONTENT_TYPE, content_type_header); + info!("sending response as {} (light)", content_type); + Ok(res) + } + CompoundGetHeaderResponse::Full(max_bid) => { + // Full validation mode, so respond based on requester accept types + info!(value_eth = format_ether(*max_bid.data.message.value()), block_hash =% max_bid.block_hash(), "received header"); + + // Handle SSZ + if accepts_ssz { + use ssz::Encode; + let mut res = max_bid.data.as_ssz_bytes().into_response(); + let consensus_version_header = match HeaderValue::from_str( + &max_bid.version.to_string(), + ) { + Ok(consensus_version_header) => { + Ok::(consensus_version_header) + } + Err(e) => { + if accepts_json { + info!("sending response as JSON"); + return Ok( + (StatusCode::OK, axum::Json(max_bid)).into_response() + ); + } else { + return Err(PbsClientError::RelayError(format!( + "error decoding consensus version from relay payload: {e}" + ))); + } + } + }?; + + // This won't actually fail since the string is a const + let content_type_header = + HeaderValue::from_str(EncodingType::Ssz.content_type()).unwrap(); + + res.headers_mut() + 
.insert(CONSENSUS_VERSION_HEADER, consensus_version_header); + res.headers_mut().insert(CONTENT_TYPE, content_type_header); + info!("sending response as SSZ"); + return Ok(res); + } + + // Handle JSON + if accepts_json { + Ok((StatusCode::OK, axum::Json(max_bid)).into_response()) + } else { + // This shouldn't ever happen but the compiler needs it + Err(PbsClientError::DecodeError( + "no viable accept types in request".to_string(), + )) + } + } + } + } else { + // spec: return 204 if request is valid but no bid available + info!("no header available for slot"); + + BEACON_NODE_STATUS + .with_label_values(&["204", crate::constants::GET_HEADER_ENDPOINT_TAG]) + .inc(); + Ok(StatusCode::NO_CONTENT.into_response()) + } + } + Err(err) => { + error!(%err, "no header available from relays"); + + let err = PbsClientError::NoPayload; + BEACON_NODE_STATUS + .with_label_values(&[ + err.status_code().as_str(), + crate::constants::GET_HEADER_ENDPOINT_TAG, + ]) + .inc(); + Err(err) + } + } +} + +// ── Relay logic ────────────────────────────────────────────────────────────── + +/// Implements https://ethereum.github.io/builder-specs/#/Builder/getHeader +/// Returns 200 if at least one relay returns 200, else 204 +pub async fn get_header( + params: GetHeaderParams, + req_headers: HeaderMap, + state: PbsState, + accepted_types: HashSet, +) -> eyre::Result> { + let parent_block = Arc::new(RwLock::new(None)); + let extra_validation_enabled = + state.config.pbs_config.header_validation_mode == HeaderValidationMode::Extra; + if extra_validation_enabled && let Some(rpc_url) = state.pbs_config().rpc_url.clone() { + tokio::spawn( + validation::fetch_parent_block(rpc_url, params.parent_hash, parent_block.clone()) + .in_current_span(), + ); + } + + let ms_into_slot = ms_into_slot(params.slot, state.config.chain); + let (pbs_config, relays, maybe_mux_id) = state.mux_config_and_relays(¶ms.pubkey); + + if let Some(mux_id) = maybe_mux_id { + debug!(mux_id, relays = relays.len(), pubkey = 
%params.pubkey, "using mux config"); + } else { + debug!(relays = relays.len(), pubkey = %params.pubkey, "using default config"); + } + + let max_timeout_ms = pbs_config + .timeout_get_header_ms + .min(pbs_config.late_in_slot_time_ms.saturating_sub(ms_into_slot)); + + if max_timeout_ms == 0 { + warn!( + ms_into_slot, + threshold = pbs_config.late_in_slot_time_ms, + "late in slot, skipping relay requests" + ); + + return Ok(None); + } + + // Use the minimum of the time left and the user provided timeout header + let max_timeout_ms = req_headers + .get(HEADER_TIMEOUT_MS) + .map(|header| match header.to_str().ok().and_then(|v| v.parse::().ok()) { + None | Some(0) => { + // Header can't be stringified, or parsed, or it's set to 0 + warn!(?header, "invalid user-supplied timeout header, using {max_timeout_ms}ms"); + max_timeout_ms + } + Some(user_timeout) => user_timeout.min(max_timeout_ms), + }) + .unwrap_or(max_timeout_ms); + + // prepare headers, except for start time which is set in `send_one_get_header` + let mut send_headers = HeaderMap::new(); + send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); + + // Create the Accept headers for requests + let mode = state.pbs_config().header_validation_mode; + let accept_types_str = match mode { + HeaderValidationMode::None => { + // No validation mode, so only request what the user wants because the response + // will be forwarded directly + accepted_types.iter().map(|t| t.content_type()).collect::>().join(",") + } + _ => { + // We're unpacking the body, so request both types since we can handle both + [EncodingType::Ssz.content_type(), EncodingType::Json.content_type()].join(",") + } + }; + send_headers.insert( + ACCEPT, + HeaderValue::from_str(&accept_types_str) + .map_err(|e| PbsError::GeneralRequest(format!("invalid accept header value: {e}")))?, + ); + + // Send requests to all relays concurrently + let slot = params.slot as i64; + let request_info = Arc::new(RequestInfo { + params, + headers: 
Arc::new(send_headers), + chain: state.config.chain, + validation: ValidationContext { + skip_sigverify: state.pbs_config().skip_sigverify, + min_bid_wei: state.pbs_config().min_bid_wei, + mode, + parent_block, + }, + accepted_types, + }); + let mut handles = Vec::with_capacity(relays.len()); + for relay in relays.iter() { + handles.push( + send_timed_get_header( + request_info.clone(), + relay.clone(), + ms_into_slot, + max_timeout_ms, + ) + .in_current_span(), + ); + } + + let results = join_all(handles).await; + let mut relay_bids = Vec::with_capacity(relays.len()); + for (i, res) in results.into_iter().enumerate() { + let relay_id = relays[i].id.as_str(); + + match res { + Ok(Some(res)) => { + let value = match &res { + CompoundGetHeaderResponse::Full(full) => *full.value(), + CompoundGetHeaderResponse::Light(light) => light.value, + }; + RELAY_LAST_SLOT.with_label_values(&[relay_id]).set(slot); + let value_gwei = (value / U256::from(1_000_000_000)).try_into().unwrap_or_default(); + RELAY_HEADER_VALUE.with_label_values(&[relay_id]).set(value_gwei); + + relay_bids.push((relay_id, res)) + } + Ok(_) => {} + Err(err) if err.is_timeout() => error!(err = "Timed Out", relay_id), + Err(err) => error!(%err, relay_id), + } + } + + let max_bid = relay_bids.into_iter().max_by_key(|(_, bid)| match bid { + CompoundGetHeaderResponse::Full(full) => *full.value(), + CompoundGetHeaderResponse::Light(light) => light.value, + }); + + if let Some((winning_relay_id, ref bid)) = max_bid { + match bid { + CompoundGetHeaderResponse::Full(full) => info!( + relay_id = winning_relay_id, + value_eth = format_ether(*full.value()), + block_hash = %full.block_hash(), + "auction winner" + ), + CompoundGetHeaderResponse::Light(light) => info!( + relay_id = winning_relay_id, + value_eth = format_ether(light.value), + "auction winner (unvalidated)" + ), + } + } + + Ok(max_bid.map(|(_, bid)| bid)) +} diff --git a/crates/pbs/src/mev_boost/get_header.rs b/crates/pbs/src/routes/get_header/relay.rs 
similarity index 52% rename from crates/pbs/src/mev_boost/get_header.rs rename to crates/pbs/src/routes/get_header/relay.rs index 751987af..0e74438e 100644 --- a/crates/pbs/src/mev_boost/get_header.rs +++ b/crates/pbs/src/routes/get_header/relay.rs @@ -4,74 +4,61 @@ use std::{ time::{Duration, Instant}, }; -use alloy::{ - primitives::{B256, U256, aliases::B32, utils::format_ether}, - providers::Provider, - rpc::types::Block, -}; +use alloy::primitives::{U256, utils::format_ether}; use axum::http::{HeaderMap, HeaderValue}; use cb_common::{ config::HeaderValidationMode, - constants::APPLICATION_BUILDER_DOMAIN, pbs::{ - EMPTY_TX_ROOT_HASH, ExecutionPayloadHeaderRef, ForkName, ForkVersionDecode, GetHeaderInfo, - GetHeaderParams, GetHeaderResponse, HEADER_START_TIME_UNIX_MS, HEADER_TIMEOUT_MS, - RelayClient, SignedBuilderBid, + ExecutionPayloadHeaderRef, ForkName, GetHeaderInfo, GetHeaderParams, GetHeaderResponse, + HEADER_START_TIME_UNIX_MS, HEADER_TIMEOUT_MS, RelayClient, error::{PbsError, ValidationError}, }, - signature::verify_signed_message, - types::{BlsPublicKey, BlsPublicKeyBytes, BlsSignature, Chain}, + types::Chain, utils::{ EncodingType, get_bid_value_from_signed_builder_bid_ssz, get_consensus_version_header, - get_user_agent_with_version, ms_into_slot, read_chunked_body_with_max, - timestamp_of_slot_start_sec, utcnow_ms, + read_chunked_body_with_max, utcnow_ms, }, }; -use futures::future::join_all; use parking_lot::RwLock; -use reqwest::{ - StatusCode, - header::{ACCEPT, CONTENT_TYPE, USER_AGENT}, -}; -use serde::Deserialize; +use reqwest::{StatusCode, header::CONTENT_TYPE}; use tokio::time::sleep; -use tracing::{Instrument, debug, error, warn}; -use tree_hash::TreeHash; +use tracing::{Instrument, debug, error, info, warn}; use url::Url; -use crate::{ - constants::{ - GET_HEADER_ENDPOINT_TAG, MAX_SIZE_GET_HEADER_RESPONSE, TIMEOUT_ERROR_CODE, - TIMEOUT_ERROR_CODE_STR, +use super::{ + super::{CompoundGetHeaderResponse, LightGetHeaderResponse}, + validation::{ 
+ HeaderData, decode_json_payload, decode_ssz_payload, extra_validation, + get_light_info_from_json, validate_header_data, validate_signature, }, - metrics::{RELAY_HEADER_VALUE, RELAY_LAST_SLOT, RELAY_LATENCY, RELAY_STATUS_CODE}, - mev_boost::{CompoundGetHeaderResponse, LightGetHeaderResponse}, - state::{BuilderApiState, PbsState}, - utils::check_gas_limit, +}; +use crate::constants::{ + GET_HEADER_ENDPOINT_TAG, MAX_SIZE_GET_HEADER_RESPONSE, TIMEOUT_ERROR_CODE, + TIMEOUT_ERROR_CODE_STR, }; /// Info about an incoming get_header request. /// Sent from get_header to each send_timed_get_header call. #[derive(Clone)] -struct RequestInfo { +pub struct RequestInfo { /// The blockchain parameters of the get_header request (what slot it's for, /// which pubkey is requesting it, etc) - params: GetHeaderParams, + pub params: GetHeaderParams, /// Common baseline of headers to send with each request - headers: Arc, + pub headers: Arc, /// The chain the request is for - chain: Chain, + pub chain: Chain, /// Context for validating the header returned by the relay - validation: ValidationContext, + pub validation: ValidationContext, /// The accepted encoding types from the original request - accepted_types: HashSet, + pub accepted_types: HashSet, } -/// Used interally to provide info and context about a get_header request and +/// Used internally to provide info and context about a get_header request and /// its response struct GetHeaderResponseInfo { /// ID of the relay the response came from @@ -96,180 +83,21 @@ struct GetHeaderResponseInfo { /// Context for validating the header #[derive(Clone)] -struct ValidationContext { +pub struct ValidationContext { /// Whether to skip signature verification - skip_sigverify: bool, + pub skip_sigverify: bool, /// Minimum acceptable bid, in wei - min_bid_wei: U256, + pub min_bid_wei: U256, /// The mode used for response validation - mode: HeaderValidationMode, + pub mode: HeaderValidationMode, /// The parent block, if fetched - 
parent_block: Arc>>, + pub parent_block: Arc>>, } -/// Implements https://ethereum.github.io/builder-specs/#/Builder/getHeader -/// Returns 200 if at least one relay returns 200, else 204 -pub async fn get_header( - params: GetHeaderParams, - req_headers: HeaderMap, - state: PbsState, - accepted_types: HashSet, -) -> eyre::Result> { - let parent_block = Arc::new(RwLock::new(None)); - let extra_validation_enabled = - state.config.pbs_config.header_validation_mode == HeaderValidationMode::Extra; - if extra_validation_enabled && let Some(rpc_url) = state.pbs_config().rpc_url.clone() { - tokio::spawn( - fetch_parent_block(rpc_url, params.parent_hash, parent_block.clone()).in_current_span(), - ); - } - - let ms_into_slot = ms_into_slot(params.slot, state.config.chain); - let (pbs_config, relays, maybe_mux_id) = state.mux_config_and_relays(¶ms.pubkey); - - if let Some(mux_id) = maybe_mux_id { - debug!(mux_id, relays = relays.len(), pubkey = %params.pubkey, "using mux config"); - } else { - debug!(relays = relays.len(), pubkey = %params.pubkey, "using default config"); - } - - let max_timeout_ms = pbs_config - .timeout_get_header_ms - .min(pbs_config.late_in_slot_time_ms.saturating_sub(ms_into_slot)); - - if max_timeout_ms == 0 { - warn!( - ms_into_slot, - threshold = pbs_config.late_in_slot_time_ms, - "late in slot, skipping relay requests" - ); - - return Ok(None); - } - - // Use the minimum of the time left and the user provided timeout header - let max_timeout_ms = req_headers - .get(HEADER_TIMEOUT_MS) - .map(|header| match header.to_str().ok().and_then(|v| v.parse::().ok()) { - None | Some(0) => { - // Header can't be stringified, or parsed, or it's set to 0 - warn!(?header, "invalid user-supplied timeout header, using {max_timeout_ms}ms"); - max_timeout_ms - } - Some(user_timeout) => user_timeout.min(max_timeout_ms), - }) - .unwrap_or(max_timeout_ms); - - // prepare headers, except for start time which is set in `send_one_get_header` - let mut send_headers = 
HeaderMap::new(); - send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); - - // Create the Accept headers for requests - let mode = state.pbs_config().header_validation_mode; - let accept_types = match mode { - HeaderValidationMode::None => { - // No validation mode, so only request what the user wants because the response - // will be forwarded directly - accepted_types.iter().map(|t| t.content_type()).collect::>().join(",") - } - _ => { - // We're unpacking the body, so request both types since we can handle both - [EncodingType::Ssz.content_type(), EncodingType::Json.content_type()].join(",") - } - }; - send_headers.insert( - ACCEPT, - HeaderValue::from_str(&accept_types) - .map_err(|e| PbsError::GeneralRequest(format!("invalid accept header value: {e}")))?, - ); - - // Send requests to all relays concurrently - let slot = params.slot as i64; - let request_info = Arc::new(RequestInfo { - params, - headers: Arc::new(send_headers), - chain: state.config.chain, - validation: ValidationContext { - skip_sigverify: state.pbs_config().skip_sigverify, - min_bid_wei: state.pbs_config().min_bid_wei, - mode, - parent_block, - }, - accepted_types, - }); - let mut handles = Vec::with_capacity(relays.len()); - for relay in relays.iter() { - handles.push( - send_timed_get_header( - request_info.clone(), - relay.clone(), - ms_into_slot, - max_timeout_ms, - ) - .in_current_span(), - ); - } - - let results = join_all(handles).await; - let mut relay_bids = Vec::with_capacity(relays.len()); - for (i, res) in results.into_iter().enumerate() { - let relay_id = relays[i].id.as_str(); - - match res { - Ok(Some(res)) => { - let value = match &res { - CompoundGetHeaderResponse::Full(full) => *full.value(), - CompoundGetHeaderResponse::Light(light) => light.value, - }; - RELAY_LAST_SLOT.with_label_values(&[relay_id]).set(slot); - let value_gwei = (value / U256::from(1_000_000_000)).try_into().unwrap_or_default(); - 
RELAY_HEADER_VALUE.with_label_values(&[relay_id]).set(value_gwei); - - relay_bids.push(res) - } - Ok(_) => {} - Err(err) if err.is_timeout() => error!(err = "Timed Out", relay_id), - Err(err) => error!(%err, relay_id), - } - } - - let max_bid = relay_bids.into_iter().max_by_key(|bid| match bid { - CompoundGetHeaderResponse::Full(full) => *full.value(), - CompoundGetHeaderResponse::Light(light) => light.value, - }); - - Ok(max_bid) -} - -/// Fetch the parent block from the RPC URL for extra validation of the header. -/// Extra validation will be skipped if: -/// - relay returns header before parent block is fetched -/// - parent block is not found, eg because of a RPC delay -async fn fetch_parent_block( - rpc_url: Url, - parent_hash: B256, - parent_block: Arc>>, -) { - let provider = alloy::providers::ProviderBuilder::new().connect_http(rpc_url).to_owned(); - - debug!(%parent_hash, "fetching parent block"); - - match provider.get_block_by_hash(parent_hash).await { - Ok(maybe_block) => { - debug!(block_found = maybe_block.is_some(), "fetched parent block"); - let mut guard = parent_block.write(); - *guard = maybe_block; - } - Err(err) => { - error!(%err, "fetch failed"); - } - } -} - -async fn send_timed_get_header( +pub async fn send_timed_get_header( request_info: Arc, relay: RelayClient, ms_into_slot: u64, @@ -326,7 +154,7 @@ async fn send_timed_get_header( } } - let results = join_all(handles).await; + let results = futures::future::join_all(handles).await; let mut n_headers = 0; if let Some((_, maybe_header)) = results @@ -541,7 +369,7 @@ async fn send_get_header_full( }; // Log and return - debug!( + info!( relay_id = info.relay_id.as_ref(), header_size_bytes = info.response_bytes.len(), latency = ?info.request_latency, @@ -588,7 +416,7 @@ async fn send_get_header_light( }; // Log and return - debug!( + info!( relay_id = info.relay_id.as_ref(), header_size_bytes = info.response_bytes.len(), latency = ?info.request_latency, @@ -638,7 +466,7 @@ async fn 
send_get_header_impl( { Ok(res) => res, Err(err) => { - RELAY_STATUS_CODE + crate::metrics::RELAY_STATUS_CODE .with_label_values(&[TIMEOUT_ERROR_CODE_STR, GET_HEADER_ENDPOINT_TAG, &relay.id]) .inc(); return Err(err.into()); @@ -648,10 +476,7 @@ async fn send_get_header_impl( // Log the response code and latency let code = res.status(); let request_latency = start_request.elapsed(); - RELAY_LATENCY - .with_label_values(&[GET_HEADER_ENDPOINT_TAG, &relay.id]) - .observe(request_latency.as_secs_f64()); - RELAY_STATUS_CODE.with_label_values(&[code.as_str(), GET_HEADER_ENDPOINT_TAG, &relay.id]).inc(); + super::super::record_relay_metrics(GET_HEADER_ENDPOINT_TAG, &relay.id, code, request_latency); // According to the spec, OK is the only allowed success code so this can break // early @@ -716,304 +541,3 @@ async fn send_get_header_impl( }), )) } - -/// Decode a JSON-encoded get_header response -fn decode_json_payload(response_bytes: &[u8]) -> Result { - match serde_json::from_slice::(response_bytes) { - Ok(parsed) => Ok(parsed), - Err(err) => Err(PbsError::JsonDecode { - err, - raw: String::from_utf8_lossy(response_bytes).into_owned(), - }), - } -} - -/// Get the value of a builder bid and the fork name from a get_header JSON -/// response (used for light-level processing) -fn get_light_info_from_json(response_bytes: &[u8]) -> Result<(ForkName, U256), PbsError> { - #[derive(Deserialize)] - struct LightBuilderBid { - #[serde(with = "serde_utils::quoted_u256")] - pub value: U256, - } - - #[derive(Deserialize)] - struct LightSignedBuilderBid { - pub message: LightBuilderBid, - } - - #[derive(Deserialize)] - struct LightHeaderResponse { - version: ForkName, - data: LightSignedBuilderBid, - } - - match serde_json::from_slice::(response_bytes) { - Ok(parsed) => Ok((parsed.version, parsed.data.message.value)), - Err(err) => Err(PbsError::JsonDecode { - err, - raw: String::from_utf8_lossy(response_bytes).into_owned(), - }), - } -} - -/// Decode an SSZ-encoded get_header response 
-fn decode_ssz_payload( - response_bytes: &[u8], - fork: ForkName, -) -> Result { - let data = SignedBuilderBid::from_ssz_bytes_by_fork(response_bytes, fork).map_err(|e| { - PbsError::RelayResponse { - error_msg: (format!("error decoding relay payload: {e:?}")).to_string(), - code: 200, - } - })?; - Ok(GetHeaderResponse { version: fork, data, metadata: Default::default() }) -} - -struct HeaderData { - block_hash: B256, - parent_hash: B256, - tx_root: B256, - value: U256, - timestamp: u64, -} - -fn validate_header_data( - header_data: &HeaderData, - chain: Chain, - expected_parent_hash: B256, - minimum_bid_wei: U256, - slot: u64, -) -> Result<(), ValidationError> { - if header_data.block_hash == B256::ZERO { - return Err(ValidationError::EmptyBlockhash); - } - - if expected_parent_hash != header_data.parent_hash { - return Err(ValidationError::ParentHashMismatch { - expected: expected_parent_hash, - got: header_data.parent_hash, - }); - } - - if header_data.tx_root == EMPTY_TX_ROOT_HASH { - return Err(ValidationError::EmptyTxRoot); - } - - if header_data.value < minimum_bid_wei { - return Err(ValidationError::BidTooLow { min: minimum_bid_wei, got: header_data.value }); - } - - let expected_timestamp = timestamp_of_slot_start_sec(slot, chain); - if expected_timestamp != header_data.timestamp { - return Err(ValidationError::TimestampMismatch { - expected: expected_timestamp, - got: header_data.timestamp, - }); - } - - Ok(()) -} - -fn validate_signature( - chain: Chain, - expected_relay_pubkey: &BlsPublicKey, - received_relay_pubkey: &BlsPublicKeyBytes, - message: &T, - signature: &BlsSignature, -) -> Result<(), ValidationError> { - if expected_relay_pubkey.serialize() != received_relay_pubkey.as_serialized() { - return Err(ValidationError::PubkeyMismatch { - expected: BlsPublicKeyBytes::from(expected_relay_pubkey), - got: *received_relay_pubkey, - }); - } - - if !verify_signed_message( - chain, - expected_relay_pubkey, - &message, - signature, - None, - 
&B32::from(APPLICATION_BUILDER_DOMAIN), - ) { - return Err(ValidationError::Sigverify); - } - - Ok(()) -} - -fn extra_validation( - parent_block: &Block, - signed_header: &GetHeaderResponse, -) -> Result<(), ValidationError> { - if signed_header.block_number() != parent_block.header.number + 1 { - return Err(ValidationError::BlockNumberMismatch { - parent: parent_block.header.number, - header: signed_header.block_number(), - }); - } - - if !check_gas_limit(signed_header.gas_limit(), parent_block.header.gas_limit) { - return Err(ValidationError::GasLimit { - parent: parent_block.header.gas_limit, - header: signed_header.gas_limit(), - }); - }; - - Ok(()) -} - -#[cfg(test)] -mod tests { - use std::{fs, path::Path}; - - use alloy::primitives::{B256, U256}; - use cb_common::{ - pbs::*, - signature::sign_builder_message, - types::{BlsPublicKeyBytes, BlsSecretKey, BlsSignature, Chain}, - utils::{TestRandomSeed, timestamp_of_slot_start_sec}, - }; - use ssz::Encode; - - use super::{validate_header_data, *}; - - #[test] - fn test_validate_header() { - let slot = 5; - let parent_hash = B256::from_slice(&[1; 32]); - let chain = Chain::Holesky; - let min_bid = U256::from(10); - - let mut mock_header_data = HeaderData { - block_hash: B256::default(), - parent_hash: B256::default(), - tx_root: EMPTY_TX_ROOT_HASH, - value: U256::default(), - timestamp: 0, - }; - - assert_eq!( - validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), - Err(ValidationError::EmptyBlockhash) - ); - - mock_header_data.block_hash.0[1] = 1; - - assert_eq!( - validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), - Err(ValidationError::ParentHashMismatch { - expected: parent_hash, - got: B256::default() - }) - ); - - mock_header_data.parent_hash = parent_hash; - - assert_eq!( - validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), - Err(ValidationError::EmptyTxRoot) - ); - - mock_header_data.tx_root = Default::default(); - - assert_eq!( - 
validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), - Err(ValidationError::BidTooLow { min: min_bid, got: U256::ZERO }) - ); - - mock_header_data.value = U256::from(11); - - let expected = timestamp_of_slot_start_sec(slot, chain); - assert_eq!( - validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), - Err(ValidationError::TimestampMismatch { expected, got: 0 }) - ); - - mock_header_data.timestamp = expected; - - assert!(validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot).is_ok()); - } - - #[test] - fn test_validate_signature() { - let secret_key = BlsSecretKey::test_random(); - let pubkey = secret_key.public_key(); - let wrong_pubkey = BlsPublicKeyBytes::test_random(); - let wrong_signature = BlsSignature::test_random(); - - let message = B256::random(); - - let signature = sign_builder_message(Chain::Holesky, &secret_key, &message); - - assert_eq!( - validate_signature(Chain::Holesky, &pubkey, &wrong_pubkey, &message, &wrong_signature), - Err(ValidationError::PubkeyMismatch { - expected: BlsPublicKeyBytes::from(&pubkey), - got: wrong_pubkey - }) - ); - - assert!(matches!( - validate_signature( - Chain::Holesky, - &pubkey, - &BlsPublicKeyBytes::from(&pubkey), - &message, - &wrong_signature - ), - Err(ValidationError::Sigverify) - )); - - assert!( - validate_signature( - Chain::Holesky, - &pubkey, - &BlsPublicKeyBytes::from(&pubkey), - &message, - &signature - ) - .is_ok() - ); - } - - #[test] - fn test_ssz_value_extraction() { - for fork_name in ForkName::list_all() { - match fork_name { - // Handle forks that didn't have builder bids yet - ForkName::Altair | ForkName::Base => continue, - - // Handle supported forks - ForkName::Bellatrix | - ForkName::Capella | - ForkName::Deneb | - ForkName::Electra | - ForkName::Fulu => {} - - // Skip unsupported forks - ForkName::Gloas => continue, - } - - // Load get_header JSON from test data - let fork_name_str = fork_name.to_string().to_lowercase(); - let 
path_str = format!("../../tests/data/get_header/{fork_name_str}.json"); - let path = Path::new(path_str.as_str()); - let json_bytes = fs::read(path).expect("file not found"); - let decoded = decode_json_payload(&json_bytes).expect("failed to decode JSON"); - - // Extract the bid value from the SSZ - let encoded = decoded.data.as_ssz_bytes(); - let bid_value = get_bid_value_from_signed_builder_bid_ssz(&encoded, fork_name) - .expect("failed to extract bid value from SSZ"); - - // Compare to the original value - println!("Testing fork: {}", fork_name); - println!("Original value: {}", decoded.value()); - println!("Extracted value: {}", bid_value); - assert_eq!(*decoded.value(), bid_value); - } - } -} diff --git a/crates/pbs/src/routes/get_header/validation.rs b/crates/pbs/src/routes/get_header/validation.rs new file mode 100644 index 00000000..4006d805 --- /dev/null +++ b/crates/pbs/src/routes/get_header/validation.rs @@ -0,0 +1,349 @@ +use std::sync::Arc; + +use alloy::{ + primitives::{B256, U256, aliases::B32}, + providers::Provider, + rpc::types::Block, +}; +use cb_common::{ + constants::APPLICATION_BUILDER_DOMAIN, + pbs::{ + EMPTY_TX_ROOT_HASH, ForkName, ForkVersionDecode, GetHeaderInfo, GetHeaderResponse, + SignedBuilderBid, + error::{PbsError, ValidationError}, + }, + signature::verify_signed_message, + types::{BlsPublicKey, BlsPublicKeyBytes, BlsSignature, Chain}, + utils::timestamp_of_slot_start_sec, +}; +use parking_lot::RwLock; +use serde::Deserialize; +use tracing::{debug, error}; +use tree_hash::TreeHash; +use url::Url; + +use crate::utils::check_gas_limit; + +/// Fetch the parent block from the RPC URL for extra validation of the header. 
+/// Extra validation will be skipped if: +/// - relay returns header before parent block is fetched +/// - parent block is not found, eg because of a RPC delay +pub async fn fetch_parent_block( + rpc_url: Url, + parent_hash: B256, + parent_block: Arc>>, +) { + let provider = alloy::providers::ProviderBuilder::new().connect_http(rpc_url).to_owned(); + + debug!(%parent_hash, "fetching parent block"); + + match provider.get_block_by_hash(parent_hash).await { + Ok(maybe_block) => { + debug!(block_found = maybe_block.is_some(), "fetched parent block"); + let mut guard = parent_block.write(); + *guard = maybe_block; + } + Err(err) => { + error!(%err, "fetch failed"); + } + } +} + +pub struct HeaderData { + pub block_hash: B256, + pub parent_hash: B256, + pub tx_root: B256, + pub value: U256, + pub timestamp: u64, +} + +pub fn validate_header_data( + header_data: &HeaderData, + chain: Chain, + expected_parent_hash: B256, + minimum_bid_wei: U256, + slot: u64, +) -> Result<(), ValidationError> { + if header_data.block_hash == B256::ZERO { + return Err(ValidationError::EmptyBlockhash); + } + + if expected_parent_hash != header_data.parent_hash { + return Err(ValidationError::ParentHashMismatch { + expected: expected_parent_hash, + got: header_data.parent_hash, + }); + } + + if header_data.tx_root == EMPTY_TX_ROOT_HASH { + return Err(ValidationError::EmptyTxRoot); + } + + if header_data.value < minimum_bid_wei { + return Err(ValidationError::BidTooLow { min: minimum_bid_wei, got: header_data.value }); + } + + let expected_timestamp = timestamp_of_slot_start_sec(slot, chain); + if expected_timestamp != header_data.timestamp { + return Err(ValidationError::TimestampMismatch { + expected: expected_timestamp, + got: header_data.timestamp, + }); + } + + Ok(()) +} + +pub fn validate_signature( + chain: Chain, + expected_relay_pubkey: &BlsPublicKey, + received_relay_pubkey: &BlsPublicKeyBytes, + message: &T, + signature: &BlsSignature, +) -> Result<(), ValidationError> { + if 
expected_relay_pubkey.serialize() != received_relay_pubkey.as_serialized() { + return Err(ValidationError::PubkeyMismatch { + expected: BlsPublicKeyBytes::from(expected_relay_pubkey), + got: *received_relay_pubkey, + }); + } + + if !verify_signed_message( + chain, + expected_relay_pubkey, + &message, + signature, + None, + &B32::from(APPLICATION_BUILDER_DOMAIN), + ) { + return Err(ValidationError::Sigverify); + } + + Ok(()) +} + +pub fn extra_validation( + parent_block: &Block, + signed_header: &GetHeaderResponse, +) -> Result<(), ValidationError> { + if signed_header.block_number() != parent_block.header.number + 1 { + return Err(ValidationError::BlockNumberMismatch { + parent: parent_block.header.number, + header: signed_header.block_number(), + }); + } + + if !check_gas_limit(signed_header.gas_limit(), parent_block.header.gas_limit) { + return Err(ValidationError::GasLimit { + parent: parent_block.header.gas_limit, + header: signed_header.gas_limit(), + }); + }; + + Ok(()) +} + +pub fn decode_json_payload(response_bytes: &[u8]) -> Result { + match serde_json::from_slice::(response_bytes) { + Ok(parsed) => Ok(parsed), + Err(err) => Err(PbsError::JsonDecode { + err, + raw: String::from_utf8_lossy(response_bytes).into_owned(), + }), + } +} + +pub fn get_light_info_from_json(response_bytes: &[u8]) -> Result<(ForkName, U256), PbsError> { + #[derive(Deserialize)] + struct LightBuilderBid { + #[serde(with = "serde_utils::quoted_u256")] + pub value: U256, + } + + #[derive(Deserialize)] + struct LightSignedBuilderBid { + pub message: LightBuilderBid, + } + + #[derive(Deserialize)] + struct LightHeaderResponse { + version: ForkName, + data: LightSignedBuilderBid, + } + + match serde_json::from_slice::(response_bytes) { + Ok(parsed) => Ok((parsed.version, parsed.data.message.value)), + Err(err) => Err(PbsError::JsonDecode { + err, + raw: String::from_utf8_lossy(response_bytes).into_owned(), + }), + } +} + +pub fn decode_ssz_payload( + response_bytes: &[u8], + fork: 
ForkName, +) -> Result { + let data = SignedBuilderBid::from_ssz_bytes_by_fork(response_bytes, fork).map_err(|e| { + PbsError::RelayResponse { + error_msg: (format!("error decoding relay payload: {e:?}")).to_string(), + code: 200, + } + })?; + Ok(GetHeaderResponse { version: fork, data, metadata: Default::default() }) +} + +#[cfg(test)] +mod tests { + use std::{fs, path::Path}; + + use alloy::primitives::{B256, U256}; + use cb_common::{ + pbs::*, + signature::sign_builder_message, + types::{BlsPublicKeyBytes, BlsSecretKey, BlsSignature, Chain}, + utils::{ + TestRandomSeed, get_bid_value_from_signed_builder_bid_ssz, timestamp_of_slot_start_sec, + }, + }; + use ssz::Encode; + + use super::{validate_header_data, *}; + + #[test] + fn test_validate_header() { + let slot = 5; + let parent_hash = B256::from_slice(&[1; 32]); + let chain = Chain::Holesky; + let min_bid = U256::from(10); + + let mut mock_header_data = HeaderData { + block_hash: B256::default(), + parent_hash: B256::default(), + tx_root: EMPTY_TX_ROOT_HASH, + value: U256::default(), + timestamp: 0, + }; + + assert_eq!( + validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), + Err(ValidationError::EmptyBlockhash) + ); + + mock_header_data.block_hash.0[1] = 1; + + assert_eq!( + validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), + Err(ValidationError::ParentHashMismatch { + expected: parent_hash, + got: B256::default() + }) + ); + + mock_header_data.parent_hash = parent_hash; + + assert_eq!( + validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), + Err(ValidationError::EmptyTxRoot) + ); + + mock_header_data.tx_root = Default::default(); + + assert_eq!( + validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), + Err(ValidationError::BidTooLow { min: min_bid, got: U256::ZERO }) + ); + + mock_header_data.value = U256::from(11); + + let expected = timestamp_of_slot_start_sec(slot, chain); + assert_eq!( + 
validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot,), + Err(ValidationError::TimestampMismatch { expected, got: 0 }) + ); + + mock_header_data.timestamp = expected; + + assert!(validate_header_data(&mock_header_data, chain, parent_hash, min_bid, slot).is_ok()); + } + + #[test] + fn test_validate_signature() { + let secret_key = BlsSecretKey::test_random(); + let pubkey = secret_key.public_key(); + let wrong_pubkey = BlsPublicKeyBytes::test_random(); + let wrong_signature = BlsSignature::test_random(); + + let message = B256::random(); + + let signature = sign_builder_message(Chain::Holesky, &secret_key, &message); + + assert_eq!( + validate_signature(Chain::Holesky, &pubkey, &wrong_pubkey, &message, &wrong_signature), + Err(ValidationError::PubkeyMismatch { + expected: BlsPublicKeyBytes::from(&pubkey), + got: wrong_pubkey + }) + ); + + assert!(matches!( + validate_signature( + Chain::Holesky, + &pubkey, + &BlsPublicKeyBytes::from(&pubkey), + &message, + &wrong_signature + ), + Err(ValidationError::Sigverify) + )); + + assert!( + validate_signature( + Chain::Holesky, + &pubkey, + &BlsPublicKeyBytes::from(&pubkey), + &message, + &signature + ) + .is_ok() + ); + } + + #[test] + fn test_ssz_value_extraction() { + for fork_name in ForkName::list_all() { + match fork_name { + // Handle forks that didn't have builder bids yet + ForkName::Altair | ForkName::Base => continue, + + // Handle supported forks + ForkName::Bellatrix | + ForkName::Capella | + ForkName::Deneb | + ForkName::Electra | + ForkName::Fulu => {} + + // Skip unsupported forks + ForkName::Gloas => continue, + } + + // Load get_header JSON from test data + let fork_name_str = fork_name.to_string().to_lowercase(); + let path_str = format!("../../tests/data/get_header/{fork_name_str}.json"); + let path = Path::new(path_str.as_str()); + let json_bytes = fs::read(path).expect("file not found"); + let decoded = decode_json_payload(&json_bytes).expect("failed to decode JSON"); + + // Extract 
the bid value from the SSZ + let encoded = decoded.data.as_ssz_bytes(); + let bid_value = get_bid_value_from_signed_builder_bid_ssz(&encoded, fork_name) + .expect("failed to extract bid value from SSZ"); + + // Compare to the original value + println!("Testing fork: {}", fork_name); + println!("Original value: {}", decoded.value()); + println!("Extracted value: {}", bid_value); + assert_eq!(*decoded.value(), bid_value); + } + } +} diff --git a/crates/pbs/src/routes/mod.rs b/crates/pbs/src/routes/mod.rs index 84853d9e..5bcb1b73 100644 --- a/crates/pbs/src/routes/mod.rs +++ b/crates/pbs/src/routes/mod.rs @@ -5,8 +5,90 @@ mod router; mod status; mod submit_block; +use std::time::Duration; + +use alloy::primitives::U256; +use cb_common::{ + pbs::{GetHeaderResponse, SubmitBlindedBlockResponse}, + utils::EncodingType, +}; +pub use get_header::get_header; use get_header::handle_get_header; +use lh_types::ForkName; use register_validator::handle_register_validator; pub use router::create_app_router; use status::handle_get_status; use submit_block::handle_submit_block_v1; + +use crate::metrics::{RELAY_LATENCY, RELAY_STATUS_CODE}; + +/// Records the HTTP status code and request latency metrics for a relay +/// endpoint interaction. 
+pub(crate) fn record_relay_metrics( + endpoint: &str, + relay_id: &str, + code: reqwest::StatusCode, + latency: Duration, +) { + RELAY_STATUS_CODE.with_label_values(&[code.as_str(), endpoint, relay_id]).inc(); + RELAY_LATENCY.with_label_values(&[endpoint, relay_id]).observe(latency.as_secs_f64()); +} + +/// Enum that handles different GetHeader response types based on the level of +/// validation required +pub enum CompoundGetHeaderResponse { + /// Standard response type, fully parsing the response from a relay into a + /// complete response struct + Full(Box), + + /// Light response type, only extracting the fork and value from the builder + /// bid with the entire (undecoded) payload for forwarding + Light(LightGetHeaderResponse), +} + +/// Core details of a GetHeaderResponse, used for light processing when +/// validation mode is set to none. +#[derive(Clone)] +pub struct LightGetHeaderResponse { + /// The fork name for the bid + pub version: ForkName, + + /// The bid value in wei + pub value: U256, + + /// The raw bytes of the response, for forwarding to the caller + pub raw_bytes: Vec, + + /// The format the response bytes are encoded with + pub encoding_type: EncodingType, +} + +/// Enum that handles different SubmitBlock response types based on the level of +/// validation required +pub enum CompoundSubmitBlockResponse { + /// Standard response type, fully parsing the response from a relay into a + /// complete response struct + Full(Box), + + /// Light response type, only extracting the fork from the response with the + /// entire (undecoded) payload for forwarding + Light(LightSubmitBlockResponse), + + /// Response with no body, used for v2 requests when the relay does not + /// return any content intentionally + EmptyBody, +} + +/// Core details of a SubmitBlockResponse, used for light processing when +/// validation mode is set to none. 
+#[derive(Clone, Debug)] +pub struct LightSubmitBlockResponse { + /// The fork name for the bid + pub version: ForkName, + + /// The raw bytes of the response, for forwarding to the caller + pub raw_bytes: Vec, + + /// The format the response bytes are encoded with + pub encoding_type: EncodingType, +} diff --git a/crates/pbs/src/routes/register_validator.rs b/crates/pbs/src/routes/register_validator.rs index 51c8ce6e..5e854a26 100644 --- a/crates/pbs/src/routes/register_validator.rs +++ b/crates/pbs/src/routes/register_validator.rs @@ -1,18 +1,37 @@ -use axum::{Json, extract::State, http::HeaderMap, response::IntoResponse}; -use cb_common::utils::get_user_agent; -use reqwest::StatusCode; -use tracing::{error, info, trace}; +use std::time::{Duration, Instant}; + +use alloy::primitives::Bytes; +use axum::{ + Json, + extract::State, + http::{HeaderMap, HeaderValue}, + response::IntoResponse, +}; +use cb_common::{ + pbs::{HEADER_START_TIME_UNIX_MS, RelayClient, error::PbsError}, + utils::{get_user_agent, get_user_agent_with_version, read_chunked_body_with_max, utcnow_ms}, +}; +use eyre::bail; +use futures::{ + FutureExt, + future::{join_all, select_ok}, +}; +use reqwest::{ + StatusCode, + header::{CONTENT_TYPE, USER_AGENT}, +}; +use tracing::{Instrument, debug, error, info, trace}; +use url::Url; use crate::{ - api::BuilderApi, - constants::REGISTER_VALIDATOR_ENDPOINT_TAG, + constants::{MAX_SIZE_DEFAULT, REGISTER_VALIDATOR_ENDPOINT_TAG, TIMEOUT_ERROR_CODE_STR}, error::PbsClientError, - metrics::BEACON_NODE_STATUS, - state::{BuilderApiState, PbsStateGuard}, + metrics::{BEACON_NODE_STATUS, RELAY_STATUS_CODE}, + state::{PbsState, PbsStateGuard}, }; -pub async fn handle_register_validator>( - State(state): State>, +pub async fn handle_register_validator( + State(state): State, req_headers: HeaderMap, Json(registrations): Json>, ) -> Result { @@ -24,7 +43,7 @@ pub async fn handle_register_validator>( info!(ua, num_registrations = registrations.len(), "new request"); - if 
let Err(err) = A::register_validator(registrations, req_headers, state).await { + if let Err(err) = register_validator(registrations, req_headers, state).await { error!(%err, "all relays failed registration"); let err = PbsClientError::NoResponse; @@ -39,3 +58,188 @@ pub async fn handle_register_validator>( Ok(StatusCode::OK) } } + +// ── Relay logic ────────────────────────────────────────────────────────────── + +/// Implements https://ethereum.github.io/builder-specs/#/Builder/registerValidator +/// Returns 200 if at least one relay returns 200, else 503 +async fn register_validator( + registrations: Vec, + req_headers: HeaderMap, + state: PbsState, +) -> eyre::Result<()> { + // prepare headers + let mut send_headers = HeaderMap::new(); + send_headers + .insert(HEADER_START_TIME_UNIX_MS, HeaderValue::from_str(&utcnow_ms().to_string())?); + send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); + + // prepare the body in advance, ugly dyn + let bodies: Box> = + if let Some(batch_size) = state.config.pbs_config.validator_registration_batch_size { + Box::new(registrations.chunks(batch_size).map(|batch| { + // SAFETY: unwrap is ok because we're serializing a &[serde_json::Value] + let body = serde_json::to_vec(batch).unwrap(); + (batch.len(), Bytes::from(body)) + })) + } else { + let body = serde_json::to_vec(®istrations).unwrap(); + Box::new(std::iter::once((registrations.len(), Bytes::from(body)))) + }; + send_headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json")); + + let mut handles = Vec::with_capacity(state.all_relays().len()); + + for (n_regs, body) in bodies { + for relay in state.all_relays().iter().cloned() { + handles.push( + tokio::spawn( + send_register_validator_with_timeout( + n_regs, + body.clone(), + relay, + send_headers.clone(), + state.pbs_config().timeout_register_validator_ms, + state.pbs_config().register_validator_retry_limit, + ) + .in_current_span(), + ) + .map(|join_result| match join_result { + 
Ok(res) => res, + Err(err) => Err(PbsError::TokioJoinError(err)), + }), + ); + } + } + + if state.pbs_config().wait_all_registrations { + // wait for all relays registrations to complete + let results = join_all(handles).await; + if results.into_iter().any(|res| res.is_ok()) { + Ok(()) + } else { + bail!("No relay passed register_validator successfully") + } + } else { + // return once first completes, others proceed in background + let result = select_ok(handles).await; + match result { + Ok(_) => Ok(()), + Err(_) => bail!("No relay passed register_validator successfully"), + } + } +} + +/// Register validator to relay, retry connection errors until the +/// given timeout has passed +async fn send_register_validator_with_timeout( + n_regs: usize, + body: Bytes, + relay: RelayClient, + headers: HeaderMap, + timeout_ms: u64, + retry_limit: u32, +) -> Result<(), PbsError> { + let url = relay.register_validator_url()?; + let mut remaining_timeout_ms = timeout_ms; + let mut retry = 0; + let mut backoff = Duration::from_millis(250); + + loop { + let start_request = Instant::now(); + match send_register_validator( + url.clone(), + n_regs, + body.clone(), + &relay, + headers.clone(), + remaining_timeout_ms, + retry, + ) + .await + { + Ok(_) => return Ok(()), + + Err(err) if err.should_retry() => { + retry += 1; + if retry >= retry_limit { + error!( + relay_id = relay.id.as_str(), + retry, "reached retry limit for validator registration" + ); + return Err(err); + } + tokio::time::sleep(backoff).await; + backoff += Duration::from_millis(250); + + remaining_timeout_ms = + timeout_ms.saturating_sub(start_request.elapsed().as_millis() as u64); + + if remaining_timeout_ms == 0 { + return Err(err); + } + } + + Err(err) => return Err(err), + }; + } +} + +async fn send_register_validator( + url: Url, + n_regs: usize, + body: Bytes, + relay: &RelayClient, + headers: HeaderMap, + timeout_ms: u64, + retry: u32, +) -> Result<(), PbsError> { + let start_request = Instant::now(); + let 
res = match relay + .client + .post(url) + .timeout(Duration::from_millis(timeout_ms)) + .headers(headers) + .body(body.0) + .send() + .await + { + Ok(res) => res, + Err(err) => { + RELAY_STATUS_CODE + .with_label_values(&[ + TIMEOUT_ERROR_CODE_STR, + REGISTER_VALIDATOR_ENDPOINT_TAG, + &relay.id, + ]) + .inc(); + return Err(err.into()); + } + }; + let request_latency = start_request.elapsed(); + let code = res.status(); + super::record_relay_metrics(REGISTER_VALIDATOR_ENDPOINT_TAG, &relay.id, code, request_latency); + + if !code.is_success() { + let response_bytes = read_chunked_body_with_max(res, MAX_SIZE_DEFAULT).await?; + let err = PbsError::RelayResponse { + error_msg: String::from_utf8_lossy(&response_bytes).into_owned(), + code: code.as_u16(), + }; + + // error here since we check if any success above + error!(relay_id = relay.id.as_ref(), retry, %err, "failed registration"); + return Err(err); + }; + + debug!( + relay_id = relay.id.as_ref(), + retry, + ?code, + latency = ?request_latency, + num_registrations = n_regs, + "registration successful" + ); + + Ok(()) +} diff --git a/crates/pbs/src/routes/reload.rs b/crates/pbs/src/routes/reload.rs index aa031d47..d7c03172 100644 --- a/crates/pbs/src/routes/reload.rs +++ b/crates/pbs/src/routes/reload.rs @@ -1,18 +1,18 @@ use axum::{extract::State, http::HeaderMap, response::IntoResponse}; -use cb_common::utils::get_user_agent; +use cb_common::{config::load_pbs_config, utils::get_user_agent}; use reqwest::StatusCode; -use tracing::{error, info}; +use tracing::{error, info, warn}; use crate::{ - BuilderApi, RELOAD_ENDPOINT_TAG, + RELOAD_ENDPOINT_TAG, error::PbsClientError, metrics::BEACON_NODE_STATUS, - state::{BuilderApiState, PbsStateGuard}, + state::{PbsState, PbsStateGuard}, }; -pub async fn handle_reload>( +pub async fn handle_reload( req_headers: HeaderMap, - State(state): State>, + State(state): State, ) -> Result { let prev_state = state.read().clone(); @@ -20,7 +20,7 @@ pub async fn handle_reload>( 
info!(ua, relay_check = prev_state.config.pbs_config.relay_check); - match A::reload(prev_state).await { + match reload(prev_state).await { Ok(new_state) => { info!("config reload successful"); @@ -40,3 +40,28 @@ pub async fn handle_reload>( } } } + +// ── Relay logic ────────────────────────────────────────────────────────────── + +/// Reload the PBS state with the latest configuration in the config file +/// Returns 200 if successful or 500 if failed +async fn reload(state: PbsState) -> eyre::Result { + let (pbs_config, config_path) = load_pbs_config(None).await?; + let new_state = PbsState::new(pbs_config, config_path); + + if state.config.pbs_config.host != new_state.config.pbs_config.host { + warn!( + "Host change for PBS module require a full restart. Old: {}, New: {}", + state.config.pbs_config.host, new_state.config.pbs_config.host + ); + } + + if state.config.pbs_config.port != new_state.config.pbs_config.port { + warn!( + "Port change for PBS module require a full restart. Old: {}, New: {}", + state.config.pbs_config.port, new_state.config.pbs_config.port + ); + } + + Ok(new_state) +} diff --git a/crates/pbs/src/routes/router.rs b/crates/pbs/src/routes/router.rs index e98c89c1..a92da735 100644 --- a/crates/pbs/src/routes/router.rs +++ b/crates/pbs/src/routes/router.rs @@ -21,37 +21,35 @@ use super::{ }; use crate::{ MAX_SIZE_REGISTER_VALIDATOR_REQUEST, MAX_SIZE_SUBMIT_BLOCK_RESPONSE, - api::BuilderApi, - routes::submit_block::handle_submit_block_v2, - state::{BuilderApiState, PbsStateGuard}, + routes::submit_block::handle_submit_block_v2, state::PbsStateGuard, }; -pub fn create_app_router>(state: PbsStateGuard) -> Router { +pub fn create_app_router(state: PbsStateGuard) -> Router { // DefaultBodyLimit is 2Mib by default, so we only increase it for a few routes // that may need more let v1_builder_routes = Router::new() - .route(GET_HEADER_PATH, get(handle_get_header::)) - .route(GET_STATUS_PATH, get(handle_get_status::)) + .route(GET_HEADER_PATH, 
get(handle_get_header)) + .route(GET_STATUS_PATH, get(handle_get_status)) .route( REGISTER_VALIDATOR_PATH, - post(handle_register_validator::) + post(handle_register_validator) .route_layer(DefaultBodyLimit::max(MAX_SIZE_REGISTER_VALIDATOR_REQUEST)), ) .route( SUBMIT_BLOCK_PATH, - post(handle_submit_block_v1::) + post(handle_submit_block_v1) .route_layer(DefaultBodyLimit::max(MAX_SIZE_SUBMIT_BLOCK_RESPONSE)), ); // header is smaller than the response but err on the safe side let v2_builder_routes = Router::new().route( SUBMIT_BLOCK_PATH, - post(handle_submit_block_v2::) + post(handle_submit_block_v2) .route_layer(DefaultBodyLimit::max(MAX_SIZE_SUBMIT_BLOCK_RESPONSE)), ); let v1_builder_router = Router::new().nest(BUILDER_V1_API_PATH, v1_builder_routes); let v2_builder_router = Router::new().nest(BUILDER_V2_API_PATH, v2_builder_routes); - let reload_router = Router::new().route(RELOAD_PATH, post(handle_reload::)); - let builder_api = + let reload_router = Router::new().route(RELOAD_PATH, post(handle_reload)); + let app = Router::new().merge(v1_builder_router).merge(v2_builder_router).merge(reload_router).layer( TraceLayer::new_for_http().on_response( |response: &Response, latency: std::time::Duration, _: &tracing::Span| { @@ -60,12 +58,6 @@ pub fn create_app_router>(state: PbsStateGu ), ); - let app = if let Some(extra_routes) = A::extra_routes() { - builder_api.merge(extra_routes) - } else { - builder_api - }; - app.layer(middleware::from_fn(tracing_middleware)).with_state(state) } diff --git a/crates/pbs/src/routes/status.rs b/crates/pbs/src/routes/status.rs index 52fd3e2f..4dc679b8 100644 --- a/crates/pbs/src/routes/status.rs +++ b/crates/pbs/src/routes/status.rs @@ -1,19 +1,24 @@ +use std::time::{Duration, Instant}; + use axum::{extract::State, http::HeaderMap, response::IntoResponse}; -use cb_common::utils::get_user_agent; -use reqwest::StatusCode; -use tracing::{error, info}; +use cb_common::{ + pbs::{RelayClient, error::PbsError}, + utils::{get_user_agent, 
get_user_agent_with_version, read_chunked_body_with_max}, +}; +use futures::future::select_ok; +use reqwest::{StatusCode, header::USER_AGENT}; +use tracing::{debug, error, info}; use crate::{ - api::BuilderApi, - constants::STATUS_ENDPOINT_TAG, + constants::{MAX_SIZE_DEFAULT, STATUS_ENDPOINT_TAG, TIMEOUT_ERROR_CODE_STR}, error::PbsClientError, - metrics::BEACON_NODE_STATUS, - state::{BuilderApiState, PbsStateGuard}, + metrics::{BEACON_NODE_STATUS, RELAY_STATUS_CODE}, + state::{PbsState, PbsStateGuard}, }; -pub async fn handle_get_status>( +pub async fn handle_get_status( req_headers: HeaderMap, - State(state): State>, + State(state): State, ) -> Result { let state = state.read().clone(); @@ -21,7 +26,7 @@ pub async fn handle_get_status>( info!(ua, relay_check = state.config.pbs_config.relay_check, "new request"); - match A::get_status(req_headers, state).await { + match get_status(req_headers, state).await { Ok(_) => { info!("relay check successful"); @@ -39,3 +44,72 @@ pub async fn handle_get_status>( } } } + +// ── Relay logic ────────────────────────────────────────────────────────────── + +/// Implements https://ethereum.github.io/builder-specs/#/Builder/status +/// Broadcasts a status check to all relays and returns 200 if at least one +/// relay returns 200 +async fn get_status(req_headers: HeaderMap, state: PbsState) -> eyre::Result<()> { + // If no relay check, return early + if !state.config.pbs_config.relay_check { + Ok(()) + } else { + // prepare headers + let mut send_headers = HeaderMap::new(); + send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); + + let relays = state.all_relays(); + let mut handles = Vec::with_capacity(relays.len()); + for relay in relays { + handles.push(Box::pin(send_relay_check(relay, send_headers.clone()))); + } + + // return ok if at least one relay returns 200 + let results = select_ok(handles).await; + match results { + Ok(_) => Ok(()), + Err(err) => Err(err.into()), + } + } +} + +async fn 
send_relay_check(relay: &RelayClient, headers: HeaderMap) -> Result<(), PbsError> { + let url = relay.get_status_url()?; + + let start_request = Instant::now(); + let res = match relay + .client + .get(url) + .timeout(Duration::from_secs(30)) + .headers(headers) + .send() + .await + { + Ok(res) => res, + Err(err) => { + RELAY_STATUS_CODE + .with_label_values(&[TIMEOUT_ERROR_CODE_STR, STATUS_ENDPOINT_TAG, &relay.id]) + .inc(); + return Err(err.into()); + } + }; + let request_latency = start_request.elapsed(); + let code = res.status(); + super::record_relay_metrics(STATUS_ENDPOINT_TAG, &relay.id, code, request_latency); + + if !code.is_success() { + let response_bytes = read_chunked_body_with_max(res, MAX_SIZE_DEFAULT).await?; + let err = PbsError::RelayResponse { + error_msg: String::from_utf8_lossy(&response_bytes).into_owned(), + code: code.as_u16(), + }; + + error!(relay_id = relay.id.as_ref(),%err, "status failed"); + return Err(err); + }; + + debug!(relay_id = relay.id.as_ref(),?code, latency = ?request_latency, "status passed"); + + Ok(()) +} diff --git a/crates/pbs/src/routes/submit_block.rs b/crates/pbs/src/routes/submit_block/mod.rs similarity index 59% rename from crates/pbs/src/routes/submit_block.rs rename to crates/pbs/src/routes/submit_block/mod.rs index 1cf442e0..77adae77 100644 --- a/crates/pbs/src/routes/submit_block.rs +++ b/crates/pbs/src/routes/submit_block/mod.rs @@ -1,4 +1,7 @@ -use std::sync::Arc; +mod relay; +mod validation; + +use std::{collections::HashSet, sync::Arc}; use axum::{ extract::State, @@ -6,45 +9,53 @@ use axum::{ response::IntoResponse, }; use cb_common::{ - pbs::{BuilderApiVersion, GetPayloadInfo}, + config::BlockValidationMode, + pbs::{ + BuilderApiVersion, GetPayloadInfo, HEADER_START_TIME_UNIX_MS, SignedBlindedBeaconBlock, + error::PbsError, + }, utils::{ - CONSENSUS_VERSION_HEADER, EncodingType, RawRequest, deserialize_body, get_accept_types, - get_user_agent, timestamp_of_slot_start_millis, utcnow_ms, + 
CONSENSUS_VERSION_HEADER, EncodingType, deserialize_body, get_accept_types, get_user_agent, + get_user_agent_with_version, timestamp_of_slot_start_millis, utcnow_ms, }, }; -use reqwest::{StatusCode, header::CONTENT_TYPE}; +use futures::{FutureExt, future::select_ok}; +use relay::{ProposalInfo, submit_block_with_timeout}; +use reqwest::{ + StatusCode, + header::{ACCEPT, CONTENT_TYPE, USER_AGENT}, +}; use ssz::Encode; -use tracing::{error, info, trace}; +use tracing::{debug, error, info, trace}; +use super::CompoundSubmitBlockResponse; use crate::{ - CompoundSubmitBlockResponse, - api::BuilderApi, constants::SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG, error::PbsClientError, metrics::BEACON_NODE_STATUS, - state::{BuilderApiState, PbsStateGuard}, + state::{PbsState, PbsStateGuard}, }; -pub async fn handle_submit_block_v1>( - state: State>, +pub async fn handle_submit_block_v1( + state: State, req_headers: HeaderMap, - raw_request: RawRequest, + raw_request: cb_common::utils::RawRequest, ) -> Result { - handle_submit_block_impl::(state, req_headers, raw_request, BuilderApiVersion::V1).await + handle_submit_block_impl(state, req_headers, raw_request, BuilderApiVersion::V1).await } -pub async fn handle_submit_block_v2>( - state: State>, +pub async fn handle_submit_block_v2( + state: State, req_headers: HeaderMap, - raw_request: RawRequest, + raw_request: cb_common::utils::RawRequest, ) -> Result { - handle_submit_block_impl::(state, req_headers, raw_request, BuilderApiVersion::V2).await + handle_submit_block_impl(state, req_headers, raw_request, BuilderApiVersion::V2).await } -async fn handle_submit_block_impl>( - State(state): State>, +async fn handle_submit_block_impl( + State(state): State, req_headers: HeaderMap, - raw_request: RawRequest, + raw_request: cb_common::utils::RawRequest, api_version: BuilderApiVersion, ) -> Result { let signed_blinded_block = @@ -72,8 +83,7 @@ async fn handle_submit_block_impl>( info!(ua, ms_into_slot = now.saturating_sub(slot_start_ms), "new 
request"); - match A::submit_block(signed_blinded_block, req_headers, state, api_version, accept_types).await - { + match submit_block(signed_blinded_block, req_headers, state, api_version, accept_types).await { Ok(res) => match res { crate::CompoundSubmitBlockResponse::EmptyBody => { info!("received unblinded block (v2)"); @@ -162,3 +172,67 @@ async fn handle_submit_block_impl>( } } } + +// ── Relay logic ────────────────────────────────────────────────────────────── + +/// Implements https://ethereum.github.io/builder-specs/#/Builder/submitBlindedBlock and +/// https://ethereum.github.io/builder-specs/#/Builder/submitBlindedBlockV2. Use `api_version` to +/// distinguish between the two. +pub(crate) async fn submit_block( + signed_blinded_block: Arc, + req_headers: HeaderMap, + state: PbsState, + api_version: BuilderApiVersion, + accepted_types: HashSet, +) -> eyre::Result { + debug!(?req_headers, "received headers"); + + // prepare headers + let mut send_headers = HeaderMap::new(); + send_headers.insert(HEADER_START_TIME_UNIX_MS, HeaderValue::from(utcnow_ms())); + send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); + + // Create the Accept headers for requests + let mode = state.pbs_config().block_validation_mode; + let accept_types_str = match mode { + BlockValidationMode::None => { + // No validation mode, so only request what the user wants because the response + // will be forwarded directly + accepted_types.iter().map(|t| t.content_type()).collect::>().join(",") + } + _ => { + // We're unpacking the body, so request both types since we can handle both + [EncodingType::Ssz.content_type(), EncodingType::Json.content_type()].join(",") + } + }; + send_headers.insert(ACCEPT, HeaderValue::from_str(&accept_types_str).unwrap()); + + // Send requests to all relays concurrently + let proposal_info = Arc::new(ProposalInfo { + signed_blinded_block, + headers: Arc::new(send_headers), + api_version, + validation_mode: mode, + accepted_types, + }); 
+ let mut handles = Vec::with_capacity(state.all_relays().len()); + for relay in state.all_relays().iter() { + handles.push( + tokio::spawn(submit_block_with_timeout( + proposal_info.clone(), + relay.clone(), + state.pbs_config().timeout_get_payload_ms, + )) + .map(|join_result| match join_result { + Ok(res) => res, + Err(err) => Err(PbsError::TokioJoinError(err)), + }), + ); + } + + let results = select_ok(handles).await; + match results { + Ok((res, _)) => Ok(res), + Err(err) => Err(err.into()), + } +} diff --git a/crates/pbs/src/mev_boost/submit_block.rs b/crates/pbs/src/routes/submit_block/relay.rs similarity index 65% rename from crates/pbs/src/mev_boost/submit_block.rs rename to crates/pbs/src/routes/submit_block/relay.rs index 11e0e289..d0a0ae23 100644 --- a/crates/pbs/src/mev_boost/submit_block.rs +++ b/crates/pbs/src/routes/submit_block/relay.rs @@ -4,143 +4,76 @@ use std::{ time::{Duration, Instant}, }; -use alloy::{eips::eip7594::CELLS_PER_EXT_BLOB, primitives::B256}; -use axum::http::{HeaderMap, HeaderValue}; +use axum::http::HeaderMap; use cb_common::{ config::BlockValidationMode, pbs::{ - BlindedBeaconBlock, BlobsBundle, BuilderApiVersion, ForkName, ForkVersionDecode, - HEADER_START_TIME_UNIX_MS, KzgCommitments, PayloadAndBlobs, RelayClient, - SignedBlindedBeaconBlock, SubmitBlindedBlockResponse, + BlindedBeaconBlock, BuilderApiVersion, ForkName, RelayClient, SignedBlindedBeaconBlock, + SubmitBlindedBlockResponse, error::{PbsError, ValidationError}, }, utils::{ CONSENSUS_VERSION_HEADER, EncodingType, get_consensus_version_header, - get_user_agent_with_version, read_chunked_body_with_max, utcnow_ms, + read_chunked_body_with_max, }, }; -use futures::{FutureExt, future::select_ok}; -use reqwest::{ - StatusCode, - header::{ACCEPT, CONTENT_TYPE, USER_AGENT}, -}; -use serde::Deserialize; +use reqwest::{StatusCode, header::CONTENT_TYPE}; use ssz::Encode; use tracing::{debug, warn}; use url::Url; +use super::validation::{ + decode_json_payload, 
decode_ssz_payload, get_light_info_from_json, validate_unblinded_block, +}; use crate::{ CompoundSubmitBlockResponse, LightSubmitBlockResponse, TIMEOUT_ERROR_CODE_STR, constants::{MAX_SIZE_SUBMIT_BLOCK_RESPONSE, SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG}, - metrics::{RELAY_LATENCY, RELAY_STATUS_CODE}, - state::{BuilderApiState, PbsState}, + metrics::RELAY_STATUS_CODE, }; /// Info about a proposal submission request. /// Sent from submit_block to the submit_block_with_timeout function. #[derive(Clone)] -struct ProposalInfo { +pub struct ProposalInfo { /// The signed blinded block to submit - signed_blinded_block: Arc, + pub signed_blinded_block: Arc, /// Common baseline of headers to send with each request - headers: Arc, + pub headers: Arc, /// The version of the submit_block route being used - api_version: BuilderApiVersion, + pub api_version: BuilderApiVersion, /// How to validate the block returned by the relay - validation_mode: BlockValidationMode, + pub validation_mode: BlockValidationMode, /// The accepted encoding types from the original request - accepted_types: HashSet, + pub accepted_types: HashSet, } -/// Used interally to provide info and context about a submit_block request and +/// Used internally to provide info and context about a submit_block request and /// its response -struct SubmitBlockResponseInfo { +pub struct SubmitBlockResponseInfo { /// The raw body of the response - response_bytes: Vec, + pub response_bytes: Vec, /// The content type the response is encoded with - content_type: EncodingType, + pub content_type: EncodingType, /// Which fork the response bid is for (if provided as a header, rather than /// part of the body) - fork: Option, + pub fork: Option, /// The status code of the response, for logging - code: StatusCode, + pub code: StatusCode, /// The round-trip latency of the request - request_latency: Duration, -} - -/// Implements https://ethereum.github.io/builder-specs/#/Builder/submitBlindedBlock and -/// 
https://ethereum.github.io/builder-specs/#/Builder/submitBlindedBlockV2. Use `api_version` to -/// distinguish between the two. -pub async fn submit_block( - signed_blinded_block: Arc, - req_headers: HeaderMap, - state: PbsState, - api_version: BuilderApiVersion, - accepted_types: HashSet, -) -> eyre::Result { - debug!(?req_headers, "received headers"); - - // prepare headers - let mut send_headers = HeaderMap::new(); - send_headers.insert(HEADER_START_TIME_UNIX_MS, HeaderValue::from(utcnow_ms())); - send_headers.insert(USER_AGENT, get_user_agent_with_version(&req_headers)?); - - // Create the Accept headers for requests - let mode = state.pbs_config().block_validation_mode; - let accept_types = match mode { - BlockValidationMode::None => { - // No validation mode, so only request what the user wants because the response - // will be forwarded directly - accepted_types.iter().map(|t| t.content_type()).collect::>().join(",") - } - _ => { - // We're unpacking the body, so request both types since we can handle both - [EncodingType::Ssz.content_type(), EncodingType::Json.content_type()].join(",") - } - }; - send_headers.insert(ACCEPT, HeaderValue::from_str(&accept_types).unwrap()); - - // Send requests to all relays concurrently - let proposal_info = Arc::new(ProposalInfo { - signed_blinded_block, - headers: Arc::new(send_headers), - api_version, - validation_mode: mode, - accepted_types, - }); - let mut handles = Vec::with_capacity(state.all_relays().len()); - for relay in state.all_relays().iter() { - handles.push( - tokio::spawn(submit_block_with_timeout( - proposal_info.clone(), - relay.clone(), - state.pbs_config().timeout_get_payload_ms, - )) - .map(|join_result| match join_result { - Ok(res) => res, - Err(err) => Err(PbsError::TokioJoinError(err)), - }), - ); - } - - let results = select_ok(handles).await; - match results { - Ok((res, _)) => Ok(res), - Err(err) => Err(err.into()), - } + pub request_latency: Duration, } /// Submit blinded block to relay, retry 
connection errors until the /// given timeout has passed -async fn submit_block_with_timeout( +pub async fn submit_block_with_timeout( proposal_info: Arc, relay: RelayClient, timeout_ms: u64, @@ -415,7 +348,7 @@ async fn send_submit_block_light( /// Sends the actual HTTP request to the relay's submit_block endpoint, /// returning the response (if applicable), the round-trip time, and the /// encoding type used for the body (if any). Used by send_submit_block. -async fn send_submit_block_impl( +pub async fn send_submit_block_impl( relay: &RelayClient, url: Arc, timeout_ms: u64, @@ -485,12 +418,12 @@ async fn send_submit_block_impl( // Log the response code and latency let code = res.status(); let request_latency = start_request.elapsed(); - RELAY_LATENCY - .with_label_values(&[SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG, &relay.id]) - .observe(request_latency.as_secs_f64()); - RELAY_STATUS_CODE - .with_label_values(&[code.as_str(), SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG, &relay.id]) - .inc(); + super::super::record_relay_metrics( + SUBMIT_BLINDED_BLOCK_ENDPOINT_TAG, + &relay.id, + code, + request_latency, + ); // If this was API v2 and succeeded then we can just return here if api_version != BuilderApiVersion::V1 { @@ -581,152 +514,3 @@ async fn send_submit_block_impl( let response_bytes = read_chunked_body_with_max(res, MAX_SIZE_SUBMIT_BLOCK_RESPONSE).await?; Ok(SubmitBlockResponseInfo { response_bytes, content_type, fork, code, request_latency }) } - -/// Decode a JSON-encoded submit_block response -fn decode_json_payload(response_bytes: &[u8]) -> Result { - match serde_json::from_slice::(response_bytes) { - Ok(parsed) => Ok(parsed), - Err(err) => Err(PbsError::JsonDecode { - err, - raw: String::from_utf8_lossy(response_bytes).into_owned(), - }), - } -} - -/// Get the fork name from a submit_block JSON response (used for light -/// processing) -fn get_light_info_from_json(response_bytes: &[u8]) -> Result { - #[derive(Deserialize)] - struct LightSubmitBlockResponse { - version: 
ForkName, - } - - match serde_json::from_slice::(response_bytes) { - Ok(parsed) => Ok(parsed.version), - Err(err) => Err(PbsError::JsonDecode { - err, - raw: String::from_utf8_lossy(response_bytes).into_owned(), - }), - } -} - -/// Decode an SSZ-encoded submit_block response -fn decode_ssz_payload( - response_bytes: &[u8], - fork: ForkName, -) -> Result { - let data = PayloadAndBlobs::from_ssz_bytes_by_fork(response_bytes, fork).map_err(|e| { - PbsError::RelayResponse { - error_msg: (format!("error decoding relay payload: {e:?}")).to_string(), - code: 200, - } - })?; - Ok(SubmitBlindedBlockResponse { version: fork, data, metadata: Default::default() }) -} - -fn validate_unblinded_block( - expected_block_hash: B256, - got_block_hash: B256, - expected_commitments: &KzgCommitments, - blobs_bundle: &BlobsBundle, - fork_name: ForkName, -) -> Result<(), PbsError> { - match fork_name { - ForkName::Base | - ForkName::Altair | - ForkName::Bellatrix | - ForkName::Capella | - ForkName::Deneb | - ForkName::Gloas => Err(PbsError::Validation(ValidationError::UnsupportedFork)), - ForkName::Electra => validate_unblinded_block_electra( - expected_block_hash, - got_block_hash, - expected_commitments, - blobs_bundle, - ), - ForkName::Fulu => validate_unblinded_block_fulu( - expected_block_hash, - got_block_hash, - expected_commitments, - blobs_bundle, - ), - } -} - -fn validate_unblinded_block_electra( - expected_block_hash: B256, - got_block_hash: B256, - expected_commitments: &KzgCommitments, - blobs_bundle: &BlobsBundle, -) -> Result<(), PbsError> { - if expected_block_hash != got_block_hash { - return Err(PbsError::Validation(ValidationError::BlockHashMismatch { - expected: expected_block_hash, - got: got_block_hash, - })); - } - - if expected_commitments.len() != blobs_bundle.blobs.len() || - expected_commitments.len() != blobs_bundle.commitments.len() || - expected_commitments.len() != blobs_bundle.proofs.len() - { - return 
Err(PbsError::Validation(ValidationError::KzgCommitments { - expected_blobs: expected_commitments.len(), - got_blobs: blobs_bundle.blobs.len(), - got_commitments: blobs_bundle.commitments.len(), - got_proofs: blobs_bundle.proofs.len(), - })); - } - - for (i, comm) in expected_commitments.iter().enumerate() { - // this is safe since we already know they are the same length - if *comm != blobs_bundle.commitments[i] { - return Err(PbsError::Validation(ValidationError::KzgMismatch { - expected: format!("{comm}"), - got: format!("{}", blobs_bundle.commitments[i]), - index: i, - })); - } - } - - Ok(()) -} - -fn validate_unblinded_block_fulu( - expected_block_hash: B256, - got_block_hash: B256, - expected_commitments: &KzgCommitments, - blobs_bundle: &BlobsBundle, -) -> Result<(), PbsError> { - if expected_block_hash != got_block_hash { - return Err(PbsError::Validation(ValidationError::BlockHashMismatch { - expected: expected_block_hash, - got: got_block_hash, - })); - } - - if expected_commitments.len() != blobs_bundle.blobs.len() || - expected_commitments.len() != blobs_bundle.commitments.len() || - expected_commitments.len() * CELLS_PER_EXT_BLOB != blobs_bundle.proofs.len() - { - return Err(PbsError::Validation(ValidationError::KzgCommitments { - expected_blobs: expected_commitments.len(), - got_blobs: blobs_bundle.blobs.len(), - got_commitments: blobs_bundle.commitments.len(), - got_proofs: blobs_bundle.proofs.len(), - })); - } - - for (i, comm) in expected_commitments.iter().enumerate() { - // this is safe since we already know they are the same length - if *comm != blobs_bundle.commitments[i] { - return Err(PbsError::Validation(ValidationError::KzgMismatch { - expected: format!("{comm}"), - got: format!("{}", blobs_bundle.commitments[i]), - index: i, - })); - } - } - - Ok(()) -} diff --git a/crates/pbs/src/routes/submit_block/validation.rs b/crates/pbs/src/routes/submit_block/validation.rs new file mode 100644 index 00000000..97659640 --- /dev/null +++ 
b/crates/pbs/src/routes/submit_block/validation.rs @@ -0,0 +1,148 @@ +use alloy::{eips::eip7594::CELLS_PER_EXT_BLOB, primitives::B256}; +use cb_common::pbs::{ + BlobsBundle, ForkName, ForkVersionDecode, KzgCommitments, PayloadAndBlobs, + SubmitBlindedBlockResponse, + error::{PbsError, ValidationError}, +}; +use serde::Deserialize; + +/// Decode a JSON-encoded submit_block response +pub fn decode_json_payload(response_bytes: &[u8]) -> Result { + match serde_json::from_slice::(response_bytes) { + Ok(parsed) => Ok(parsed), + Err(err) => Err(PbsError::JsonDecode { + err, + raw: String::from_utf8_lossy(response_bytes).into_owned(), + }), + } +} + +/// Get the fork name from a submit_block JSON response (used for light +/// processing) +pub fn get_light_info_from_json(response_bytes: &[u8]) -> Result { + #[derive(Deserialize)] + struct LightVersionOnly { + version: ForkName, + } + + match serde_json::from_slice::(response_bytes) { + Ok(parsed) => Ok(parsed.version), + Err(err) => Err(PbsError::JsonDecode { + err, + raw: String::from_utf8_lossy(response_bytes).into_owned(), + }), + } +} + +/// Decode an SSZ-encoded submit_block response +pub fn decode_ssz_payload( + response_bytes: &[u8], + fork: ForkName, +) -> Result { + let data = PayloadAndBlobs::from_ssz_bytes_by_fork(response_bytes, fork).map_err(|e| { + PbsError::RelayResponse { + error_msg: (format!("error decoding relay payload: {e:?}")).to_string(), + code: 200, + } + })?; + Ok(SubmitBlindedBlockResponse { version: fork, data, metadata: Default::default() }) +} + +pub fn validate_unblinded_block( + expected_block_hash: B256, + got_block_hash: B256, + expected_commitments: &KzgCommitments, + blobs_bundle: &BlobsBundle, + fork_name: ForkName, +) -> Result<(), PbsError> { + match fork_name { + ForkName::Base | + ForkName::Altair | + ForkName::Bellatrix | + ForkName::Capella | + ForkName::Deneb | + ForkName::Gloas => Err(PbsError::Validation(ValidationError::UnsupportedFork)), + ForkName::Electra => 
validate_unblinded_block_electra( + expected_block_hash, + got_block_hash, + expected_commitments, + blobs_bundle, + ), + ForkName::Fulu => validate_unblinded_block_fulu( + expected_block_hash, + got_block_hash, + expected_commitments, + blobs_bundle, + ), + } +} + +pub fn validate_unblinded_block_electra( + expected_block_hash: B256, + got_block_hash: B256, + expected_commitments: &KzgCommitments, + blobs_bundle: &BlobsBundle, +) -> Result<(), PbsError> { + validate_unblinded_block_inner( + expected_block_hash, + got_block_hash, + expected_commitments, + blobs_bundle, + expected_commitments.len(), + ) +} + +pub fn validate_unblinded_block_fulu( + expected_block_hash: B256, + got_block_hash: B256, + expected_commitments: &KzgCommitments, + blobs_bundle: &BlobsBundle, +) -> Result<(), PbsError> { + validate_unblinded_block_inner( + expected_block_hash, + got_block_hash, + expected_commitments, + blobs_bundle, + expected_commitments.len() * CELLS_PER_EXT_BLOB, + ) +} + +pub fn validate_unblinded_block_inner( + expected_block_hash: B256, + got_block_hash: B256, + expected_commitments: &KzgCommitments, + blobs_bundle: &BlobsBundle, + expected_proof_count: usize, +) -> Result<(), PbsError> { + if expected_block_hash != got_block_hash { + return Err(PbsError::Validation(ValidationError::BlockHashMismatch { + expected: expected_block_hash, + got: got_block_hash, + })); + } + + if expected_commitments.len() != blobs_bundle.blobs.len() || + expected_commitments.len() != blobs_bundle.commitments.len() || + expected_proof_count != blobs_bundle.proofs.len() + { + return Err(PbsError::Validation(ValidationError::KzgCommitments { + expected_blobs: expected_commitments.len(), + got_blobs: blobs_bundle.blobs.len(), + got_commitments: blobs_bundle.commitments.len(), + got_proofs: blobs_bundle.proofs.len(), + })); + } + + for (i, comm) in expected_commitments.iter().enumerate() { + // this is safe since we already know they are the same length + if *comm != 
blobs_bundle.commitments[i] { + return Err(PbsError::Validation(ValidationError::KzgMismatch { + expected: format!("{comm}"), + got: format!("{}", blobs_bundle.commitments[i]), + index: i, + })); + } + } + + Ok(()) +} diff --git a/crates/pbs/src/service.rs b/crates/pbs/src/service.rs index 8be422ca..36b417a7 100644 --- a/crates/pbs/src/service.rs +++ b/crates/pbs/src/service.rs @@ -20,16 +20,25 @@ use tracing::{debug, info, warn}; use url::Url; use crate::{ - api::BuilderApi, metrics::PBS_METRICS_REGISTRY, routes::create_app_router, - state::{BuilderApiState, PbsState, PbsStateGuard}, + state::{PbsState, PbsStateGuard}, }; pub struct PbsService; impl PbsService { - pub async fn run>(state: PbsState) -> Result<()> { + pub async fn run(state: PbsState) -> Result<()> { + let listener = TcpListener::bind(state.config.endpoint).await?; + Self::run_with_listener(state, listener).await + } + + /// Like [`run`], but accepts a pre-bound [`TcpListener`]. + /// + /// Useful in tests where the caller binds the socket with port 0 to get + /// an OS-assigned port and then passes the listener here, eliminating the + /// TOCTOU race that would otherwise exist between port discovery and bind. + pub async fn run_with_listener(state: PbsState, listener: TcpListener) -> Result<()> { let addr = state.config.endpoint; info!(version = COMMIT_BOOST_VERSION, commit_hash = COMMIT_BOOST_COMMIT, ?addr, chain =? 
state.config.chain, "starting PBS service"); @@ -42,9 +51,8 @@ impl PbsService { }); let config_path = state.config_path.clone(); - let state: Arc>> = RwLock::new(state).into(); - let app = create_app_router::(state.clone()); - let listener = TcpListener::bind(addr).await?; + let state: Arc> = RwLock::new(state).into(); + let app = create_app_router(state.clone()); let task = tokio::spawn( @@ -131,7 +139,7 @@ impl PbsService { MetricsProvider::load_and_run(network, PBS_METRICS_REGISTRY.clone()) } - async fn refresh_registry_muxes(state: PbsStateGuard) { + async fn refresh_registry_muxes(state: PbsStateGuard) { // Read-only portion let mut new_pubkeys = HashMap::new(); let mut removed_pubkeys = HashSet::new(); diff --git a/crates/pbs/src/state.rs b/crates/pbs/src/state.rs index cbe86af9..560a0cf3 100644 --- a/crates/pbs/src/state.rs +++ b/crates/pbs/src/state.rs @@ -7,39 +7,22 @@ use cb_common::{ }; use parking_lot::RwLock; -pub trait BuilderApiState: Clone + Sync + Send + 'static {} -impl BuilderApiState for () {} +pub type PbsStateGuard = Arc>; -pub type PbsStateGuard = Arc>>; - -/// Config for the Pbs module. It can be extended by adding extra data to the -/// state for modules that need it -// TODO: consider remove state from the PBS module altogether +/// Config for the Pbs module. 
#[derive(Clone)] -pub struct PbsState { +pub struct PbsState { /// Config data for the Pbs service pub config: Arc, /// Path of the config file, for watching changes pub config_path: Arc, - /// Opaque extra data for library use - pub data: S, } -impl PbsState<()> { +impl PbsState { pub fn new(config: PbsModuleConfig, config_path: PathBuf) -> Self { - Self { config: Arc::new(config), config_path: Arc::new(config_path), data: () } - } - - pub fn with_data(self, data: S) -> PbsState { - PbsState { data, config: self.config, config_path: self.config_path } + Self { config: Arc::new(config), config_path: Arc::new(config_path) } } -} -impl PbsState -where - S: BuilderApiState, -{ - // Getters pub fn pbs_config(&self) -> &PbsConfig { &self.config.pbs_config } diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index fdf86879..81729272 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -177,15 +177,14 @@ impl SigningService { break; } Err(e) => { + if attempts >= 3 { + return Err(eyre::eyre!( + "Exceeded maximum attempts to install AWS-LC as default TLS provider: {e:?}" + )); + } error!( "Failed to install AWS-LC as default TLS provider: {e:?}. Retrying..." 
); - if attempts >= 3 { - error!( - "Exceeded maximum attempts to install AWS-LC as default TLS provider" - ); - break; - } attempts += 1; } } @@ -249,7 +248,6 @@ async fn jwt_auth( let path = parts.uri.path(); let bytes = to_bytes(body, REQUEST_MAX_BODY_LENGTH).await.map_err(|e| { error!("Failed to read request body: {e}"); - mark_jwt_failure(&state, client_ip); SignerModuleError::RequestError(e.to_string()) })?; @@ -274,38 +272,40 @@ async fn jwt_auth( /// Checks if the incoming request needs to be rate limited due to previous JWT /// authentication failures fn check_jwt_rate_limit(state: &SigningState, client_ip: &IpAddr) -> Result<(), SignerModuleError> { - let mut failures = state.jwt_auth_failures.write(); + let failures = state.jwt_auth_failures.read(); // Ignore clients that don't have any failures - if let Some(failure_info) = failures.get(client_ip) { - // If the last failure was more than the timeout ago, remove this entry so it's - // eligible again - let elapsed = failure_info.last_failure.elapsed(); - if elapsed > state.jwt_auth_fail_timeout { - debug!("Removing {client_ip} from JWT auth failure list"); - failures.remove(client_ip); - return Ok(()); - } + let Some(failure_info) = failures.get(client_ip) else { + debug!("Client {client_ip} has no JWT auth failures, no rate limit applied"); + return Ok(()); + }; - // If the failure threshold hasn't been met yet, don't rate limit - if failure_info.failure_count < state.jwt_auth_fail_limit { - debug!( - "Client {client_ip} has {}/{} JWT auth failures, no rate limit applied", - failure_info.failure_count, state.jwt_auth_fail_limit - ); - return Ok(()); - } + let elapsed = failure_info.last_failure.elapsed(); + + // If the last failure was more than the timeout ago, remove this entry so it's + // eligible again + if elapsed > state.jwt_auth_fail_timeout { + drop(failures); + debug!("Removing {client_ip} from JWT auth failure list"); + state.jwt_auth_failures.write().remove(client_ip); + return Ok(()); + } - 
// Rate limit the request - let remaining = state.jwt_auth_fail_timeout.saturating_sub(elapsed); - warn!( - "Client {client_ip} is rate limited for {remaining:?} more seconds due to JWT auth failures" + // If the failure threshold hasn't been met yet, don't rate limit + if failure_info.failure_count < state.jwt_auth_fail_limit { + debug!( + "Client {client_ip} has {}/{} JWT auth failures, no rate limit applied", + failure_info.failure_count, state.jwt_auth_fail_limit ); - return Err(SignerModuleError::RateLimited(remaining.as_secs_f64())); + return Ok(()); } - debug!("Client {client_ip} has no JWT auth failures, no rate limit applied"); - Ok(()) + // Rate limit the request + let remaining = state.jwt_auth_fail_timeout.saturating_sub(elapsed); + warn!( + "Client {client_ip} is rate limited for {remaining:?} more seconds due to JWT auth failures" + ); + Err(SignerModuleError::RateLimited(remaining.as_secs_f64())) } /// Checks if a request can successfully authenticate with the JWT secret @@ -359,7 +359,6 @@ async fn admin_auth( let path = parts.uri.path(); let bytes = to_bytes(body, REQUEST_MAX_BODY_LENGTH).await.map_err(|e| { error!("Failed to read request body: {e}"); - mark_jwt_failure(&state, client_ip); SignerModuleError::RequestError(e.to_string()) })?; @@ -633,7 +632,6 @@ async fn handle_reload( ) -> Result { debug!(event = "reload", "New request"); - // Regenerate the config let config = match StartSignerConfig::load_from_env() { Ok(config) => config, Err(err) => { @@ -642,7 +640,6 @@ async fn handle_reload( } }; - // Start a new manager with the updated config let new_manager = match start_manager(config).await { Ok(manager) => manager, Err(err) => { @@ -651,17 +648,24 @@ async fn handle_reload( } }; - // Update the JWT configs if provided in the request + apply_reload(state, request, new_manager).await +} + +/// Applies a reload request to the signing state. 
Separated from +/// `handle_reload` so the business logic can be tested without requiring a +/// live environment (config file, env vars, keystore on disk). +async fn apply_reload( + state: SigningState, + request: ReloadRequest, + new_manager: SigningManager, +) -> Result { + // Update the JWT configs if provided in the request. Only the provided + // modules are updated; omitted modules keep their existing secrets. if let Some(jwt_secrets) = request.jwt_secrets { let mut jwt_configs = state.jwts.write(); - let mut new_configs = HashMap::new(); for (module_id, jwt_secret) in jwt_secrets { - if let Some(signing_id) = jwt_configs.get(&module_id).map(|cfg| cfg.signing_id) { - new_configs.insert(module_id.clone(), ModuleSigningConfig { - module_name: module_id, - jwt_secret, - signing_id, - }); + if let Some(cfg) = jwt_configs.get_mut(&module_id) { + cfg.jwt_secret = jwt_secret; } else { let error_message = format!( "Module {module_id} signing ID not found in commit-boost config, cannot reload" @@ -670,10 +674,8 @@ async fn handle_reload( return Err(SignerModuleError::RequestError(error_message)); } } - *jwt_configs = new_configs; } - // Update the rest of the state once everything has passed if let Some(admin_secret) = request.admin_secret { *state.admin_secret.write() = admin_secret; } @@ -723,3 +725,156 @@ async fn start_manager(config: StartSignerConfig) -> eyre::Result ModuleSigningConfig { + ModuleSigningConfig { + module_name: ModuleId(module_name.to_string()), + jwt_secret: secret.to_string(), + signing_id, + } + } + + fn make_state(jwts: HashMap) -> SigningState { + SigningState { + manager: Arc::new(RwLock::new(SigningManager::Local( + LocalSigningManager::new(Chain::Holesky, None).unwrap(), + ))), + jwts: Arc::new(ParkingRwLock::new(jwts)), + admin_secret: Arc::new(ParkingRwLock::new("admin".to_string())), + jwt_auth_failures: Arc::new(ParkingRwLock::new(HashMap::new())), + jwt_auth_fail_limit: 3, + jwt_auth_fail_timeout: Duration::from_secs(60), + 
reverse_proxy: ReverseProxyHeaderSetup::None, + } + } + + fn empty_manager() -> SigningManager { + SigningManager::Local(LocalSigningManager::new(Chain::Holesky, None).unwrap()) + } + + /// Partial reload must update only the provided modules and leave omitted + /// modules with their existing secrets. + #[tokio::test] + async fn test_partial_reload_preserves_omitted_modules() { + let module_a = ModuleId("module-a".to_string()); + let module_b = ModuleId("module-b".to_string()); + let signing_id_a = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + let signing_id_b = + b256!("0202020202020202020202020202020202020202020202020202020202020202"); + + let state = make_state(HashMap::from([ + (module_a.clone(), make_signing_config("module-a", "secret-a", signing_id_a)), + (module_b.clone(), make_signing_config("module-b", "secret-b", signing_id_b)), + ])); + + let request = ReloadRequest { + jwt_secrets: Some(HashMap::from([(module_a.clone(), "rotated-secret-a".to_string())])), + admin_secret: None, + }; + + let result = apply_reload(state.clone(), request, empty_manager()).await; + assert!(result.is_ok(), "apply_reload should succeed"); + + let jwts = state.jwts.read(); + assert_eq!( + jwts[&module_a].jwt_secret, "rotated-secret-a", + "module_a secret should be updated" + ); + assert_eq!( + jwts[&module_b].jwt_secret, "secret-b", + "module_b secret must be preserved when omitted" + ); + } + + /// A full reload (all modules provided) should update every module. 
+ #[tokio::test] + async fn test_full_reload_updates_all_modules() { + let module_a = ModuleId("module-a".to_string()); + let module_b = ModuleId("module-b".to_string()); + let signing_id_a = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + let signing_id_b = + b256!("0202020202020202020202020202020202020202020202020202020202020202"); + + let state = make_state(HashMap::from([ + (module_a.clone(), make_signing_config("module-a", "secret-a", signing_id_a)), + (module_b.clone(), make_signing_config("module-b", "secret-b", signing_id_b)), + ])); + + let request = ReloadRequest { + jwt_secrets: Some(HashMap::from([ + (module_a.clone(), "new-secret-a".to_string()), + (module_b.clone(), "new-secret-b".to_string()), + ])), + admin_secret: None, + }; + + apply_reload(state.clone(), request, empty_manager()).await.unwrap(); + + let jwts = state.jwts.read(); + assert_eq!(jwts[&module_a].jwt_secret, "new-secret-a"); + assert_eq!(jwts[&module_b].jwt_secret, "new-secret-b"); + } + + /// Reload with an unknown module ID in jwt_secrets should return an error + /// and leave the existing state unchanged. 
+ #[tokio::test] + async fn test_reload_unknown_module_returns_error() { + let module_a = ModuleId("module-a".to_string()); + let signing_id_a = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + + let state = make_state(HashMap::from([( + module_a.clone(), + make_signing_config("module-a", "secret-a", signing_id_a), + )])); + + let request = ReloadRequest { + jwt_secrets: Some(HashMap::from([( + ModuleId("unknown-module".to_string()), + "some-secret".to_string(), + )])), + admin_secret: None, + }; + + let result = apply_reload(state.clone(), request, empty_manager()).await; + assert!(result.is_err(), "unknown module should return an error"); + + // Existing module must be untouched + let jwts = state.jwts.read(); + assert_eq!(jwts[&module_a].jwt_secret, "secret-a"); + } + + /// Reload with no jwt_secrets should leave all module secrets unchanged. + #[tokio::test] + async fn test_reload_without_jwt_secrets_preserves_all() { + let module_a = ModuleId("module-a".to_string()); + let signing_id_a = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + + let state = make_state(HashMap::from([( + module_a.clone(), + make_signing_config("module-a", "secret-a", signing_id_a), + )])); + + let request = ReloadRequest { jwt_secrets: None, admin_secret: None }; + + apply_reload(state.clone(), request, empty_manager()).await.unwrap(); + + let jwts = state.jwts.read(); + assert_eq!(jwts[&module_a].jwt_secret, "secret-a"); + } +} diff --git a/docs/docs/developing/custom-modules.md b/docs/docs/developing/custom-modules.md index cf224448..22044a58 100644 --- a/docs/docs/developing/custom-modules.md +++ b/docs/docs/developing/custom-modules.md @@ -4,9 +4,7 @@ sidebar_position: 1 # Custom Modules -Commit-Boost aims to provide an open platform for developers to create and distribute commitment protocols sidecars. +Commit-Boost aims to provide an open platform for developers to create and distribute commitment protocol sidecars. 
-There are two ways to leverage Commit-Boost modularity: -1. Commit Modules, which request signatures from the proposer, e.g. for preconfirmations ([example](https://github.com/Commit-Boost/commit-boost-client/tree/78bdc47bf89082f4d1ea302f9a3f86f609966b28/examples/da_commit)). -2. PBS Modules, which tweak the default PBS Module with additional logic, e.g. verifying additional constraints in `get_header` ([example](https://github.com/Commit-Boost/commit-boost-client/tree/78bdc47bf89082f4d1ea302f9a3f86f609966b28/examples/status_api)). +This happens through Commit Modules, which request signatures from the proposer, e.g. for preconfirmations ([example](https://github.com/Commit-Boost/commit-boost-client/tree/main/examples/da_commit)). diff --git a/docs/docs/developing/prop-commit-signing.md b/docs/docs/developing/prop-commit-signing.md index 1e8bd249..30f70413 100644 --- a/docs/docs/developing/prop-commit-signing.md +++ b/docs/docs/developing/prop-commit-signing.md @@ -44,7 +44,7 @@ Your module has the option of using **Nonces** for each of its signature request If you want to use them within your module, your module (or whatever remote backend system it connects to) **will be responsible** for storing, comparing, validating, and otherwise using the nonces. Commit-Boost's signer service by itself **does not** store nonces or track which ones have already been used by a given module. -In terms of implementation, the nonce format conforms to the specification in [EIP-2681](https://eips.ethereum.org/EIPS/eip-2681). It is an unsigned 64-bit big-endian integer, with a minimum value of 0 and a maximum value of `2^64-2`. We recommend using `2^64-1` as a signifier indicating that your module doesn't use nonces, rather than using 0 for such a purpose. +In terms of implementation, the nonce format conforms to the specification in [EIP-2681](https://eips.ethereum.org/EIPS/eip-2681). It is an unsigned 64-bit integer, with a minimum value of 0 and a maximum value of `2^64-2`. 
The field is required and is always mixed into the signing root. Modules that do not use nonces for replay protection should always send `0`; modules that do should use a monotonically increasing value per key. ## Structure of a Signature @@ -63,7 +63,7 @@ where, for the sub-tree in blue: - `Signing ID` is your module's 32-byte signing ID. The signer service will load this for your module from its configuration file. -- `Nonce` is the nonce value for the signature request. While this value must be present, it can be effectively ignored by setting it to some arbitrary value if your module does not track nonces. Conforming with the tree specification, it must be added as a 256-bit unsigned little-endian integer. Most libraries will be able to do this conversion automatically if you specify the field as the language's primitive for 64-bit unsigned integers (e.g., `uint64`, `u64`, `ulong`, etc.). +- `Nonce` is the nonce value for the signature request. This field is required. Modules that do not use replay protection should always send `0`; modules that do should use a monotonically increasing value per key. Conforming with the tree specification, it must be added as a 256-bit unsigned little-endian integer. Most libraries will be able to do this conversion automatically if you specify the field as the language's primitive for 64-bit unsigned integers (e.g., `uint64`, `u64`, `ulong`, etc.). - `Chain ID` is the ID of the chain that the Signer service is currently configured to use, as indicated by the [Commit-Boost configuration file](../get_started/configuration.md). This must also be a 256-bit unsigned little-endian integer. 
diff --git a/docs/docs/get_started/building.md b/docs/docs/get_started/building.md index dd860be2..9d73d6bc 100644 --- a/docs/docs/get_started/building.md +++ b/docs/docs/get_started/building.md @@ -108,7 +108,6 @@ chain = "Hoodi" [pbs] port = 18550 -with_signer = true [[relays]] url = "https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-hoodi.flashbots.net" diff --git a/docs/docs/get_started/configuration.md b/docs/docs/get_started/configuration.md index 60e55515..240de972 100644 --- a/docs/docs/get_started/configuration.md +++ b/docs/docs/get_started/configuration.md @@ -63,8 +63,6 @@ To start a local Signer Service, you need to include its parameters in the confi ```toml [pbs] ... -with_signer = true - [signer] port = 20000 @@ -97,8 +95,6 @@ We currently support Lighthouse, Prysm, Teku, Lodestar, and Nimbus's keystores s ```toml [pbs] ... -with_signer = true - [signer] port = 20000 @@ -129,8 +125,6 @@ secrets_path = "secrets" ```toml [pbs] ... -with_signer = true - [signer] port = 20000 @@ -161,8 +155,6 @@ secrets_path = "secrets/password.txt" ```toml [pbs] ... -with_signer = true - [signer] port = 20000 @@ -192,8 +184,6 @@ secrets_path = "secrets" ```toml [pbs] ... -with_signer = true - [signer] port = 20000 @@ -228,8 +218,6 @@ All keys have the same password stored in `secrets/password.txt` ```toml [pbs] ... - with_signer = true - [signer] port = 20000 @@ -397,8 +385,6 @@ Specifying it is done within Commit-Boost's configuration file using the `[signe ```toml [pbs] ... -with_signer = true - [signer] port = 20000 ... @@ -550,8 +536,7 @@ Parameters that are not provided will not be updated; they will be regenerated u ### Notes -- The hot reload feature is available for both the PBS service (both default and custom) and Signer service. +- The hot reload feature is available for both the PBS service and Signer service. 
-- Changes related to listening hosts and ports will not been applied, as it requires the server to be restarted.
+- Changes related to listening hosts and ports will not be applied, as it requires the server to be restarted.
 - If running in Docker containers, changes in `volumes` will not be applied, as it requires the container to be recreated. Be careful if changing a path to a local file as it may not be accessible from the container.
-- Custom PBS modules may override the default behaviour of the hot reload feature to parse extra configuration fields. Check the [examples](https://github.com/Commit-Boost/commit-boost-client/blob/main/examples/status_api/src/main.rs) for more details.
 - In case the reload fails (most likely because of some misconfigured option), the server will return a 500 error and the previous configuration will be kept.
diff --git a/examples/status_api/Cargo.toml b/examples/status_api/Cargo.toml
deleted file mode 100644
index f2be040e..00000000
--- a/examples/status_api/Cargo.toml
+++ /dev/null
@@ -1,18 +0,0 @@
-[package]
-edition.workspace = true
-name = "status_api"
-rust-version.workspace = true
-version.workspace = true
-
-[dependencies]
-async-trait.workspace = true
-axum.workspace = true
-color-eyre.workspace = true
-commit-boost = { path = "../../bin" }
-eyre.workspace = true
-lazy_static.workspace = true
-prometheus.workspace = true
-reqwest.workspace = true
-serde.workspace = true
-tokio.workspace = true
-tracing.workspace = true
diff --git a/examples/status_api/Dockerfile b/examples/status_api/Dockerfile
deleted file mode 100644
index dd20f000..00000000
--- a/examples/status_api/Dockerfile
+++ /dev/null
@@ -1,29 +0,0 @@
-FROM lukemathwalker/cargo-chef:latest-rust-1 AS chef
-WORKDIR /app
-
-FROM chef AS planner
-COPY . .
-RUN cargo chef prepare --recipe-path recipe.json
-
-FROM chef AS builder
-COPY --from=planner /app/recipe.json recipe.json
-
-RUN cargo chef cook --release --recipe-path recipe.json
-
-RUN apt-get update && apt-get install -y protobuf-compiler
-
-COPY . .
-RUN cargo build --release --bin status_api - - -FROM ubuntu AS runtime -WORKDIR /app - -RUN apt-get update -RUN apt-get install -y openssl ca-certificates libssl3 libssl-dev - -COPY --from=builder /app/target/release/status_api /usr/local/bin -ENTRYPOINT ["/usr/local/bin/status_api"] - - - diff --git a/examples/status_api/src/main.rs b/examples/status_api/src/main.rs deleted file mode 100644 index aa65f4d6..00000000 --- a/examples/status_api/src/main.rs +++ /dev/null @@ -1,108 +0,0 @@ -use std::{ - path::PathBuf, - sync::{ - Arc, - atomic::{AtomicU64, Ordering}, - }, -}; - -use async_trait::async_trait; -use axum::{ - Router, - extract::State, - response::{IntoResponse, Response}, - routing::get, -}; -use commit_boost::prelude::*; -use eyre::Result; -use lazy_static::lazy_static; -use prometheus::IntCounter; -use reqwest::{StatusCode, header::HeaderMap}; -use serde::Deserialize; -use tracing::info; - -lazy_static! { - pub static ref CHECK_RECEIVED_COUNTER: IntCounter = - IntCounter::new("checks", "successful /check requests received").unwrap(); -} - -/// Extra config loaded from the config file -/// You should add an `inc_amount` field to the config file in the `pbs` -/// section. Be sure also to change the `pbs.docker_image` field, -/// `test_status_api` in this case (from scripts/build_local_modules.sh). 
-#[derive(Debug, Deserialize)] -struct ExtraConfig { - inc_amount: u64, -} - -// Extra state available at runtime -#[derive(Clone)] -struct MyBuilderState { - inc_amount: u64, - counter: Arc, -} - -impl BuilderApiState for MyBuilderState {} - -impl MyBuilderState { - fn from_config(extra: ExtraConfig) -> Self { - Self { inc_amount: extra.inc_amount, counter: Arc::new(AtomicU64::new(0)) } - } - - fn inc(&self) { - self.counter.fetch_add(self.inc_amount, Ordering::Relaxed); - } - fn get(&self) -> u64 { - self.counter.load(Ordering::Relaxed) - } -} - -struct MyBuilderApi; - -#[async_trait] -impl BuilderApi for MyBuilderApi { - async fn get_status(req_headers: HeaderMap, state: PbsState) -> Result<()> { - state.data.inc(); - info!("THIS IS A CUSTOM LOG"); - CHECK_RECEIVED_COUNTER.inc(); - get_status(req_headers, state).await - } - - async fn reload(state: PbsState) -> Result> { - let (pbs_config, extra_config) = load_pbs_custom_config::().await?; - let mut data = state.data.clone(); - data.inc_amount = extra_config.inc_amount; - - let empty_config_path = PathBuf::new(); - Ok(PbsState::new(pbs_config, empty_config_path).with_data(data)) - } - - fn extra_routes() -> Option>> { - let mut router = Router::new(); - router = router.route("/check", get(handle_check)); - Some(router) - } -} - -async fn handle_check(State(state): State>) -> Response { - (StatusCode::OK, format!("Received {count} status requests!", count = state.read().data.get())) - .into_response() -} - -#[tokio::main] -async fn main() -> Result<()> { - color_eyre::install()?; - - let (pbs_config, extra) = load_pbs_custom_config::().await?; - let chain = pbs_config.chain; - let _guard = initialize_tracing_log(PBS_SERVICE_NAME, LogsSettings::from_env_config()?)?; - - let custom_state = MyBuilderState::from_config(extra); - let empty_config_path = PathBuf::new(); - let state = PbsState::new(pbs_config, empty_config_path).with_data(custom_state); - - 
PbsService::register_metric(Box::new(CHECK_RECEIVED_COUNTER.clone())); - PbsService::init_metrics(chain)?; - - PbsService::run::(state).await -} diff --git a/tests/data/configs/pbs.happy.toml b/tests/data/configs/pbs.happy.toml index 67b39911..e4695b61 100644 --- a/tests/data/configs/pbs.happy.toml +++ b/tests/data/configs/pbs.happy.toml @@ -14,7 +14,6 @@ timeout_get_header_ms = 950 timeout_get_payload_ms = 4000 timeout_register_validator_ms = 3000 wait_all_registrations = true -with_signer = false [[relays]] diff --git a/tests/data/configs/signer.happy.toml b/tests/data/configs/signer.happy.toml index 6fb76445..8a546401 100644 --- a/tests/data/configs/signer.happy.toml +++ b/tests/data/configs/signer.happy.toml @@ -2,7 +2,6 @@ chain = "Hoodi" [pbs] docker_image = "ghcr.io/commit-boost/pbs:latest" -with_signer = true host = "127.0.0.1" port = 18550 relay_check = true diff --git a/tests/src/mock_relay.rs b/tests/src/mock_relay.rs index 21accb34..0eb4eabc 100644 --- a/tests/src/mock_relay.rs +++ b/tests/src/mock_relay.rs @@ -40,11 +40,17 @@ use tracing::{debug, error}; use tree_hash::TreeHash; pub async fn start_mock_relay_service(state: Arc, port: u16) -> eyre::Result<()> { - let app = mock_relay_app_router(state); - let socket = SocketAddr::new("0.0.0.0".parse()?, port); let listener = TcpListener::bind(socket).await?; + start_mock_relay_service_with_listener(state, listener).await +} +/// Like [`start_mock_relay_service`], but accepts a pre-bound [`TcpListener`]. 
+pub async fn start_mock_relay_service_with_listener( + state: Arc, + listener: TcpListener, +) -> eyre::Result<()> { + let app = mock_relay_app_router(state); axum::serve(listener, app).await?; Ok(()) } diff --git a/tests/src/mock_ssv_public.rs b/tests/src/mock_ssv_public.rs index a014db42..dcd62df5 100644 --- a/tests/src/mock_ssv_public.rs +++ b/tests/src/mock_ssv_public.rs @@ -30,6 +30,18 @@ pub async fn create_mock_public_ssv_server( port: u16, state: Option, ) -> Result, axum::Error> { + let address = SocketAddr::from(([127, 0, 0, 1], port)); + let listener = TcpListener::bind(address).await.map_err(axum::Error::new)?; + create_mock_public_ssv_server_with_listener(listener, state).await +} + +/// Like [`create_mock_public_ssv_server`], but accepts a pre-bound +/// [`TcpListener`]. +pub async fn create_mock_public_ssv_server_with_listener( + listener: TcpListener, + state: Option, +) -> Result, axum::Error> { + let port = listener.local_addr().map(|a| a.port()).unwrap_or(0); let data = include_str!("../../tests/data/ssv_valid_public.json"); let response = serde_json::from_str::(data).expect("failed to parse test data"); @@ -46,8 +58,6 @@ pub async fn create_mock_public_ssv_server( .with_state(state) .into_make_service(); - let address = SocketAddr::from(([127, 0, 0, 1], port)); - let listener = TcpListener::bind(address).await.map_err(axum::Error::new)?; let server = axum::serve(listener, router).with_graceful_shutdown(async { tokio::signal::ctrl_c().await.expect("Failed to listen for shutdown signal"); }); diff --git a/tests/src/utils.rs b/tests/src/utils.rs index dd0ba733..bfcbccc3 100644 --- a/tests/src/utils.rs +++ b/tests/src/utils.rs @@ -27,6 +27,18 @@ pub fn get_local_address(port: u16) -> String { format!("http://0.0.0.0:{port}") } +/// Bind to port 0 and let the OS assign an unused ephemeral port. +/// +/// The returned listener keeps the port reserved. 
Pass it to +/// [`PbsService::run_with_listener`] or +/// [`start_mock_relay_service_with_listener`] so the socket is never released +/// between allocation and use (zero TOCTOU race). Extract the port with +/// `listener.local_addr().unwrap().port()` when you need the number for config +/// or client construction. +pub async fn get_free_listener() -> tokio::net::TcpListener { + tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap() +} + static SYNC_SETUP: Once = Once::new(); pub fn setup_test_env() { SYNC_SETUP.call_once(|| { @@ -95,7 +107,7 @@ pub fn get_pbs_config(port: u16) -> PbsConfig { } pub fn get_pbs_static_config(pbs_config: PbsConfig) -> StaticPbsConfig { - StaticPbsConfig { docker_image: String::from(""), pbs_config, with_signer: true } + StaticPbsConfig { docker_image: String::from(""), pbs_config } } pub fn get_commit_boost_config(pbs_static_config: StaticPbsConfig) -> CommitBoostConfig { @@ -120,7 +132,6 @@ pub fn to_pbs_config( chain, endpoint: SocketAddr::new(pbs_config.host.into(), pbs_config.port), pbs_config: Arc::new(pbs_config), - signer_client: None, all_relays: relays.clone(), relays, registry_muxes: None, diff --git a/tests/tests/config.rs b/tests/tests/config.rs index 27b02318..60e72aec 100644 --- a/tests/tests/config.rs +++ b/tests/tests/config.rs @@ -41,7 +41,6 @@ async fn test_load_pbs_happy() -> Result<()> { // Docker and general settings assert_eq!(config.pbs.docker_image, "ghcr.io/commit-boost/pbs:latest"); - assert!(!config.pbs.with_signer); assert_eq!(config.pbs.pbs_config.host, "127.0.0.1".parse::().unwrap()); assert_eq!(config.pbs.pbs_config.port, 18550); assert!(config.pbs.pbs_config.relay_check); diff --git a/tests/tests/pbs_cfg_file_update.rs b/tests/tests/pbs_cfg_file_update.rs index b70ab47a..01f34e2d 100644 --- a/tests/tests/pbs_cfg_file_update.rs +++ b/tests/tests/pbs_cfg_file_update.rs @@ -10,11 +10,13 @@ use cb_common::{ signer::random_secret, types::Chain, }; -use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; 
+use cb_pbs::{PbsService, PbsState}; use cb_tests::{ - mock_relay::{MockRelayState, start_mock_relay_service}, + mock_relay::{MockRelayState, start_mock_relay_service_with_listener}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, + utils::{ + generate_mock_relay, get_free_listener, get_pbs_config, setup_test_env, to_pbs_config, + }, }; use eyre::Result; use lh_types::ForkName; @@ -32,20 +34,23 @@ async fn test_cfg_file_update() -> Result<()> { let pubkey = signer.public_key(); let chain = Chain::Hoodi; - let pbs_port = 3730; + let pbs_listener = get_free_listener().await; + let relay1_listener = get_free_listener().await; + let relay2_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay1_port = relay1_listener.local_addr().unwrap().port(); + let relay2_port = relay2_listener.local_addr().unwrap().port(); // Start relay 1 - let relay1_port = pbs_port + 1; let relay1 = generate_mock_relay(relay1_port, pubkey.clone())?; let relay1_state = Arc::new(MockRelayState::new(chain, signer.clone())); - tokio::spawn(start_mock_relay_service(relay1_state.clone(), relay1_port)); + tokio::spawn(start_mock_relay_service_with_listener(relay1_state.clone(), relay1_listener)); // Start relay 2 - let relay2_port = relay1_port + 1; let relay2 = generate_mock_relay(relay2_port, pubkey.clone())?; let relay2_id = relay2.id.clone().to_string(); let relay2_state = Arc::new(MockRelayState::new(chain, signer)); - tokio::spawn(start_mock_relay_service(relay2_state.clone(), relay2_port)); + tokio::spawn(start_mock_relay_service_with_listener(relay2_state.clone(), relay2_listener)); // Make a config with relay 1 only let pbs_config = PbsConfig { @@ -76,7 +81,6 @@ async fn test_cfg_file_update() -> Result<()> { pbs: StaticPbsConfig { docker_image: "cb-fake-repo/cb-fake-image:latest".to_string(), pbs_config: pbs_config.clone(), - with_signer: false, }, muxes: None, modules: None, 
@@ -109,7 +113,7 @@ async fn test_cfg_file_update() -> Result<()> { // Run the PBS service let config = to_pbs_config(chain, get_pbs_config(pbs_port), vec![relay1.clone()]); let state = PbsState::new(config, config_path.clone()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers - extra time for the file watcher tokio::time::sleep(Duration::from_millis(1000)).await; @@ -128,7 +132,6 @@ async fn test_cfg_file_update() -> Result<()> { pbs: StaticPbsConfig { docker_image: "cb-fake-repo/cb-fake-image:latest".to_string(), pbs_config, - with_signer: false, }, muxes: None, modules: None, diff --git a/tests/tests/pbs_get_header.rs b/tests/tests/pbs_get_header.rs index b7f3c4a5..679e6ab5 100644 --- a/tests/tests/pbs_get_header.rs +++ b/tests/tests/pbs_get_header.rs @@ -12,11 +12,13 @@ use cb_common::{ get_consensus_version_header, timestamp_of_slot_start_sec, }, }; -use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; +use cb_pbs::{PbsService, PbsState}; use cb_tests::{ - mock_relay::{MockRelayState, start_mock_relay_service}, + mock_relay::{MockRelayState, start_mock_relay_service_with_listener}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, + utils::{ + generate_mock_relay, get_free_listener, get_pbs_config, setup_test_env, to_pbs_config, + }, }; use eyre::Result; use lh_types::{ForkVersionDecode, beacon_response::EmptyMetadata}; @@ -29,7 +31,6 @@ use url::Url; #[tokio::test] async fn test_get_header() -> Result<()> { test_get_header_impl( - 3200, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), 1, @@ -47,7 +48,6 @@ async fn test_get_header() -> Result<()> { #[tokio::test] async fn test_get_header_ssz() -> Result<()> { test_get_header_impl( - 3202, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), 1, @@ 
-67,7 +67,6 @@ async fn test_get_header_ssz() -> Result<()> { #[tokio::test] async fn test_get_header_ssz_into_json() -> Result<()> { test_get_header_impl( - 3204, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Json]), 1, @@ -86,7 +85,6 @@ async fn test_get_header_ssz_into_json() -> Result<()> { #[tokio::test] async fn test_get_header_multitype_ssz() -> Result<()> { test_get_header_impl( - 3206, HashSet::from([EncodingType::Ssz, EncodingType::Json]), HashSet::from([EncodingType::Ssz]), 1, @@ -105,7 +103,6 @@ async fn test_get_header_multitype_ssz() -> Result<()> { #[tokio::test] async fn test_get_header_multitype_json() -> Result<()> { test_get_header_impl( - 3208, HashSet::from([EncodingType::Ssz, EncodingType::Json]), HashSet::from([EncodingType::Json]), 1, @@ -125,7 +122,6 @@ async fn test_get_header_multitype_json() -> Result<()> { #[tokio::test] async fn test_get_header_light() -> Result<()> { test_get_header_impl( - 3210, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), 1, @@ -143,7 +139,6 @@ async fn test_get_header_light() -> Result<()> { #[tokio::test] async fn test_get_header_ssz_light() -> Result<()> { test_get_header_impl( - 3212, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), 1, @@ -163,7 +158,6 @@ async fn test_get_header_ssz_light() -> Result<()> { #[tokio::test] async fn test_get_header_ssz_into_json_light() -> Result<()> { test_get_header_impl( - 3214, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Json]), 1, @@ -182,7 +176,6 @@ async fn test_get_header_ssz_into_json_light() -> Result<()> { #[tokio::test] async fn test_get_header_multitype_ssz_light() -> Result<()> { test_get_header_impl( - 3216, HashSet::from([EncodingType::Ssz, EncodingType::Json]), HashSet::from([EncodingType::Ssz]), 1, @@ -201,7 +194,6 @@ async fn test_get_header_multitype_ssz_light() -> Result<()> { #[tokio::test] async fn 
test_get_header_multitype_json_light() -> Result<()> { test_get_header_impl( - 3218, HashSet::from([EncodingType::Ssz, EncodingType::Json]), HashSet::from([EncodingType::Json]), 1, @@ -221,7 +213,6 @@ async fn test_get_header_multitype_json_light() -> Result<()> { /// fine; if the parent block fetch fails the relay response is still returned /// (extra validation is skipped with a warning). async fn test_get_header_impl( - pbs_port: u16, accept_types: HashSet, relay_types: HashSet, expected_try_count: u64, @@ -237,13 +228,16 @@ async fn test_get_header_impl( let signer = random_secret(); let pubkey = signer.public_key(); let chain = Chain::Holesky; - let relay_port = pbs_port + 1; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); let mut mock_state = MockRelayState::new(chain, signer).with_bid_value(bid_value); mock_state.supported_content_types = Arc::new(relay_types); let mock_state = Arc::new(mock_state); let mock_relay = generate_mock_relay(relay_port, pubkey)?; - tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_listener)); // Run the PBS service let mut pbs_config = get_pbs_config(pbs_port); @@ -252,7 +246,7 @@ async fn test_get_header_impl( pbs_config.rpc_url = rpc_url; let config = to_pbs_config(chain, pbs_config, vec![mock_relay.clone()]); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -310,20 +304,24 @@ async fn test_get_header_returns_204_if_relay_down() -> Result<()> { let pubkey = signer.public_key(); let chain = Chain::Holesky; - let pbs_port = 3300; - let 
relay_port = pbs_port + 1; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); // Create a mock relay client let mock_state = Arc::new(MockRelayState::new(chain, signer)); let mock_relay = generate_mock_relay(relay_port, pubkey)?; // Don't start the relay - // tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); + // tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), + // relay_listener)); + drop(relay_listener); // Run the PBS service let config = to_pbs_config(chain, get_pbs_config(pbs_port), vec![mock_relay.clone()]); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -344,18 +342,20 @@ async fn test_get_header_returns_400_if_request_is_invalid() -> Result<()> { let pubkey = signer.public_key(); let chain = Chain::Holesky; - let pbs_port = 3400; - let relay_port = pbs_port + 1; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); // Run a mock relay let mock_state = Arc::new(MockRelayState::new(chain, signer)); let mock_relay = generate_mock_relay(relay_port, pubkey.clone())?; - tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_listener)); // Run the PBS service let config = to_pbs_config(chain, get_pbs_config(pbs_port), vec![mock_relay.clone()]); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + 
tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -394,14 +394,13 @@ async fn test_get_header_all_modes_enforce_min_bid() -> Result<()> { // handled gracefully (extra validation is skipped with a warning). let fake_rpc: Url = "http://127.0.0.1:1".parse()?; - for (pbs_port, mode, rpc_url) in [ - (3500u16, HeaderValidationMode::Standard, None), - (3502u16, HeaderValidationMode::None, None), - (3504u16, HeaderValidationMode::Extra, Some(fake_rpc.clone())), + for (mode, rpc_url) in [ + (HeaderValidationMode::Standard, None), + (HeaderValidationMode::None, None), + (HeaderValidationMode::Extra, Some(fake_rpc.clone())), ] { // Bid below min → all modes reject (204). test_get_header_impl( - pbs_port, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Json]), 1, @@ -416,7 +415,6 @@ async fn test_get_header_all_modes_enforce_min_bid() -> Result<()> { // Bid above min → all modes accept (200). test_get_header_impl( - pbs_port + 100, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Json]), 1, @@ -447,12 +445,15 @@ async fn test_get_header_ssz_bid_value_round_trip() -> Result<()> { // Use a distinctive value so accidental zero-matches are impossible. 
let relay_bid = U256::from(999_888_777u64); - for (pbs_port, fork_name) in [(3508u16, ForkName::Electra), (3510u16, ForkName::Fulu)] { - let relay_port = pbs_port + 1; + for fork_name in [ForkName::Electra, ForkName::Fulu] { + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); let mock_state = Arc::new(MockRelayState::new(chain, signer.clone()).with_bid_value(relay_bid)); let mock_relay = generate_mock_relay(relay_port, pubkey.clone())?; - tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_listener)); let mut pbs_config = get_pbs_config(pbs_port); // None mode: PBS forwards the raw SSZ bytes without re-encoding. @@ -460,7 +461,7 @@ async fn test_get_header_ssz_bid_value_round_trip() -> Result<()> { pbs_config.min_bid_wei = U256::ZERO; let config = to_pbs_config(chain, pbs_config, vec![mock_relay]); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); tokio::time::sleep(Duration::from_millis(100)).await; @@ -490,9 +491,10 @@ async fn test_get_header_unsupported_fork_returns_400() -> Result<()> { let signer = random_secret(); let chain = Chain::Holesky; - let relay_port = 3512u16; + let relay_listener = get_free_listener().await; + let relay_port = relay_listener.local_addr().unwrap().port(); let mock_state = Arc::new(MockRelayState::new(chain, signer.clone())); - tokio::spawn(start_mock_relay_service(mock_state, relay_port)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state, relay_listener)); tokio::time::sleep(Duration::from_millis(100)).await; @@ -625,7 +627,6 @@ async fn test_get_header_bid_validation_matrix() -> Result<()> { for (i, &(fork, encoding, 
mode, relay_bid, expected_status)) in cases.iter().enumerate() { test_get_header_impl( - 3900u16 + (i as u16 * 2), HashSet::from([encoding]), HashSet::from([encoding]), 1, @@ -656,22 +657,25 @@ async fn test_get_header_none_mode_bypasses_pubkey_validation() -> Result<()> { let signer = random_secret(); let wrong_pubkey = random_secret().public_key(); - for (pbs_port, mode, expected_status) in [ - (3504u16, HeaderValidationMode::Standard, StatusCode::NO_CONTENT), - (3506u16, HeaderValidationMode::None, StatusCode::OK), + for (mode, expected_status) in [ + (HeaderValidationMode::Standard, StatusCode::NO_CONTENT), + (HeaderValidationMode::None, StatusCode::OK), ] { - let relay_port = pbs_port + 1; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); let mock_state = Arc::new(MockRelayState::new(chain, signer.clone())); // Register with `wrong_pubkey` — PBS will expect this key but the relay // embeds `signer.public_key()`, causing a mismatch in Standard mode. 
let mock_relay = generate_mock_relay(relay_port, wrong_pubkey.clone())?; - tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_listener)); let mut pbs_config = get_pbs_config(pbs_port); pbs_config.header_validation_mode = mode; let config = to_pbs_config(chain, pbs_config, vec![mock_relay]); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); tokio::time::sleep(Duration::from_millis(100)).await; diff --git a/tests/tests/pbs_get_status.rs b/tests/tests/pbs_get_status.rs index cd2ab51d..f7497b6e 100644 --- a/tests/tests/pbs_get_status.rs +++ b/tests/tests/pbs_get_status.rs @@ -1,11 +1,13 @@ use std::{path::PathBuf, sync::Arc, time::Duration}; use cb_common::{signer::random_secret, types::Chain}; -use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; +use cb_pbs::{PbsService, PbsState}; use cb_tests::{ - mock_relay::{MockRelayState, start_mock_relay_service}, + mock_relay::{MockRelayState, start_mock_relay_service_with_listener}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, + utils::{ + generate_mock_relay, get_free_listener, get_pbs_config, setup_test_env, to_pbs_config, + }, }; use eyre::Result; use reqwest::StatusCode; @@ -18,21 +20,24 @@ async fn test_get_status() -> Result<()> { let pubkey = signer.public_key(); let chain = Chain::Holesky; - let pbs_port = 3500; - let relay_0_port = pbs_port + 1; - let relay_1_port = pbs_port + 2; + let pbs_listener = get_free_listener().await; + let relay_0_listener = get_free_listener().await; + let relay_1_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_0_port = relay_0_listener.local_addr().unwrap().port(); + let relay_1_port = 
relay_1_listener.local_addr().unwrap().port(); let relays = vec![ generate_mock_relay(relay_0_port, pubkey.clone())?, generate_mock_relay(relay_1_port, pubkey)?, ]; let mock_state = Arc::new(MockRelayState::new(chain, signer)); - tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_0_port)); - tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_1_port)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_0_listener)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_1_listener)); let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays.clone()); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -54,18 +59,22 @@ async fn test_get_status_returns_502_if_relay_down() -> Result<()> { let pubkey = signer.public_key(); let chain = Chain::Holesky; - let pbs_port = 3600; - let relay_port = pbs_port + 1; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); let relays = vec![generate_mock_relay(relay_port, pubkey)?]; let mock_state = Arc::new(MockRelayState::new(chain, signer)); // Don't start the relay - // tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); + // tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), + // relay_listener)); + drop(relay_listener); let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays.clone()); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers 
tokio::time::sleep(Duration::from_millis(100)).await; diff --git a/tests/tests/pbs_mux.rs b/tests/tests/pbs_mux.rs index 93731aa5..5d184c04 100644 --- a/tests/tests/pbs_mux.rs +++ b/tests/tests/pbs_mux.rs @@ -19,15 +19,18 @@ use cb_common::{ types::Chain, utils::{EncodingType, ForkName, ResponseReadError, set_ignore_content_length}, }; -use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; +use cb_pbs::{PbsService, PbsState}; use cb_tests::{ - mock_relay::{MockRelayState, start_mock_relay_service}, + mock_relay::{MockRelayState, start_mock_relay_service_with_listener}, mock_ssv_node::{SsvNodeMockState, create_mock_ssv_node_server}, - mock_ssv_public::{PublicSsvMockState, TEST_HTTP_TIMEOUT, create_mock_public_ssv_server}, + mock_ssv_public::{ + PublicSsvMockState, TEST_HTTP_TIMEOUT, create_mock_public_ssv_server, + create_mock_public_ssv_server_with_listener, + }, mock_validator::MockValidator, utils::{ - bls_pubkey_from_hex_unchecked, generate_mock_relay, get_pbs_config, setup_test_env, - to_pbs_config, + bls_pubkey_from_hex_unchecked, generate_mock_relay, get_free_listener, get_pbs_config, + setup_test_env, to_pbs_config, }, }; use eyre::Result; @@ -205,17 +208,24 @@ async fn test_mux() -> Result<()> { let pubkey = signer.public_key(); let chain = Chain::Holesky; - let pbs_port = 3700; - - let mux_relay_1 = generate_mock_relay(pbs_port + 1, pubkey.clone())?; - let mux_relay_2 = generate_mock_relay(pbs_port + 2, pubkey.clone())?; - let default_relay = generate_mock_relay(pbs_port + 3, pubkey.clone())?; + let pbs_listener = get_free_listener().await; + let relay_1_listener = get_free_listener().await; + let relay_2_listener = get_free_listener().await; + let relay_3_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_1_port = relay_1_listener.local_addr().unwrap().port(); + let relay_2_port = relay_2_listener.local_addr().unwrap().port(); + let relay_3_port = relay_3_listener.local_addr().unwrap().port(); + 
+ let mux_relay_1 = generate_mock_relay(relay_1_port, pubkey.clone())?; + let mux_relay_2 = generate_mock_relay(relay_2_port, pubkey.clone())?; + let default_relay = generate_mock_relay(relay_3_port, pubkey.clone())?; // Run 3 mock relays let mock_state = Arc::new(MockRelayState::new(chain, signer)); - tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); - tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 2)); - tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 3)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_1_listener)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_2_listener)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_3_listener)); // Register all relays in PBS config let relays = vec![default_relay.clone()]; @@ -235,7 +245,7 @@ async fn test_mux() -> Result<()> { // Run PBS service let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -315,10 +325,19 @@ async fn test_ssv_multi_with_node() -> Result<()> { let pubkey2 = signer2.public_key(); let chain = Chain::Hoodi; - let pbs_port = 3711; + let pbs_listener = get_free_listener().await; + let ssv_node_listener = get_free_listener().await; + let ssv_public_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let ssv_node_port = ssv_node_listener.local_addr().unwrap().port(); + let ssv_public_port = ssv_public_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); + // Drop the ssv_node_listener so create_mock_ssv_node_server can bind the port + // (no _with_listener variant available for 
the SSV node mock server). + drop(ssv_node_listener); // Start the mock SSV node - let ssv_node_port = pbs_port + 1; let ssv_node_url = Url::parse(&format!("http://localhost:{ssv_node_port}/v1/"))?; let mock_ssv_node_state = SsvNodeMockState { validators: Arc::new(RwLock::new(vec![ @@ -331,21 +350,23 @@ async fn test_ssv_multi_with_node() -> Result<()> { create_mock_ssv_node_server(ssv_node_port, Some(mock_ssv_node_state.clone())).await?; // Start the mock SSV public API - let ssv_public_port = ssv_node_port + 1; let ssv_public_url = Url::parse(&format!("http://localhost:{ssv_public_port}/api/v4/"))?; let mock_ssv_public_state = PublicSsvMockState { validators: Arc::new(RwLock::new(vec![SSVPublicValidator { pubkey: pubkey.clone() }])), force_timeout: Arc::new(RwLock::new(false)), }; - let ssv_public_handle = - create_mock_public_ssv_server(ssv_public_port, Some(mock_ssv_public_state.clone())).await?; + let ssv_public_handle = create_mock_public_ssv_server_with_listener( + ssv_public_listener, + Some(mock_ssv_public_state.clone()), + ) + .await?; // Start a mock relay to be used by the mux - let relay_port = ssv_public_port + 1; let relay = generate_mock_relay(relay_port, pubkey.clone())?; let relay_id = relay.id.clone().to_string(); let relay_state = Arc::new(MockRelayState::new(chain, signer)); - let relay_task = tokio::spawn(start_mock_relay_service(relay_state.clone(), relay_port)); + let relay_task = + tokio::spawn(start_mock_relay_service_with_listener(relay_state.clone(), relay_listener)); // Create the registry mux let loader = MuxKeysLoader::Registry { @@ -379,7 +400,7 @@ async fn test_ssv_multi_with_node() -> Result<()> { // Run PBS service let state = PbsState::new(config, PathBuf::new()); - let pbs_server = tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + let pbs_server = tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); info!("Started PBS server with pubkey {pubkey}"); // Wait for the server to start @@ -393,7 +414,7 @@ 
async fn test_ssv_multi_with_node() -> Result<()> { .do_get_header(Some(pubkey2.clone()), HashSet::new(), ForkName::Electra) .await?; assert_eq!(res.status(), StatusCode::OK); - assert_eq!(relay_state.received_get_header(), 1); // pubkey2 was loaded from the SSV node + assert_eq!(relay_state.received_get_header(), 1); // pubkey2 was loaded from the SSV node // Shut down the server handles pbs_server.abort(); @@ -415,10 +436,17 @@ async fn test_ssv_multi_with_public() -> Result<()> { let pubkey2 = signer2.public_key(); let chain = Chain::Hoodi; - let pbs_port = 3720; - - // Start the mock SSV node - let ssv_node_port = pbs_port + 1; + let pbs_listener = get_free_listener().await; + let ssv_node_listener = get_free_listener().await; + let ssv_public_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let ssv_node_port = ssv_node_listener.local_addr().unwrap().port(); + let ssv_public_port = ssv_public_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); + drop(ssv_node_listener); // SSV node is intentionally down — release the reserved port + + // Start the mock SSV node (not started — simulating it being down) let ssv_node_url = Url::parse(&format!("http://localhost:{ssv_node_port}/v1/"))?; // Don't start the SSV node server to simulate it being down @@ -426,7 +454,6 @@ async fn test_ssv_multi_with_public() -> Result<()> { // Some(mock_ssv_node_state.clone())).await?; // Start the mock SSV public API - let ssv_public_port = ssv_node_port + 1; let ssv_public_url = Url::parse(&format!("http://localhost:{ssv_public_port}/api/v4/"))?; let mock_ssv_public_state = PublicSsvMockState { validators: Arc::new(RwLock::new(vec![ @@ -435,15 +462,18 @@ async fn test_ssv_multi_with_public() -> Result<()> { ])), force_timeout: Arc::new(RwLock::new(false)), }; - let ssv_public_handle = - create_mock_public_ssv_server(ssv_public_port, 
Some(mock_ssv_public_state.clone())).await?; + let ssv_public_handle = create_mock_public_ssv_server_with_listener( + ssv_public_listener, + Some(mock_ssv_public_state.clone()), + ) + .await?; // Start a mock relay to be used by the mux - let relay_port = ssv_public_port + 1; let relay = generate_mock_relay(relay_port, pubkey.clone())?; let relay_id = relay.id.clone().to_string(); let relay_state = Arc::new(MockRelayState::new(chain, signer)); - let relay_task = tokio::spawn(start_mock_relay_service(relay_state.clone(), relay_port)); + let relay_task = + tokio::spawn(start_mock_relay_service_with_listener(relay_state.clone(), relay_listener)); // Create the registry mux let loader = MuxKeysLoader::Registry { @@ -477,7 +507,7 @@ async fn test_ssv_multi_with_public() -> Result<()> { // Run PBS service let state = PbsState::new(config, PathBuf::new()); - let pbs_server = tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + let pbs_server = tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); info!("Started PBS server with pubkey {pubkey}"); // Wait for the server to start @@ -491,7 +521,7 @@ async fn test_ssv_multi_with_public() -> Result<()> { .do_get_header(Some(pubkey2.clone()), HashSet::new(), ForkName::Electra) .await?; assert_eq!(res.status(), StatusCode::OK); - assert_eq!(relay_state.received_get_header(), 1); // pubkey2 was loaded from the SSV public API + assert_eq!(relay_state.received_get_header(), 1); // pubkey2 was loaded from the SSV public API // Shut down the server handles pbs_server.abort(); diff --git a/tests/tests/pbs_mux_refresh.rs b/tests/tests/pbs_mux_refresh.rs index 11a96712..aff8300b 100644 --- a/tests/tests/pbs_mux_refresh.rs +++ b/tests/tests/pbs_mux_refresh.rs @@ -6,12 +6,12 @@ use cb_common::{ signer::random_secret, types::Chain, }; -use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; +use cb_pbs::{PbsService, PbsState}; use cb_tests::{ - mock_relay::{MockRelayState, start_mock_relay_service}, - 
mock_ssv_public::{PublicSsvMockState, create_mock_public_ssv_server}, + mock_relay::{MockRelayState, start_mock_relay_service_with_listener}, + mock_ssv_public::{PublicSsvMockState, create_mock_public_ssv_server_with_listener}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_config, to_pbs_config}, + utils::{generate_mock_relay, get_free_listener, get_pbs_config, to_pbs_config}, }; use eyre::Result; use lh_types::ForkName; @@ -39,10 +39,15 @@ async fn test_auto_refresh() -> Result<()> { let new_mux_pubkey = new_mux_signer.public_key(); let chain = Chain::Hoodi; - let pbs_port = 3710; + let pbs_listener = get_free_listener().await; + let ssv_listener = get_free_listener().await; + let default_relay_listener = get_free_listener().await; + let mux_relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let ssv_api_port = ssv_listener.local_addr().unwrap().port(); + let default_relay_port = default_relay_listener.local_addr().unwrap().port(); + let mux_relay_port = mux_relay_listener.local_addr().unwrap().port(); - // Start the mock SSV API server - let ssv_api_port = pbs_port + 1; // Intentionally missing a trailing slash to ensure this is handled properly let ssv_api_url = Url::parse(&format!("http://localhost:{ssv_api_port}/api/v4"))?; let mock_ssv_state = PublicSsvMockState { @@ -51,23 +56,24 @@ async fn test_auto_refresh() -> Result<()> { }])), force_timeout: Arc::new(RwLock::new(false)), }; - let ssv_server_handle = - create_mock_public_ssv_server(ssv_api_port, Some(mock_ssv_state.clone())).await?; + create_mock_public_ssv_server_with_listener(ssv_listener, Some(mock_ssv_state.clone())).await?; // Start a default relay for non-mux keys - let default_relay_port = ssv_api_port + 1; let default_relay = generate_mock_relay(default_relay_port, default_pubkey.clone())?; let default_relay_state = Arc::new(MockRelayState::new(chain, default_signer.clone())); - let default_relay_task = - 
tokio::spawn(start_mock_relay_service(default_relay_state.clone(), default_relay_port)); + let default_relay_task = tokio::spawn(start_mock_relay_service_with_listener( + default_relay_state.clone(), + default_relay_listener, + )); // Start a mock relay to be used by the mux - let mux_relay_port = default_relay_port + 1; let mux_relay = generate_mock_relay(mux_relay_port, default_pubkey.clone())?; let mux_relay_id = mux_relay.id.clone().to_string(); let mux_relay_state = Arc::new(MockRelayState::new(chain, default_signer)); - let mux_relay_task = - tokio::spawn(start_mock_relay_service(mux_relay_state.clone(), mux_relay_port)); + let mux_relay_task = tokio::spawn(start_mock_relay_service_with_listener( + mux_relay_state.clone(), + mux_relay_listener, + )); // Create the registry mux let loader = MuxKeysLoader::Registry { @@ -100,7 +106,7 @@ async fn test_auto_refresh() -> Result<()> { // Run PBS service let state = PbsState::new(config, PathBuf::new()); - let pbs_server = tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + let pbs_server = tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); info!("Started PBS server with pubkey {default_pubkey}"); // Wait for the server to start @@ -177,7 +183,6 @@ async fn test_auto_refresh() -> Result<()> { // Shut down the server handles pbs_server.abort(); - ssv_server_handle.abort(); default_relay_task.abort(); mux_relay_task.abort(); diff --git a/tests/tests/pbs_post_blinded_blocks.rs b/tests/tests/pbs_post_blinded_blocks.rs index 36214c15..f921c6c6 100644 --- a/tests/tests/pbs_post_blinded_blocks.rs +++ b/tests/tests/pbs_post_blinded_blocks.rs @@ -7,11 +7,13 @@ use cb_common::{ types::Chain, utils::{EncodingType, ForkName}, }; -use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; +use cb_pbs::{PbsService, PbsState}; use cb_tests::{ - mock_relay::{MockRelayState, start_mock_relay_service}, + mock_relay::{MockRelayState, start_mock_relay_service_with_listener}, mock_validator::{MockValidator, 
load_test_signed_blinded_block}, - utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, + utils::{ + generate_mock_relay, get_free_listener, get_pbs_config, setup_test_env, to_pbs_config, + }, }; use eyre::Result; use lh_types::beacon_response::ForkVersionDecode; @@ -21,7 +23,6 @@ use tracing::info; #[tokio::test] async fn test_submit_block_v1() -> Result<()> { let res = submit_block_impl( - 3800, BuilderApiVersion::V1, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -46,7 +47,6 @@ async fn test_submit_block_v1() -> Result<()> { #[tokio::test] async fn test_submit_block_v2() -> Result<()> { let res = submit_block_impl( - 3802, BuilderApiVersion::V2, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -67,7 +67,6 @@ async fn test_submit_block_v2() -> Result<()> { #[tokio::test] async fn test_submit_block_v2_without_relay_support() -> Result<()> { let res = submit_block_impl( - 3804, BuilderApiVersion::V2, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -88,7 +87,6 @@ async fn test_submit_block_v2_without_relay_support() -> Result<()> { #[tokio::test] async fn test_submit_block_on_broken_relay() -> Result<()> { let _res = submit_block_impl( - 3806, BuilderApiVersion::V2, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -106,7 +104,6 @@ async fn test_submit_block_on_broken_relay() -> Result<()> { #[tokio::test] async fn test_submit_block_v1_ssz() -> Result<()> { let res = submit_block_impl( - 3808, BuilderApiVersion::V1, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -132,7 +129,6 @@ async fn test_submit_block_v1_ssz() -> Result<()> { #[tokio::test] async fn test_submit_block_v2_ssz() -> Result<()> { let res = submit_block_impl( - 3810, BuilderApiVersion::V2, HashSet::from([EncodingType::Ssz]), 
HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -153,7 +149,6 @@ async fn test_submit_block_v2_ssz() -> Result<()> { #[tokio::test] async fn test_submit_block_v1_ssz_into_json() -> Result<()> { let res = submit_block_impl( - 3812, BuilderApiVersion::V1, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Json]), @@ -181,7 +176,6 @@ async fn test_submit_block_v1_ssz_into_json() -> Result<()> { #[tokio::test] async fn test_submit_block_v2_ssz_into_json() -> Result<()> { let res = submit_block_impl( - 3814, BuilderApiVersion::V2, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Json]), @@ -202,7 +196,6 @@ async fn test_submit_block_v2_ssz_into_json() -> Result<()> { #[tokio::test] async fn test_submit_block_v1_multitype_ssz() -> Result<()> { let res = submit_block_impl( - 3816, BuilderApiVersion::V1, HashSet::from([EncodingType::Ssz, EncodingType::Json]), HashSet::from([EncodingType::Ssz]), @@ -230,7 +223,6 @@ async fn test_submit_block_v1_multitype_ssz() -> Result<()> { #[tokio::test] async fn test_submit_block_v1_multitype_json() -> Result<()> { let res = submit_block_impl( - 3818, BuilderApiVersion::V1, HashSet::from([EncodingType::Ssz, EncodingType::Json]), HashSet::from([EncodingType::Json]), @@ -256,7 +248,6 @@ async fn test_submit_block_v1_multitype_json() -> Result<()> { #[tokio::test] async fn test_submit_block_v1_light() -> Result<()> { let res = submit_block_impl( - 3820, BuilderApiVersion::V1, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -281,7 +272,6 @@ async fn test_submit_block_v1_light() -> Result<()> { #[tokio::test] async fn test_submit_block_v2_light() -> Result<()> { let res = submit_block_impl( - 3822, BuilderApiVersion::V2, HashSet::from([EncodingType::Json]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -300,7 +290,6 @@ async fn test_submit_block_v2_light() -> Result<()> { #[tokio::test] async fn test_submit_block_v1_ssz_light() -> Result<()> 
{ let res = submit_block_impl( - 3824, BuilderApiVersion::V1, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -326,7 +315,6 @@ async fn test_submit_block_v1_ssz_light() -> Result<()> { #[tokio::test] async fn test_submit_block_v2_ssz_light() -> Result<()> { let res = submit_block_impl( - 3826, BuilderApiVersion::V2, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Ssz, EncodingType::Json]), @@ -347,7 +335,6 @@ async fn test_submit_block_v2_ssz_light() -> Result<()> { #[tokio::test] async fn test_submit_block_v1_ssz_into_json_light() -> Result<()> { submit_block_impl( - 3828, BuilderApiVersion::V1, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Json]), @@ -367,7 +354,6 @@ async fn test_submit_block_v1_ssz_into_json_light() -> Result<()> { #[tokio::test] async fn test_submit_block_v2_ssz_into_json_light() -> Result<()> { let res = submit_block_impl( - 3830, BuilderApiVersion::V2, HashSet::from([EncodingType::Ssz]), HashSet::from([EncodingType::Json]), @@ -388,7 +374,6 @@ async fn test_submit_block_v2_ssz_into_json_light() -> Result<()> { #[tokio::test] async fn test_submit_block_v1_multitype_ssz_light() -> Result<()> { let res = submit_block_impl( - 3832, BuilderApiVersion::V1, HashSet::from([EncodingType::Ssz, EncodingType::Json]), HashSet::from([EncodingType::Ssz]), @@ -416,7 +401,6 @@ async fn test_submit_block_v1_multitype_ssz_light() -> Result<()> { #[tokio::test] async fn test_submit_block_v1_multitype_json_light() -> Result<()> { let res = submit_block_impl( - 3834, BuilderApiVersion::V1, HashSet::from([EncodingType::Ssz, EncodingType::Json]), HashSet::from([EncodingType::Json]), @@ -445,15 +429,18 @@ async fn test_submit_block_too_large() -> Result<()> { let pubkey = signer.public_key(); let chain = Chain::Holesky; - let pbs_port = 3836; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = 
pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); - let relays = vec![generate_mock_relay(pbs_port + 1, pubkey)?]; + let relays = vec![generate_mock_relay(relay_port, pubkey)?]; let mock_state = Arc::new(MockRelayState::new(chain, signer).with_large_body()); - tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_listener)); let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -477,7 +464,6 @@ async fn test_submit_block_too_large() -> Result<()> { #[allow(clippy::too_many_arguments)] async fn submit_block_impl( - pbs_port: u16, api_version: BuilderApiVersion, accept_types: HashSet, relay_types: HashSet, @@ -493,7 +479,10 @@ async fn submit_block_impl( let signer = random_secret(); let pubkey = signer.public_key(); let chain = Chain::Holesky; - let relay_port = pbs_port + 1; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); // Run a mock relay let mock_relay = generate_mock_relay(relay_port, pubkey)?; @@ -506,14 +495,14 @@ async fn submit_block_impl( mock_relay_state = mock_relay_state.with_not_found_for_submit_block(); } let mock_state = Arc::new(mock_relay_state); - tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_listener)); // Run the PBS service let mut pbs_config = get_pbs_config(pbs_port); pbs_config.block_validation_mode = mode; let config = 
to_pbs_config(chain, pbs_config, vec![mock_relay]); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; diff --git a/tests/tests/pbs_post_validators.rs b/tests/tests/pbs_post_validators.rs index 12601cda..3f4ebf76 100644 --- a/tests/tests/pbs_post_validators.rs +++ b/tests/tests/pbs_post_validators.rs @@ -5,11 +5,13 @@ use cb_common::{ signer::random_secret, types::{BlsPublicKey, Chain}, }; -use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; +use cb_pbs::{PbsService, PbsState}; use cb_tests::{ - mock_relay::{MockRelayState, start_mock_relay_service}, + mock_relay::{MockRelayState, start_mock_relay_service_with_listener}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, + utils::{ + generate_mock_relay, get_free_listener, get_pbs_config, setup_test_env, to_pbs_config, + }, }; use eyre::Result; use reqwest::StatusCode; @@ -22,17 +24,20 @@ async fn test_register_validators() -> Result<()> { let pubkey: BlsPublicKey = signer.public_key(); let chain = Chain::Holesky; - let pbs_port = 4000; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); // Run a mock relay - let relays = vec![generate_mock_relay(pbs_port + 1, pubkey)?]; + let relays = vec![generate_mock_relay(relay_port, pubkey)?]; let mock_state = Arc::new(MockRelayState::new(chain, signer)); - tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_listener)); // Run the PBS service let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); let state = 
PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); + tokio::spawn(PbsService::run_with_listener(state, pbs_listener)); // leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -68,20 +73,23 @@ async fn test_register_validators_does_not_retry_on_429() -> Result<()> { let pubkey: BlsPublicKey = signer.public_key(); let chain = Chain::Holesky; - let pbs_port = 4200; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); // Set up mock relay state and override response to 429 let mock_state = Arc::new(MockRelayState::new(chain, signer)); mock_state.set_response_override(StatusCode::TOO_MANY_REQUESTS); // Run a mock relay - let relays = vec![generate_mock_relay(pbs_port + 1, pubkey)?]; - tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); + let relays = vec![generate_mock_relay(relay_port, pubkey)?]; + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_listener)); // Run the PBS service let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state.clone())); + tokio::spawn(PbsService::run_with_listener(state.clone(), pbs_listener)); // Leave some time to start servers tokio::time::sleep(Duration::from_millis(100)).await; @@ -121,14 +129,17 @@ async fn test_register_validators_retries_on_500() -> Result<()> { let pubkey: BlsPublicKey = signer.public_key(); let chain = Chain::Holesky; - let pbs_port = 4300; + let pbs_listener = get_free_listener().await; + let relay_listener = get_free_listener().await; + let pbs_port = pbs_listener.local_addr().unwrap().port(); + let relay_port = relay_listener.local_addr().unwrap().port(); // Set up internal mock relay 
with 500 response override let mock_state = Arc::new(MockRelayState::new(chain, signer)); mock_state.set_response_override(StatusCode::INTERNAL_SERVER_ERROR); // 500 - let relays = vec![generate_mock_relay(pbs_port + 1, pubkey)?]; - tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); + let relays = vec![generate_mock_relay(relay_port, pubkey)?]; + tokio::spawn(start_mock_relay_service_with_listener(mock_state.clone(), relay_listener)); // Set retry limit to 3 let mut pbs_config = get_pbs_config(pbs_port); @@ -136,7 +147,7 @@ async fn test_register_validators_retries_on_500() -> Result<()> { let config = to_pbs_config(chain, pbs_config, relays); let state = PbsState::new(config, PathBuf::new()); - tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state.clone())); + tokio::spawn(PbsService::run_with_listener(state.clone(), pbs_listener)); tokio::time::sleep(Duration::from_millis(100)).await;