From c68125d48d8e546bf2dce27ca311da6896b47b13 Mon Sep 17 00:00:00 2001 From: eltitanb Date: Wed, 2 Apr 2025 15:03:57 +0100 Subject: [PATCH 01/67] bump version --- crates/signer/src/proto/v1.rs | 205 +++++++++++++++++++++++----------- 1 file changed, 141 insertions(+), 64 deletions(-) diff --git a/crates/signer/src/proto/v1.rs b/crates/signer/src/proto/v1.rs index ba8012c3..36984aa0 100644 --- a/crates/signer/src/proto/v1.rs +++ b/crates/signer/src/proto/v1.rs @@ -24,8 +24,7 @@ impl ResponseState { /// String value of the enum field names used in the ProtoBuf definition. /// /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic - /// use. + /// (if the ProtoBuf definition does not change) and safe for programmatic use. pub fn as_str_name(&self) -> &'static str { match self { Self::Unknown => "UNKNOWN", @@ -90,9 +89,10 @@ pub mod lister_client { dead_code, missing_docs, clippy::wildcard_imports, - clippy::let_unit_value + clippy::let_unit_value, )] - use tonic::codegen::{http::Uri, *}; + use tonic::codegen::*; + use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct ListerClient { inner: tonic::client::Grpc, @@ -136,15 +136,16 @@ pub mod lister_client { >::ResponseBody, >, >, - >>::Error: - Into + std::marker::Send + std::marker::Sync, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, { ListerClient::new(InterceptedService::new(inner, interceptor)) } /// Compress requests with the given encoding. /// - /// This requires the server to support it otherwise it might respond - /// with an error. + /// This requires the server to support it otherwise it might respond with an + /// error. #[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.send_compressed(encoding); @@ -175,11 +176,18 @@ pub mod lister_client { pub async fn list_accounts( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> - { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/v1.Lister/ListAccounts"); let mut req = request.into_request(); @@ -239,9 +247,10 @@ pub mod account_manager_client { dead_code, missing_docs, clippy::wildcard_imports, - clippy::let_unit_value + clippy::let_unit_value, )] - use tonic::codegen::{http::Uri, *}; + use tonic::codegen::*; + use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct AccountManagerClient { inner: tonic::client::Grpc, @@ -285,15 +294,16 @@ pub mod account_manager_client { >::ResponseBody, >, >, - >>::Error: - Into + std::marker::Send + std::marker::Sync, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, { AccountManagerClient::new(InterceptedService::new(inner, interceptor)) } /// Compress requests with the given encoding. /// - /// This requires the server to support it otherwise it might respond - /// with an error. + /// This requires the server to support it otherwise it might respond with an + /// error. 
#[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.send_compressed(encoding); @@ -324,11 +334,18 @@ pub mod account_manager_client { pub async fn unlock( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> - { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/v1.AccountManager/Unlock"); let mut req = request.into_request(); @@ -338,11 +355,18 @@ pub mod account_manager_client { pub async fn lock( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> - { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/v1.AccountManager/Lock"); let mut req = request.into_request(); @@ -352,14 +376,25 @@ pub mod account_manager_client { pub async fn generate( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/v1.AccountManager/Generate"); + let path = http::uri::PathAndQuery::from_static( + "/v1.AccountManager/Generate", + ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("v1.AccountManager", "Generate")); + req.extensions_mut() + .insert(GrpcMethod::new("v1.AccountManager", "Generate")); self.inner.unary(req, path, codec).await } } @@ -486,9 +521,10 @@ pub mod signer_client { dead_code, missing_docs, clippy::wildcard_imports, - clippy::let_unit_value + clippy::let_unit_value, )] - use tonic::codegen::{http::Uri, *}; + use tonic::codegen::*; + use tonic::codegen::http::Uri; #[derive(Debug, Clone)] pub struct SignerClient { inner: tonic::client::Grpc, @@ -532,15 +568,16 @@ pub mod signer_client { >::ResponseBody, >, >, - >>::Error: - Into + std::marker::Send + std::marker::Sync, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, { SignerClient::new(InterceptedService::new(inner, interceptor)) } /// Compress requests with the given encoding. /// - /// This requires the server to support it otherwise it might respond - /// with an error. + /// This requires the server to support it otherwise it might respond with an + /// error. 
#[must_use] pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { self.inner = self.inner.send_compressed(encoding); @@ -572,9 +609,14 @@ pub mod signer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/v1.Signer/Sign"); let mut req = request.into_request(); @@ -584,10 +626,18 @@ pub mod signer_client { pub async fn multisign( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); let path = http::uri::PathAndQuery::from_static("/v1.Signer/Multisign"); let mut req = request.into_request(); @@ -598,39 +648,66 @@ pub mod signer_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/v1.Signer/SignBeaconAttestation"); + let path = http::uri::PathAndQuery::from_static( + "/v1.Signer/SignBeaconAttestation", + ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("v1.Signer", "SignBeaconAttestation")); + req.extensions_mut() + .insert(GrpcMethod::new("v1.Signer", "SignBeaconAttestation")); self.inner.unary(req, path, codec).await } pub async fn sign_beacon_attestations( &mut self, request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/v1.Signer/SignBeaconAttestations"); + let path = http::uri::PathAndQuery::from_static( + "/v1.Signer/SignBeaconAttestations", + ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("v1.Signer", "SignBeaconAttestations")); + req.extensions_mut() + .insert(GrpcMethod::new("v1.Signer", "SignBeaconAttestations")); self.inner.unary(req, path, codec).await } pub async fn sign_beacon_proposal( &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { - self.inner.ready().await.map_err(|e| { - tonic::Status::unknown(format!("Service was not ready: {}", e.into())) - })?; + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; let codec = 
tonic::codec::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/v1.Signer/SignBeaconProposal"); + let path = http::uri::PathAndQuery::from_static( + "/v1.Signer/SignBeaconProposal", + ); let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("v1.Signer", "SignBeaconProposal")); + req.extensions_mut() + .insert(GrpcMethod::new("v1.Signer", "SignBeaconProposal")); self.inner.unary(req, path, codec).await } } From d9979a239eaf6fc0365a30c39cf81264d34d9edd Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 5 May 2025 17:13:43 -0400 Subject: [PATCH 02/67] Successful cross-compilation, but runtime has memory allocation issues --- provisioning/signer.Dockerfile | 70 +++++++++++++++++++++++++++++----- 1 file changed, 60 insertions(+), 10 deletions(-) diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 85c2be43..bc258b47 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -1,22 +1,72 @@ -FROM lukemathwalker/cargo-chef:latest-rust-1.83 AS chef +# This will be the main build image +FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef +ARG TARGETOS TARGETARCH BUILDPLATFORM WORKDIR /app -FROM chef AS planner +# Planner stage +FROM --platform=${BUILDPLATFORM} chef AS planner +ARG TARGETOS TARGETARCH BUILDPLATFORM COPY . . RUN cargo chef prepare --recipe-path recipe.json -FROM chef AS builder +# Builder stage +FROM --platform=${BUILDPLATFORM} chef AS builder +ARG TARGETOS TARGETARCH BUILDPLATFORM COPY --from=planner /app/recipe.json recipe.json - -RUN cargo chef cook --release --recipe-path recipe.json - -RUN apt-get update && apt-get install -y protobuf-compiler - COPY . . -RUN cargo build --release --bin commit-boost-signer +# Get the latest Protoc since the one in the Debian repo is incredibly old +RUN apt update && apt install -y unzip curl ca-certificates && \ + PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ + if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ + PROTOC_ARCH=x86_64; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ + PROTOC_ARCH=aarch_64; \ + else \ + echo "${BUILDPLATFORM} is not supported."; \ + exit 1; \ + fi && \ + curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ + unzip -q protoc.zip bin/protoc -d /usr && \ + unzip -q protoc.zip "include/google/*" -d /usr && \ + chmod a+x /usr/bin/protoc && \ + rm -rf protoc.zip + +# Build the application +RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ + # We're on x64, cross-compiling for arm64 - get OpenSSL and zlib for arm64, and set up the GCC vars + dpkg --add-architecture arm64 && \ + apt update && \ + apt install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 && \ + rustup target add aarch64-unknown-linux-gnu && \ + TARGET="aarch64-unknown-linux-gnu" && \ + TARGET_FLAG="--target=${TARGET}" && \ + export PKG_CONFIG_ALLOW_CROSS="true" && \ + export PKG_CONFIG_PATH="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ + export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/aarch64-linux-gnu-ld" && \ + export RUSTFLAGS="-L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))"; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ + # We're on arm64, cross-compiling for x64 - get OpenSSL and zlib for x64, and set up the GCC vars + dpkg 
--add-architecture amd64 && \ + apt update && \ + apt install -y gcc-x86-64-linux-gnu libssl-dev:amd64 zlib1g-dev:amd64 && \ + rustup target add x86_64-unknown-linux-gnu && \ + TARGET="x86_64-unknown-linux-gnu" && \ + TARGET_FLAG="--target=${TARGET}" && \ + export PKG_CONFIG_ALLOW_CROSS="true" && \ + export PKG_CONFIG_PATH="/usr/lib/x86_64-linux-gnu/pkgconfig"; \ + export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/x86_64-linux-gnu-ld"; \ + export RUSTFLAGS="-L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))"; \ + fi && \ + # Build the signer - general setup that works with or without cross-compilation + cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json && \ + cargo build ${TARGET_FLAG} --release --bin commit-boost-signer && \ + if [ ! -z "$TARGET" ]; then \ + # If we're cross-compiling, we need to move the binary out of the target dir + mv target/${TARGET}/release/commit-boost-signer target/release/commit-boost-signer; \ + fi -FROM debian:bookworm-20240904-slim AS runtime +FROM debian:bookworm-slim AS runtime WORKDIR /app RUN apt-get update && apt-get install -y \ From 97ef653d602dbf9397de54abdc48ba21f063eb9e Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 6 May 2025 04:09:49 -0400 Subject: [PATCH 03/67] Working with OpenSSL static-linked --- Cargo.lock | 12 ++++++++++++ Cargo.toml | 3 +++ crates/common/Cargo.toml | 4 ++++ crates/common/build.rs | 8 ++++++++ provisioning/signer.Dockerfile | 20 +++++++++++--------- 5 files changed, 38 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5ebc811a..436d3b65 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1494,6 +1494,8 @@ dependencies = [ "ethereum_ssz_derive", "eyre", "jsonwebtoken", + "k256", + "openssl", "pbkdf2 0.12.2", "rand 0.9.0", "reqwest", @@ -3550,6 +3552,15 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-src" +version = "300.5.0+3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8ce546f549326b0e6052b649198487d91320875da901e7bd11a06d1ee3f9c2f" +dependencies = [ + "cc", +] + [[package]] name = "openssl-sys" version = "0.9.106" @@ -3558,6 +3569,7 @@ checksum = "8bb61ea9811cc39e3c2069f40b8b8e2e70d8569b361f879786cc7ed48b777cdd" dependencies = [ "cc", "libc", + "openssl-src", "pkg-config", "vcpkg", ] diff --git a/Cargo.toml b/Cargo.toml index aef26a94..14cddf82 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,6 +7,9 @@ edition = "2021" rust-version = "1.83" version = "0.7.0-rc.2" +[workspace.features] +openssl-vendored = ["crates/common/openssl-vendored"] + [workspace.dependencies] aes = "0.8" alloy = { version = "0.12", features = [ diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index df78b046..15c0b8d1 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -41,3 +41,7 @@ tree_hash_derive.workspace = true unicode-normalization.workspace = true url.workspace = true jsonwebtoken.workspace = true +openssl = { version = "0.10", optional = true, features = ["vendored"] } + +[features] +openssl-vendored = ["openssl/vendored"] diff --git a/crates/common/build.rs b/crates/common/build.rs index 9bd10ecb..c24a54cb 100644 --- a/crates/common/build.rs +++ b/crates/common/build.rs @@ -1,6 +1,14 @@ use std::process::Command; fn main() { + let target = std::env::var("TARGET").unwrap(); + let host = std::env::var("HOST").unwrap(); + + if target != host { + 
println!("cargo:warning=Skipping build script because TARGET != HOST"); + return; + } + let output = Command::new("git").args(["rev-parse", "HEAD"]).output().unwrap(); let git_hash = String::from_utf8(output.stdout).unwrap(); println!("cargo:rustc-env=GIT_HASH={git_hash}"); diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index bc258b47..523a2ff4 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -35,16 +35,17 @@ RUN apt update && apt install -y unzip curl ca-certificates && \ # Build the application RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ # We're on x64, cross-compiling for arm64 - get OpenSSL and zlib for arm64, and set up the GCC vars - dpkg --add-architecture arm64 && \ + rustup target add aarch64-unknown-linux-gnu && \ + #dpkg --add-architecture arm64 && \ apt update && \ apt install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 && \ - rustup target add aarch64-unknown-linux-gnu && \ TARGET="aarch64-unknown-linux-gnu" && \ TARGET_FLAG="--target=${TARGET}" && \ - export PKG_CONFIG_ALLOW_CROSS="true" && \ - export PKG_CONFIG_PATH="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ - export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/aarch64-linux-gnu-ld" && \ - export RUSTFLAGS="-L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))"; \ + # export PKG_CONFIG_ALLOW_CROSS="true" && \ + # export PKG_CONFIG_LIBDIR="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ + export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/aarch64-linux-gnu-gcc" && \ + export RUSTFLAGS="-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))" && \ + FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ # We're on arm64, cross-compiling for x64 - get OpenSSL and zlib for x64, and set up the GCC vars dpkg --add-architecture amd64 && \ @@ -55,12 +56,13 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ TARGET_FLAG="--target=${TARGET}" && \ export PKG_CONFIG_ALLOW_CROSS="true" && \ export PKG_CONFIG_PATH="/usr/lib/x86_64-linux-gnu/pkgconfig"; \ - export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/x86_64-linux-gnu-ld"; \ + export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/x86_64-linux-gnu-gcc"; \ export RUSTFLAGS="-L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))"; \ fi && \ # Build the signer - general setup that works with or without cross-compilation - cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json && \ - cargo build ${TARGET_FLAG} --release --bin commit-boost-signer && \ + # cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json && \ + export GIT_HASH=$(git rev-parse HEAD) && \ + cargo build ${TARGET_FLAG} --release --bin commit-boost-signer ${FEATURE_OPENSSL_VENDORED} && \ if [ ! -z "$TARGET" ]; then \ # If we're cross-compiling, we need to move the binary out of the target dir mv target/${TARGET}/release/commit-boost-signer target/release/commit-boost-signer; \ From 91eefe2de57a28c6ddbda38666046cbc711f93d6 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 6 May 2025 04:43:02 -0400 Subject: [PATCH 04/67] Got dynamic linking working, added a feature flag to toggle dynamic vs. 
static --- provisioning/signer.Dockerfile | 49 ++++++++++++++++++++++------------ 1 file changed, 32 insertions(+), 17 deletions(-) diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 523a2ff4..3c29075d 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -1,17 +1,17 @@ # This will be the main build image FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef -ARG TARGETOS TARGETARCH BUILDPLATFORM +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED WORKDIR /app # Planner stage FROM --platform=${BUILDPLATFORM} chef AS planner -ARG TARGETOS TARGETARCH BUILDPLATFORM +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED COPY . . RUN cargo chef prepare --recipe-path recipe.json # Builder stage FROM --platform=${BUILDPLATFORM} chef AS builder -ARG TARGETOS TARGETARCH BUILDPLATFORM +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED COPY --from=planner /app/recipe.json recipe.json COPY . . @@ -34,30 +34,45 @@ RUN apt update && apt install -y unzip curl ca-certificates && \ # Build the application RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ - # We're on x64, cross-compiling for arm64 - get OpenSSL and zlib for arm64, and set up the GCC vars + # We're on x64, cross-compiling for arm64 rustup target add aarch64-unknown-linux-gnu && \ - #dpkg --add-architecture arm64 && \ apt update && \ - apt install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 && \ + apt install -y gcc-aarch64-linux-gnu && \ + if [ "$OPENSSL_VENDORED" != "true" ]; then \ + # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation + dpkg --add-architecture arm64 && \ + apt update && \ + apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ + export PKG_CONFIG_ALLOW_CROSS="true" && \ + export PKG_CONFIG_LIBDIR="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ + export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu && \ + export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu && \ + FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ + fi && \ TARGET="aarch64-unknown-linux-gnu" && \ TARGET_FLAG="--target=${TARGET}" && \ - # export PKG_CONFIG_ALLOW_CROSS="true" && \ - # export PKG_CONFIG_LIBDIR="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/aarch64-linux-gnu-gcc" && \ - export RUSTFLAGS="-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))" && \ - FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ + export RUSTFLAGS="-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))"; \ elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ - # We're on arm64, cross-compiling for x64 - get OpenSSL and zlib for x64, and set up the GCC vars - dpkg --add-architecture amd64 && \ - apt update && \ - apt install -y gcc-x86-64-linux-gnu libssl-dev:amd64 zlib1g-dev:amd64 && \ + # We're on arm64, cross-compiling for x64 rustup target add x86_64-unknown-linux-gnu && \ + apt update && \ + apt install -y gcc-x86-64-linux-gnu && \ + if [ "$OPENSSL_VENDORED" != "true" ]; then \ + # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation + dpkg --add-architecture amd64 && \ + apt update && \ + apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ + export PKG_CONFIG_ALLOW_CROSS="true" && \ + export PKG_CONFIG_LIBDIR="/usr/lib/x86_64-linux-gnu/pkgconfig" && \ + export 
OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu && \ + export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu && \ + FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ + fi && \ TARGET="x86_64-unknown-linux-gnu" && \ TARGET_FLAG="--target=${TARGET}" && \ - export PKG_CONFIG_ALLOW_CROSS="true" && \ - export PKG_CONFIG_PATH="/usr/lib/x86_64-linux-gnu/pkgconfig"; \ export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/x86_64-linux-gnu-gcc"; \ - export RUSTFLAGS="-L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))"; \ + export RUSTFLAGS="-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))"; \ fi && \ # Build the signer - general setup that works with or without cross-compilation # cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json && \ From de09415b8fd994f1b74ed772787aabfd4ac52234 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 6 May 2025 13:13:55 -0400 Subject: [PATCH 05/67] Fixed the vendored build arg --- provisioning/signer.Dockerfile | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 3c29075d..984ba9b4 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -46,7 +46,8 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ export PKG_CONFIG_ALLOW_CROSS="true" && \ export PKG_CONFIG_LIBDIR="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu && \ - export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu && \ + export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu; \ + else \ FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ fi && \ TARGET="aarch64-unknown-linux-gnu" && \ @@ -66,7 +67,8 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ export PKG_CONFIG_ALLOW_CROSS="true" && \ export PKG_CONFIG_LIBDIR="/usr/lib/x86_64-linux-gnu/pkgconfig" && \ export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu && \ - export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu && \ + export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu; \ + else \ FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ fi && \ TARGET="x86_64-unknown-linux-gnu" && \ From 3aee63d1a00c70fce4e86a1a1600f134a2437b41 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 6 May 2025 15:35:58 -0400 Subject: [PATCH 06/67] Reintroduced the cargo chef setup --- provisioning/signer.Dockerfile | 104 ++++++++++++++++++++------------- 1 file changed, 63 insertions(+), 41 deletions(-) diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 984ba9b4..6de707f0 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -12,72 +12,94 @@ RUN cargo chef prepare --recipe-path recipe.json # Builder stage FROM --platform=${BUILDPLATFORM} chef AS builder ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED +ENV BUILD_VAR_SCRIPT=/tmp/env.sh COPY --from=planner /app/recipe.json recipe.json -COPY . . 
- -# Get the latest Protoc since the one in the Debian repo is incredibly old -RUN apt update && apt install -y unzip curl ca-certificates && \ - PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ - if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ - PROTOC_ARCH=x86_64; \ - elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ - PROTOC_ARCH=aarch_64; \ - else \ - echo "${BUILDPLATFORM} is not supported."; \ - exit 1; \ - fi && \ - curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ - unzip -q protoc.zip bin/protoc -d /usr && \ - unzip -q protoc.zip "include/google/*" -d /usr && \ - chmod a+x /usr/bin/protoc && \ - rm -rf protoc.zip -# Build the application +# Set up the build environment for cross-compilation if needed RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ # We're on x64, cross-compiling for arm64 rustup target add aarch64-unknown-linux-gnu && \ apt update && \ apt install -y gcc-aarch64-linux-gnu && \ + echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ + echo "export RUSTFLAGS=\"-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ if [ "$OPENSSL_VENDORED" != "true" ]; then \ # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation dpkg --add-architecture arm64 && \ apt update && \ apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ - export PKG_CONFIG_ALLOW_CROSS="true" && \ - export PKG_CONFIG_LIBDIR="/usr/lib/aarch64-linux-gnu/pkgconfig" && \ - export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu && \ - export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu; \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ else \ - FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ - fi && \ - TARGET="aarch64-unknown-linux-gnu" && \ - TARGET_FLAG="--target=${TARGET}" && \ - export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/aarch64-linux-gnu-gcc" && \ - export RUSTFLAGS="-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))"; \ + echo "export FEATURE_OPENSSL_VENDORED='--features openssl-vendored'" >> ${BUILD_VAR_SCRIPT}; \ + fi; \ elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ # We're on arm64, cross-compiling for x64 rustup target add x86_64-unknown-linux-gnu && \ apt update && \ apt install -y gcc-x86-64-linux-gnu && \ + echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ + echo "export RUSTFLAGS=\"-L /usr/x86_64-linux-gnu/lib -L $(dirname 
$(x86_64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ if [ "$OPENSSL_VENDORED" != "true" ]; then \ # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation dpkg --add-architecture amd64 && \ apt update && \ apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ - export PKG_CONFIG_ALLOW_CROSS="true" && \ - export PKG_CONFIG_LIBDIR="/usr/lib/x86_64-linux-gnu/pkgconfig" && \ - export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu && \ - export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu; \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ else \ - FEATURE_OPENSSL_VENDORED="--features openssl-vendored"; \ - fi && \ - TARGET="x86_64-unknown-linux-gnu" && \ - TARGET_FLAG="--target=${TARGET}" && \ - export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER="/usr/bin/x86_64-linux-gnu-gcc"; \ - export RUSTFLAGS="-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))"; \ + echo "export FEATURE_OPENSSL_VENDORED='--features openssl-vendored'" >> ${BUILD_VAR_SCRIPT}; \ + fi; \ + fi + +# Run cook to prep the build +RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + source ${BUILD_VAR_SCRIPT}; \ + echo "Cross-compilation environment set up for ${TARGET}"; \ + else \ + echo "No cross-compilation needed"; \ + fi && \ + export GIT_HASH=$(git rev-parse HEAD) && \ + cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} + +# Now we can copy the source files +COPY . . + +# Get the latest Protoc since the one in the Debian repo is incredibly old +RUN apt update && apt install -y unzip curl ca-certificates && \ + PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ + if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ + PROTOC_ARCH=x86_64; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ + PROTOC_ARCH=aarch_64; \ + else \ + echo "${BUILDPLATFORM} is not supported."; \ + exit 1; \ + fi && \ + curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ + unzip -q protoc.zip bin/protoc -d /usr && \ + unzip -q protoc.zip "include/google/*" -d /usr && \ + chmod a+x /usr/bin/protoc && \ + rm -rf protoc.zip + +# Build the application +RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + chmod +x ${BUILD_VAR_SCRIPT} && \ + . ${BUILD_VAR_SCRIPT}; \ + echo "Cross-compilation environment set up for ${TARGET}"; \ + else \ + echo "No cross-compilation needed"; \ fi && \ - # Build the signer - general setup that works with or without cross-compilation - # cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json && \ export GIT_HASH=$(git rev-parse HEAD) && \ cargo build ${TARGET_FLAG} --release --bin commit-boost-signer ${FEATURE_OPENSSL_VENDORED} && \ if [ ! 
-z "$TARGET" ]; then \ From c07c71784ee4c557f8fb778f9db2ef0b328624ae Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 6 May 2025 16:36:57 -0400 Subject: [PATCH 07/67] Ported the cross-compilation stuff into PBS --- provisioning/pbs.Dockerfile | 112 ++++++++++++++++++++++++++++++--- provisioning/signer.Dockerfile | 22 ++++--- 2 files changed, 120 insertions(+), 14 deletions(-) diff --git a/provisioning/pbs.Dockerfile b/provisioning/pbs.Dockerfile index 200c95d2..cac14de0 100644 --- a/provisioning/pbs.Dockerfile +++ b/provisioning/pbs.Dockerfile @@ -1,22 +1,120 @@ -FROM lukemathwalker/cargo-chef:latest-rust-1.83 AS chef +# This will be the main build image +FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED WORKDIR /app -FROM chef AS planner +FROM --platform=${BUILDPLATFORM} chef AS planner +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED COPY . . RUN cargo chef prepare --recipe-path recipe.json -FROM chef AS builder +FROM --platform=${BUILDPLATFORM} chef AS builder +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED +ENV BUILD_VAR_SCRIPT=/tmp/env.sh COPY --from=planner /app/recipe.json recipe.json -RUN cargo chef cook --release --recipe-path recipe.json +# Set up the build environment for cross-compilation if needed +RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ + # We're on x64, cross-compiling for arm64 + rustup target add aarch64-unknown-linux-gnu && \ + apt update && \ + apt install -y gcc-aarch64-linux-gnu && \ + echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ + echo "export RUSTFLAGS=\"-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ + if [ "$OPENSSL_VENDORED" != "true" ]; then \ + # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation + dpkg --add-architecture arm64 && \ + apt update && \ + apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ + fi; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ + # We're on arm64, cross-compiling for x64 + rustup target add x86_64-unknown-linux-gnu && \ + apt update && \ + apt install -y gcc-x86-64-linux-gnu && \ + echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ + echo "export RUSTFLAGS=\"-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ + if [ "$OPENSSL_VENDORED" != "true" ]; then \ + # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation + dpkg --add-architecture amd64 && \ + 
apt update && \ + apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ + fi; \ + fi -RUN apt-get update && apt-get install -y protobuf-compiler +# Run cook to prep the build +RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + . ${BUILD_VAR_SCRIPT} && \ + echo "Cross-compilation environment set up for ${TARGET}"; \ + else \ + echo "No cross-compilation needed"; \ + fi && \ + if [ "$OPENSSL_VENDORED" = "true" ]; then \ + echo "Using vendored OpenSSL" && \ + FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ + else \ + echo "Using system OpenSSL"; \ + fi && \ + export GIT_HASH=$(git rev-parse HEAD) && \ + cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} +# Now we can copy the source files - chef cook wants to run before this step COPY . . -RUN cargo build --release --bin commit-boost-pbs +# Get the latest Protoc since the one in the Debian repo is incredibly old +RUN apt update && apt install -y unzip curl ca-certificates && \ + PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ + if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ + PROTOC_ARCH=x86_64; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ + PROTOC_ARCH=aarch_64; \ + else \ + echo "${BUILDPLATFORM} is not supported."; \ + exit 1; \ + fi && \ + curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ + unzip -q protoc.zip bin/protoc -d /usr && \ + unzip -q protoc.zip "include/google/*" -d /usr && \ + chmod a+x /usr/bin/protoc && \ + rm -rf protoc.zip -FROM debian:bookworm-20240904-slim AS runtime +# Build the application +RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + chmod +x ${BUILD_VAR_SCRIPT} && \ + . ${BUILD_VAR_SCRIPT} && \ + echo "Cross-compilation environment set up for ${TARGET}"; \ + else \ + echo "No cross-compilation needed"; \ + fi && \ + if [ "$OPENSSL_VENDORED" = "true" ]; then \ + echo "Using vendored OpenSSL" && \ + FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ + else \ + echo "Using system OpenSSL"; \ + fi && \ + export GIT_HASH=$(git rev-parse HEAD) && \ + cargo build ${TARGET_FLAG} --release --bin commit-boost-pbs ${FEATURE_OPENSSL_VENDORED} && \ + if [ ! 
-z "$TARGET" ]; then \ + # If we're cross-compiling, we need to move the binary out of the target dir + mv target/${TARGET}/release/commit-boost-pbs target/release/commit-boost-pbs; \ + fi + +# Assemble the runner image +FROM debian:bookworm-slim AS runtime WORKDIR /app RUN apt-get update && apt-get install -y \ diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 6de707f0..354afee0 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -35,8 +35,6 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - else \ - echo "export FEATURE_OPENSSL_VENDORED='--features openssl-vendored'" >> ${BUILD_VAR_SCRIPT}; \ fi; \ elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ # We're on arm64, cross-compiling for x64 @@ -57,22 +55,26 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - else \ - echo "export FEATURE_OPENSSL_VENDORED='--features openssl-vendored'" >> ${BUILD_VAR_SCRIPT}; \ fi; \ fi # Run cook to prep the build RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ - source ${BUILD_VAR_SCRIPT}; \ + . ${BUILD_VAR_SCRIPT} && \ echo "Cross-compilation environment set up for ${TARGET}"; \ else \ echo "No cross-compilation needed"; \ fi && \ + if [ "$OPENSSL_VENDORED" = "true" ]; then \ + echo "Using vendored OpenSSL" && \ + FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ + else \ + echo "Using system OpenSSL"; \ + fi && \ export GIT_HASH=$(git rev-parse HEAD) && \ cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} -# Now we can copy the source files +# Now we can copy the source files - chef cook wants to run before this step COPY . . # Get the latest Protoc since the one in the Debian repo is incredibly old @@ -95,11 +97,17 @@ RUN apt update && apt install -y unzip curl ca-certificates && \ # Build the application RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ chmod +x ${BUILD_VAR_SCRIPT} && \ - . ${BUILD_VAR_SCRIPT}; \ + . ${BUILD_VAR_SCRIPT} && \ echo "Cross-compilation environment set up for ${TARGET}"; \ else \ echo "No cross-compilation needed"; \ fi && \ + if [ "$OPENSSL_VENDORED" = "true" ]; then \ + echo "Using vendored OpenSSL" && \ + FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ + else \ + echo "Using system OpenSSL"; \ + fi && \ export GIT_HASH=$(git rev-parse HEAD) && \ cargo build ${TARGET_FLAG} --release --bin commit-boost-signer ${FEATURE_OPENSSL_VENDORED} && \ if [ ! 
-z "$TARGET" ]; then \ From 699b7ec9eeb4fe2c5d1398095047b82df81afc26 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 7 May 2025 13:52:08 -0400 Subject: [PATCH 08/67] Split the dockerfiles into separate builder / image definitions --- .gitignore | 3 +- build-linux.sh | 144 +++++++++++++++++++++++++++++++++ provisioning/build.Dockerfile | 120 +++++++++++++++++++++++++++ provisioning/cli.Dockerfile | 0 provisioning/pbs.Dockerfile | 137 +++---------------------------- provisioning/signer.Dockerfile | 6 +- 6 files changed, 277 insertions(+), 133 deletions(-) create mode 100755 build-linux.sh create mode 100644 provisioning/build.Dockerfile create mode 100644 provisioning/cli.Dockerfile diff --git a/.gitignore b/.gitignore index b8eaa77a..e48792b4 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ # will have compiled files and executables debug/ target/ +build/ # These are backup files generated by rustfmt **/*.rs.bk @@ -14,4 +15,4 @@ cb.docker-compose.yml targets.json .idea/ logs -.vscode/ \ No newline at end of file +.vscode/ diff --git a/build-linux.sh b/build-linux.sh new file mode 100755 index 00000000..a7266bd9 --- /dev/null +++ b/build-linux.sh @@ -0,0 +1,144 @@ +#!/bin/bash + +# This script will build the Commit-Boost applications and modules for local Linux development. + +# ================= +# === Functions === +# ================= + +# Print a failure message to stderr and exit +fail() { + MESSAGE=$1 + RED='\033[0;31m' + RESET='\033[;0m' + >&2 echo -e "\n${RED}**ERROR**\n$MESSAGE${RESET}\n" + exit 1 +} + + +# Builds the CLI binaries for Linux +# NOTE: You must install qemu first; e.g. sudo apt-get install -y qemu qemu-user-static +build_cli() { + echo "Building CLI binaries..." + docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/$VERSION --target output --build-arg TARGET_CRATE=commit-boost-cli . || fail "Error building CLI." + echo "done!" + + # Flatten the folder structure for easier referencing + mv build/$VERSION/linux_amd64/commit-boost-cli build/$VERSION/commit-boost-cli-linux-amd64 + mv build/$VERSION/linux_arm64/commit-boost-cli build/$VERSION/commit-boost-cli-linux-arm64 + + # Clean up the empty directories + rmdir build/$VERSION/linux_amd64 build/$VERSION/linux_arm64 + echo "done!" +} + + +# Builds the PBS module binaries for Linux and the Docker image(s) +# NOTE: You must install qemu first; e.g. sudo apt-get install -y qemu qemu-user-static +build_pbs() { + echo "Building PBS binaries..." + docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/$VERSION --target output --build-arg TARGET_CRATE=commit-boost-pbs . || fail "Error building PBS binaries." + echo "done!" + + # Flatten the folder structure for easier referencing + mv build/$VERSION/linux_amd64/commit-boost-pbs build/$VERSION/commit-boost-pbs-linux-amd64 + mv build/$VERSION/linux_arm64/commit-boost-pbs build/$VERSION/commit-boost-pbs-linux-arm64 + + # Clean up the empty directories + rmdir build/$VERSION/linux_amd64 build/$VERSION/linux_arm64 + + echo "Building PBS Docker image..." + # If uploading, make and push a manifest + if [ "$LOCAL_UPLOAD" = true ]; then + if [ -z "$LOCAL_DOCKER_REGISTRY" ]; then + fail "LOCAL_DOCKER_REGISTRY must be set to upload to a local registry." + fi + docker buildx build --rm --platform=linux/amd64,linux/arm64 --build-arg BINARIES_PATH=build/$VERSION -t $LOCAL_DOCKER_REGISTRY/commit-boost/pbs:$VERSION -f provisioning/pbs.Dockerfile --push . 
|| fail "Error building PBS image." + else + docker buildx build --rm --load --build-arg BINARIES_PATH=build/$VERSION -t commit-boost/pbs:$VERSION -f provisioning/pbs.Dockerfile . || fail "Error building PBS image." + fi + echo "done!" +} + + +# Builds the Signer module binaries for Linux and the Docker image(s) +# NOTE: You must install qemu first; e.g. sudo apt-get install -y qemu qemu-user-static +build_signer() { + echo "Building Signer binaries..." + docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/$VERSION --target output --build-arg TARGET_CRATE=commit-boost-signer . || fail "Error building Signer binaries." + echo "done!" + + # Flatten the folder structure for easier referencing + mv build/$VERSION/linux_amd64/commit-boost-signer build/$VERSION/commit-boost-signer-linux-amd64 + mv build/$VERSION/linux_arm64/commit-boost-signer build/$VERSION/commit-boost-signer-linux-arm64 + + # Clean up the empty directories + rmdir build/$VERSION/linux_amd64 build/$VERSION/linux_arm64 + + echo "Building Signer Docker image..." + # If uploading, make and push a manifest + if [ "$LOCAL_UPLOAD" = true ]; then + if [ -z "$LOCAL_DOCKER_REGISTRY" ]; then + fail "LOCAL_DOCKER_REGISTRY must be set to upload to a local registry." + fi + docker buildx build --rm --platform=linux/amd64,linux/arm64 --build-arg BINARIES_PATH=build/$VERSION -t $LOCAL_DOCKER_REGISTRY/commit-boost/signer:$VERSION -f provisioning/signer.Dockerfile --push . || fail "Error building Signer image." + else + docker buildx build --rm --load --build-arg BINARIES_PATH=build/$VERSION -t commit-boost/signer:$VERSION -f provisioning/signer.Dockerfile . || fail "Error building Signer image." + fi + echo "done!" +} + + +# Print usage +usage() { + echo "Usage: build.sh [options] -v " + echo "This script assumes it is in the commit-boost-client repository directory." 
+ echo "Options:" + echo $'\t-a\tBuild all of the artifacts (CLI, PBS, and Signer, along with Docker images)' + echo $'\t-c\tBuild the Commit-Boost CLI binaries' + echo $'\t-p\tBuild the PBS module binary and its Docker container' + echo $'\t-s\tBuild the Signer module binary and its Docker container' + echo $'\t-o\tWhen passed with a build, upload the resulting image tags to a local Docker registry specified in $LOCAL_DOCKER_REGISTRY' + exit 0 +} + + +# ================= +# === Main Body === +# ================= + +# Parse arguments +while getopts "acpsov:" FLAG; do + case "$FLAG" in + a) CLI=true PBS=true SIGNER=true ;; + c) CLI=true ;; + p) PBS=true ;; + s) SIGNER=true ;; + o) LOCAL_UPLOAD=true ;; + v) VERSION="$OPTARG" ;; + *) usage ;; + esac +done +if [ -z "$VERSION" ]; then + usage +fi + +# Cleanup old artifacts +rm -rf build/$VERSION/* +mkdir -p build/$VERSION + +# Make a multiarch builder, ignore if it's already there +docker buildx create --name multiarch-builder --driver docker-container --use > /dev/null 2>&1 +# NOTE: if using a local repo with a private CA, you will have to follow these steps to add the CA to the builder: +# https://stackoverflow.com/a/73585243 + +# Build the artifacts +if [ "$CLI" = true ]; then + build_cli +fi +if [ "$PBS" = true ]; then + build_pbs +fi +if [ "$SIGNER" = true ]; then + build_signer +fi diff --git a/provisioning/build.Dockerfile b/provisioning/build.Dockerfile new file mode 100644 index 00000000..83679ed5 --- /dev/null +++ b/provisioning/build.Dockerfile @@ -0,0 +1,120 @@ +# This will be the main build image +FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED TARGET_CRATE +WORKDIR /app + +FROM --platform=${BUILDPLATFORM} chef AS planner +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED TARGET_CRATE +COPY . . 
+RUN cargo chef prepare --recipe-path recipe.json + +FROM --platform=${BUILDPLATFORM} chef AS builder +ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED TARGET_CRATE +RUN test -n "$TARGET_CRATE" || (echo "TARGET_CRATE must be set to the service / binary you want to build" && false) +ENV BUILD_VAR_SCRIPT=/tmp/env.sh +COPY --from=planner /app/recipe.json recipe.json + +# Get the latest Protoc since the one in the Debian repo is incredibly old +RUN apt update && apt install -y unzip curl ca-certificates && \ + PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ + if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ + PROTOC_ARCH=x86_64; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ + PROTOC_ARCH=aarch_64; \ + else \ + echo "${BUILDPLATFORM} is not supported."; \ + exit 1; \ + fi && \ + curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ + unzip -q protoc.zip bin/protoc -d /usr && \ + unzip -q protoc.zip "include/google/*" -d /usr && \ + chmod a+x /usr/bin/protoc && \ + rm -rf protoc.zip + +# Set up the build environment for cross-compilation if needed +RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ + # We're on x64, cross-compiling for arm64 + rustup target add aarch64-unknown-linux-gnu && \ + apt update && \ + apt install -y gcc-aarch64-linux-gnu && \ + echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ + echo "export RUSTFLAGS=\"-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ + if [ "$OPENSSL_VENDORED" != "true" ]; then \ + # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation + dpkg --add-architecture arm64 && \ + apt update && \ + apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ + fi; \ + elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ + # We're on arm64, cross-compiling for x64 + rustup target add x86_64-unknown-linux-gnu && \ + apt update && \ + apt install -y gcc-x86-64-linux-gnu && \ + echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ + echo "export RUSTFLAGS=\"-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ + if [ "$OPENSSL_VENDORED" != "true" ]; then \ + # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation + dpkg --add-architecture amd64 && \ + apt update && \ + apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ + echo "export 
PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ + fi; \ + fi + +# Run cook to prep the build +RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + . ${BUILD_VAR_SCRIPT} && \ + echo "Cross-compilation environment set up for ${TARGET}"; \ + else \ + echo "No cross-compilation needed"; \ + fi && \ + if [ "$OPENSSL_VENDORED" = "true" ]; then \ + echo "Using vendored OpenSSL" && \ + FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ + else \ + echo "Using system OpenSSL"; \ + fi && \ + export GIT_HASH=$(git rev-parse HEAD) && \ + cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} + +# Now we can copy the source files - chef cook wants to run before this step +COPY . . + +# Build the application +RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + chmod +x ${BUILD_VAR_SCRIPT} && \ + . ${BUILD_VAR_SCRIPT} && \ + echo "Cross-compilation environment set up for ${TARGET}"; \ + else \ + echo "No cross-compilation needed"; \ + fi && \ + if [ "$OPENSSL_VENDORED" = "true" ]; then \ + echo "Using vendored OpenSSL" && \ + FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ + else \ + echo "Using system OpenSSL"; \ + fi && \ + export GIT_HASH=$(git rev-parse HEAD) && \ + cargo build ${TARGET_FLAG} --release --bin ${TARGET_CRATE} ${FEATURE_OPENSSL_VENDORED} && \ + if [ ! -z "$TARGET" ]; then \ + # If we're cross-compiling, we need to move the binary out of the target dir + mv target/${TARGET}/release/${TARGET_CRATE} target/release/${TARGET_CRATE}; \ + fi + +# Copy the output +FROM scratch AS output +ARG TARGET_CRATE +COPY --from=builder /app/target/release/${TARGET_CRATE} /${TARGET_CRATE} diff --git a/provisioning/cli.Dockerfile b/provisioning/cli.Dockerfile new file mode 100644 index 00000000..e69de29b diff --git a/provisioning/pbs.Dockerfile b/provisioning/pbs.Dockerfile index cac14de0..9eff5890 100644 --- a/provisioning/pbs.Dockerfile +++ b/provisioning/pbs.Dockerfile @@ -1,138 +1,19 @@ -# This will be the main build image -FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED -WORKDIR /app - -FROM --platform=${BUILDPLATFORM} chef AS planner -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED -COPY . . 
-RUN cargo chef prepare --recipe-path recipe.json - -FROM --platform=${BUILDPLATFORM} chef AS builder -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED -ENV BUILD_VAR_SCRIPT=/tmp/env.sh -COPY --from=planner /app/recipe.json recipe.json - -# Set up the build environment for cross-compilation if needed -RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ - # We're on x64, cross-compiling for arm64 - rustup target add aarch64-unknown-linux-gnu && \ - apt update && \ - apt install -y gcc-aarch64-linux-gnu && \ - echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ - echo "export RUSTFLAGS=\"-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ - if [ "$OPENSSL_VENDORED" != "true" ]; then \ - # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation - dpkg --add-architecture arm64 && \ - apt update && \ - apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ - echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ - echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - fi; \ - elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ - # We're on arm64, cross-compiling for x64 - rustup target add x86_64-unknown-linux-gnu && \ - apt update && \ - apt install -y gcc-x86-64-linux-gnu && \ - echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ - echo "export RUSTFLAGS=\"-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ - if [ "$OPENSSL_VENDORED" != "true" ]; then \ - # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation - dpkg --add-architecture amd64 && \ - apt update && \ - apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ - echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ - echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - fi; \ - fi - -# Run cook to prep the build -RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ - . 
${BUILD_VAR_SCRIPT} && \ - echo "Cross-compilation environment set up for ${TARGET}"; \ - else \ - echo "No cross-compilation needed"; \ - fi && \ - if [ "$OPENSSL_VENDORED" = "true" ]; then \ - echo "Using vendored OpenSSL" && \ - FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ - else \ - echo "Using system OpenSSL"; \ - fi && \ - export GIT_HASH=$(git rev-parse HEAD) && \ - cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} - -# Now we can copy the source files - chef cook wants to run before this step -COPY . . - -# Get the latest Protoc since the one in the Debian repo is incredibly old -RUN apt update && apt install -y unzip curl ca-certificates && \ - PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ - if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ - PROTOC_ARCH=x86_64; \ - elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ - PROTOC_ARCH=aarch_64; \ - else \ - echo "${BUILDPLATFORM} is not supported."; \ - exit 1; \ - fi && \ - curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ - unzip -q protoc.zip bin/protoc -d /usr && \ - unzip -q protoc.zip "include/google/*" -d /usr && \ - chmod a+x /usr/bin/protoc && \ - rm -rf protoc.zip - -# Build the application -RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ - chmod +x ${BUILD_VAR_SCRIPT} && \ - . ${BUILD_VAR_SCRIPT} && \ - echo "Cross-compilation environment set up for ${TARGET}"; \ - else \ - echo "No cross-compilation needed"; \ - fi && \ - if [ "$OPENSSL_VENDORED" = "true" ]; then \ - echo "Using vendored OpenSSL" && \ - FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ - else \ - echo "Using system OpenSSL"; \ - fi && \ - export GIT_HASH=$(git rev-parse HEAD) && \ - cargo build ${TARGET_FLAG} --release --bin commit-boost-pbs ${FEATURE_OPENSSL_VENDORED} && \ - if [ ! 
-z "$TARGET" ]; then \ - # If we're cross-compiling, we need to move the binary out of the target dir - mv target/${TARGET}/release/commit-boost-pbs target/release/commit-boost-pbs; \ - fi - -# Assemble the runner image -FROM debian:bookworm-slim AS runtime -WORKDIR /app - +FROM debian:bookworm-slim +ARG BINARIES_PATH TARGETOS TARGETARCH +COPY ${BINARIES_PATH}/commit-boost-pbs-${TARGETOS}-${TARGETARCH} /usr/local/bin RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ libssl3 \ libssl-dev \ - curl \ - && apt-get clean autoclean \ - && rm -rf /var/lib/apt/lists/* - -COPY --from=builder /app/target/release/commit-boost-pbs /usr/local/bin + curl && \ + # Cleanup + apt-get clean autoclean && \ + rm -rf /var/lib/apt/lists/* +# Create a non-root user to run the application RUN groupadd -g 10001 commitboost && \ useradd -u 10001 -g commitboost -s /sbin/nologin commitboost USER commitboost -ENTRYPOINT ["/usr/local/bin/commit-boost-pbs"] - - - +ENTRYPOINT ["/usr/local/bin/commit-boost-pbs"] \ No newline at end of file diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 354afee0..6c5ac045 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -115,6 +115,7 @@ RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ mv target/${TARGET}/release/commit-boost-signer target/release/commit-boost-signer; \ fi +# Assemble the runner image FROM debian:bookworm-slim AS runtime WORKDIR /app @@ -133,7 +134,4 @@ RUN groupadd -g 10001 commitboost && \ useradd -u 10001 -g commitboost -s /sbin/nologin commitboost USER commitboost -ENTRYPOINT ["/usr/local/bin/commit-boost-signer"] - - - +ENTRYPOINT ["/usr/local/bin/commit-boost-signer"] \ No newline at end of file From 7165f129ae7a299b69649c7904ef3b30787ee86e Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 7 May 2025 17:48:42 -0400 Subject: [PATCH 09/67] Added a build guide --- docs/docs/get_started/building.md | 185 ++++++++++++++++++++++++++++++ 1 file changed, 185 insertions(+) create mode 100644 docs/docs/get_started/building.md diff --git a/docs/docs/get_started/building.md b/docs/docs/get_started/building.md new file mode 100644 index 00000000..d38b447f --- /dev/null +++ b/docs/docs/get_started/building.md @@ -0,0 +1,185 @@ +# Building Commit-Boost from Source + +Commit-Boost's components are all written in [Rust](https://www.rust-lang.org/). This guide will walk you through the setup required to build them from source. It assumes you are on a Debian or Debian-based system (e.g., Ubuntu, Linux Mint, Pop OS). For other systems, please adapt the steps for your system's package manager accordingly. + + +## Building via the Docker Builder + +For convenience, Commit-Boost has Dockerized the build environment for Linux `x64` and `arm64` platforms. All of the prerequisites, cross-compilation tooling, and configuration are handled by the builder image. If you would like to build the CLI, PBS module, or Signer binaries and Docker images from source, you are welcome to use the Docker builder process. + +To use the builder, you will need to have [Docker Engine](https://docs.docker.com/engine/install/) installed on your system. Please follow the instructions to install it first. + +:::note +The build script assumes that you've added your user account to the `docker` group with the Linux [post-install steps](https://docs.docker.com/engine/install/linux-postinstall/). 
If you haven't, then you'll need to run the build script below as `root` or modify it so each call to `docker` within it is run as the root user (e.g., with `sudo`). +::: + +We provide a build script called `build-linux.sh` to automate the process: + +``` +$ ./build-linux.sh +Usage: build.sh [options] -v +This script assumes it is in the commit-boost-client repository directory. +Options: + -a Build all of the artifacts (CLI, PBS, and Signer, along with Docker images) + -c Build the Commit-Boost CLI binaries + -p Build the PBS module binary and its Docker container + -s Build the Signer module binary and its Docker container + -o When passed with a build, upload the resulting image tags to a local Docker registry specified in $LOCAL_DOCKER_REGISTRY +``` + +The script utilizes Docker's [buildx](https://docs.docker.com/reference/cli/docker/buildx/) system to both create a multiarch-capable builder and cross-compile for both Linux architectures. You are free to modify it to produce only the artifacts relevant to you if so desired. + +The `version` provided will be used to house the output binaries in `./build/$VERSION`, and act as the version tag for the Docker images when they're added to your local system or uploaded to your local Docker repository. + + +## Building Manually + +If you don't want to use the Docker builder, you can compile the Commit-Boost artifacts locally. The following instructions assume a Debian or Debian-based system (e.g., Ubuntu, Linux Mint, Pop OS) for simplicity. For other systems, please adapt any relevant instructions to your environment accordingly. + + +### Prerequisites + +Requirements: + +- Rust 1.83+ +- GCC (or another C compiler of your choice) +- OpenSSL development libraries +- Protobuf Compiler (`protoc`) + +Start by installing Rust if you don't already have it. Follow [the official directions](https://www.rust-lang.org/learn/get-started) to install it and bring it up to date. + +Install the dependencies: + +```bash +sudo apt update && sudo apt install -y openssl ca-certificates libssl3 libssl-dev build-essential pkg-config curl +``` + +Install the Protobuf compiler: + +:::note +While many package repositories provide a `protobuf-compiler` package in lieu of manually installing protoc, we've found at the time of this writing that most of them use v3.21 which is quite out of date. We recommend getting the latest version manually. +::: + +```bash +PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') +MACHINE_ARCH=$(uname -m) +case "${MACHINE_ARCH}" in + aarch64) PROTOC_ARCH=aarch_64;; + x86_64) PROTOC_ARCH=x86_64;; + *) echo "${MACHINE_ARCH} is not supported."; exit 1;; +esac +curl -sLo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip +sudo unzip -q protoc.zip bin/protoc -d /usr +sudo unzip -q protoc.zip "include/google/*" -d /usr +sudo chmod a+x /usr/bin/protoc +rm -rf protoc.zip +``` + +With the prerequisites set up, pull the repository: +```bash +git clone https://github.com/Commit-Boost/commit-boost-client +``` + +Check out the `stable` branch which houses the latest release: +```bash +cd commit-boost-client && git checkout stable +``` + +Finally, update the submodules: +``` +git submodule update --init --recursive +``` + +Your build environment should now be ready to use. 
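+
+Before building anything, you can optionally sanity-check the toolchain. Assuming the steps above completed without errors, the following commands should all succeed:
+
+```bash
+rustc --version     # should report 1.83 or newer
+cargo --version
+protoc --version    # should report the version installed above
+pkg-config --exists openssl && echo "OpenSSL development headers found"
+```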
+ + +### Building the CLI + +To build the CLI, run: +``` +cargo build --release --bin commit-boost-cli +``` + +This will create a binary in `./target/release/commit-boost-cli`. Confirm that it works: +``` +./target/release/commit-boost-cli --version +``` + +You can now use this to generate the Docker Compose file to drive the other modules if desired. See the [configuration](./configuration.md) guide for more information. + + +### Building the PBS Module + +To build PBS, run: +``` +cargo build --release --bin commit-boost-pbs +``` + +This will create a binary in `./target/release/commit-boost-pbs`. To verify it works, create [a TOML configuration](./configuration.md) for the PBS module (e.g., `cb-config.toml`). + +As a quick example, we'll use this configuration that connects to the Flashbots relay on the Hoodi network: +```toml +chain = "Hoodi" + +[pbs] +port = 18550 +with_signer = true + +[[relays]] +url = "https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-hoodi.flashbots.net" + +[metrics] +enabled = true + +[signer] +[signer.local.loader] +format = "lighthouse" +keys_path = "/tmp/keys" +secrets_path = "/tmp/secrets" +``` + +Set the path to it in the `CB_CONFIG` environment variable and run the binary: +``` +CB_CONFIG=cb-config.toml ./target/release/commit-boost-pbs +``` + +If it works, you should see output like this: +``` +2025-05-07T21:09:17.407245Z WARN No metrics server configured +2025-05-07T21:09:17.407257Z INFO starting PBS service version="0.7.0" commit_hash="58082edb1213596667afe8c3950cd997ab85f4f3" addr=127.0.0.1:18550 events_subs=0 chain=Hoodi +2025-05-07T21:09:17.746855Z INFO : new request ua="" relay_check=true method=/eth/v1/builder/status req_id=5c405c33-0496-42ea-a35d-a7a01dbba356 +2025-05-07T21:09:17.896196Z INFO : relay check successful method=/eth/v1/builder/status req_id=5c405c33-0496-42ea-a35d-a7a01dbba356 +``` + +If you do, then the binary works. + + +### Building the Signer Module + +To build the Signer, run: +``` +cargo build --release --bin commit-boost-signer +``` + +This will create a binary in `./target/release/commit-boost-signer`. To verify it works, create [a TOML configuration](./configuration.md) for the Signer module (e.g., `cb-config.toml`). We'll use the example in the PBS build section above. + +The signer needs the following environment variables set: +- `CB_CONFIG` = path of your config file. +- `CB_JWTS` = a dummy key-value pair of [JWT](https://en.wikipedia.org/wiki/JSON_Web_Token) values for various services. Since we don't need them for the sake of just testing the binary, we can use something like `"test_jwts=dummy"`. +- `CB_SIGNER_PORT` = the network port to listen for signer requests on. Default is `20000`. + +Set these values, create the `keys` and `secrets` directories listed in the configuration file, and run the binary: + +``` +mkdir -p /tmp/keys && mkdir -p /tmp/secrets +CB_CONFIG=cb-config.toml CB_JWTS="test_jwts=dummy" CB_SIGNER_PORT=20000 ./target/release/commit-boost-signer +``` + +You should see output like this: +``` +2025-05-07T21:43:46.385535Z WARN Proxy store not configured. Proxies keys and delegations will not be persisted +2025-05-07T21:43:46.393507Z INFO Starting signing service version="0.7.0" commit_hash="58082edb1213596667afe8c3950cd997ab85f4f3" modules=["test_jwts"] port=20000 loaded_consensus=0 loaded_proxies=0 +2025-05-07T21:43:46.393574Z WARN No metrics server configured +``` + +If you do, then the binary works. 
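+
+
+### Cross-Compiling for Another Architecture
+
+The commands above build for your host architecture. If you need, for example, an arm64 binary on an x64 machine, the Docker builder handles the cross-compilation setup automatically, but you can mirror what it does manually. The sketch below is an outline based on the builder's Dockerfile rather than a tested recipe; it assumes a Debian-based x64 host whose package sources provide arm64 packages, and dynamic linking against the system OpenSSL:
+
+```bash
+# Add the Rust target plus the cross toolchain and target libraries
+rustup target add aarch64-unknown-linux-gnu
+sudo dpkg --add-architecture arm64
+sudo apt update && sudo apt install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64
+
+# Point cargo at the cross linker, and tell pkg-config/openssl-sys where the target libraries live
+export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc
+export PKG_CONFIG_ALLOW_CROSS=true
+export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig
+export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu
+export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu
+
+cargo build --release --target aarch64-unknown-linux-gnu --bin commit-boost-pbs
+```
+
+The resulting binary lands in `./target/aarch64-unknown-linux-gnu/release/`. The Docker builder additionally sets `RUSTFLAGS` with extra library search paths for the cross GCC; add those as well if linking fails.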
\ No newline at end of file From 9438dae97bbb5d13032519c34ca9ad4e7c468137 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 13 May 2025 02:25:24 -0400 Subject: [PATCH 10/67] Refactored the Github release action to use the Docker builder --- .github/workflows/release.yml | 159 ++++++++++++++++++++++++++------- provisioning/build.Dockerfile | 21 +---- provisioning/cli.Dockerfile | 0 provisioning/protoc.sh | 57 ++++++++++++ provisioning/signer.Dockerfile | 134 ++------------------------- 5 files changed, 194 insertions(+), 177 deletions(-) delete mode 100644 provisioning/cli.Dockerfile create mode 100755 provisioning/protoc.sh diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 906c01f3..40745fbb 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,11 +10,86 @@ permissions: packages: write jobs: - build-binaries: + # Builds the x64 and arm64 binaries for Linux, for all 3 crates, via the Docker builder + build-binaries-linux: strategy: matrix: target: - - x86_64-unknown-linux-gnu + - amd64 + - arm64 + name: + - commit-boost-cli + - commit-boost-pbs + - commit-boost-signer + include: + - target: amd64 + package-suffix: x86-64 + - target: arm64 + package-suffix: arm64 + - name: commit-boost-cli + target-crate: cli + - name: commit-boost-pbs + target-crate: pbs + - name: commit-boost-signer + target-crate: signer + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: "stable" + fetch-depth: 0 + submodules: true + + - name: Log commit hash + run: | + echo "Releasing commit: $(git rev-parse HEAD)" + + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build binary (Linux) + uses: docker/build-push-action@v6 + with: + context: . 
+ push: false + platforms: linux/amd64,linux/arm64 + cache-from: type=registry,ref=ghcr.io/commit-boost/buildcache:${{ matrix.target-crate}} + cache-to: type=registry,ref=ghcr.io/commit-boost/buildcache:${{ matrix.target-crate }},mode=max + file: provisioning/build.Dockerfile + outputs: type=local,dest=build + build-args: | + TARGET_CRATE=${{ matrix.name }} + + - name: Package binary (Linux) + run: | + cd build/linux_${{ matrix.target }} + tar -czvf ${{ matrix.name }}-${{ github.ref_name }}-linux_${{ matrix.package-suffix }}.tar.gz ${{ matrix.name }} + mv ${{ matrix.name }}-${{ github.ref_name }}-linux_${{ matrix.package-suffix }}.tar.gz ../../ + + - name: Upload artifact + uses: actions/upload-artifact@v4 + with: + name: ${{ matrix.name }}-${{ github.ref_name }}-linux_${{ matrix.package-suffix }} + path: | + ${{ matrix.name }}-${{ github.ref_name }}-linux_${{ matrix.package-suffix }}.tar.gz + + # Builds the arm64 binaries for Darwin, for all 3 crates, natively + build-binaries-darwin: + strategy: + matrix: + target: + # x64 requires macos-latest-large which is not available in the free tier # - x86_64-apple-darwin - aarch64-apple-darwin name: @@ -22,10 +97,8 @@ jobs: - commit-boost-pbs - commit-boost-signer include: - - target: x86_64-unknown-linux-gnu - os: ubuntu-latest # - target: x86_64-apple-darwin - # os: macos-latest + # os: macos-latest-large - target: aarch64-apple-darwin os: macos-latest runs-on: ${{ matrix.os }} @@ -41,6 +114,12 @@ jobs: run: | echo "Releasing commit: $(git rev-parse HEAD)" + - name: Install Protoc + run: + # Brew's version is much more up to date than the Linux ones, and installing the latest via script runs into curl issues so for now, brew's easier to use + # provisioning/protoc.sh + brew install protobuf + - name: Cache Cargo registry uses: actions/cache@v3 with: @@ -63,48 +142,25 @@ jobs: ${{ runner.os }}-cargo-build-${{ matrix.target }}- ${{ runner.os }}-cargo-build- - - name: Install protoc (Ubuntu) - if: runner.os == 'Linux' - run: sudo apt-get install protobuf-compiler - - - name: Install protoc (macOS) - if: runner.os == 'macOS' - run: brew install protobuf - - - name: Set up Rust - uses: actions-rs/toolchain@v1 - with: - profile: minimal - toolchain: stable - override: true - target: ${{ matrix.target }} - - - name: Build binary + - name: Build binary (Darwin) run: cargo build --release --target ${{ matrix.target }} --bin ${{ matrix.name }} - env: - CARGO_TARGET_X86_64_PC_WINDOWS_GNU_LINKER: gcc - name: Package binary (Unix) - if: runner.os != 'Windows' run: | cd target/${{ matrix.target }}/release tar -czvf ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz ${{ matrix.name }} mv ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz ../../../ - - name: Package binary (Windows) - if: runner.os == 'Windows' - run: | - 7z a ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.zip target\${{ matrix.target }}\release\${{ matrix.name }}.exe - - name: Upload artifact uses: actions/upload-artifact@v4 with: name: ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }} path: | - ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.${{ runner.os == 'Windows' && 'zip' || 'tar.gz' }} + ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz + # Builds the PBS Docker image build-and-push-pbs-docker: - needs: [build-binaries] + needs: [build-binaries-linux] runs-on: ubuntu-latest steps: - name: Checkout code @@ -114,6 +170,20 @@ jobs: fetch-depth: 0 submodules: true + - name: Download 
binary archives + uses: actions/download-artifact@v4 + with: + path: ./artifacts + pattern: "commit-boost-*" + + - name: Extract binaries + run: | + mkdir -p ./artifacts/bin + tar -xzf ./artifacts/commit-boost-pbs-${{ github.ref_name }}-linux_x86-64/commit-boost-pbs-${{ github.ref_name }}-linux_x86-64.tar.gz -C ./artifacts/bin + mv ./artifacts/bin/commit-boost-pbs ./artifacts/bin/commit-boost-pbs-linux-amd64 + tar -xzf ./artifacts/commit-boost-pbs-${{ github.ref_name }}-linux_arm64/commit-boost-pbs-${{ github.ref_name }}-linux_arm64.tar.gz -C ./artifacts/bin + mv ./artifacts/bin/commit-boost-pbs ./artifacts/bin/commit-boost-pbs-linux-arm64 + - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -133,6 +203,8 @@ jobs: context: . push: true platforms: linux/amd64,linux/arm64 + build-args: | + BINARIES_PATH=./artifacts/bin tags: | ghcr.io/commit-boost/pbs:${{ github.ref_name }} ${{ !contains(github.ref_name, 'rc') && 'ghcr.io/commit-boost/pbs:latest' || '' }} @@ -140,8 +212,9 @@ jobs: cache-to: type=registry,ref=ghcr.io/commit-boost/pbs:buildcache,mode=max file: provisioning/pbs.Dockerfile + # Builds the Signer Docker image build-and-push-signer-docker: - needs: [build-binaries] + needs: [build-binaries-linux] runs-on: ubuntu-latest steps: - name: Checkout code @@ -151,6 +224,20 @@ jobs: fetch-depth: 0 submodules: true + - name: Download binary archives + uses: actions/download-artifact@v4 + with: + path: ./artifacts + pattern: "commit-boost-*" + + - name: Extract binaries + run: | + mkdir -p ./artifacts/bin + tar -xzf ./artifacts/commit-boost-signer-${{ github.ref_name }}-linux_x86-64/commit-boost-signer-${{ github.ref_name }}-linux_x86-64.tar.gz -C ./artifacts/bin + mv ./artifacts/bin/commit-boost-signer ./artifacts/bin/commit-boost-signer-linux-amd64 + tar -xzf ./artifacts/commit-boost-signer-${{ github.ref_name }}-linux_arm64/commit-boost-signer-${{ github.ref_name }}-linux_arm64.tar.gz -C ./artifacts/bin + mv ./artifacts/bin/commit-boost-signer ./artifacts/bin/commit-boost-signer-linux-arm64 + - name: Set up QEMU uses: docker/setup-qemu-action@v3 @@ -170,6 +257,8 @@ jobs: context: . 
push: true platforms: linux/amd64,linux/arm64 + build-args: | + BINARIES_PATH=./artifacts/bin tags: | ghcr.io/commit-boost/signer:${{ github.ref_name }} ${{ !contains(github.ref_name, 'rc') && 'ghcr.io/commit-boost/signer:latest' || '' }} @@ -177,9 +266,11 @@ jobs: cache-to: type=registry,ref=ghcr.io/commit-boost/signer:buildcache,mode=max file: provisioning/signer.Dockerfile + # Creates a draft release on GitHub with the binaries finalize-release: needs: - - build-binaries + - build-binaries-linux + - build-binaries-darwin - build-and-push-pbs-docker - build-and-push-signer-docker runs-on: ubuntu-latest diff --git a/provisioning/build.Dockerfile b/provisioning/build.Dockerfile index 83679ed5..a4eb3723 100644 --- a/provisioning/build.Dockerfile +++ b/provisioning/build.Dockerfile @@ -14,23 +14,6 @@ RUN test -n "$TARGET_CRATE" || (echo "TARGET_CRATE must be set to the service / ENV BUILD_VAR_SCRIPT=/tmp/env.sh COPY --from=planner /app/recipe.json recipe.json -# Get the latest Protoc since the one in the Debian repo is incredibly old -RUN apt update && apt install -y unzip curl ca-certificates && \ - PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ - if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ - PROTOC_ARCH=x86_64; \ - elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ - PROTOC_ARCH=aarch_64; \ - else \ - echo "${BUILDPLATFORM} is not supported."; \ - exit 1; \ - fi && \ - curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ - unzip -q protoc.zip bin/protoc -d /usr && \ - unzip -q protoc.zip "include/google/*" -d /usr && \ - chmod a+x /usr/bin/protoc && \ - rm -rf protoc.zip - # Set up the build environment for cross-compilation if needed RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ # We're on x64, cross-compiling for arm64 @@ -90,6 +73,10 @@ RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ export GIT_HASH=$(git rev-parse HEAD) && \ cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} +# Get the latest Protoc since the one in the Debian repo is incredibly old +COPY provisioning/protoc.sh provisioning/protoc.sh +RUN provisioning/protoc.sh + # Now we can copy the source files - chef cook wants to run before this step COPY . . diff --git a/provisioning/cli.Dockerfile b/provisioning/cli.Dockerfile deleted file mode 100644 index e69de29b..00000000 diff --git a/provisioning/protoc.sh b/provisioning/protoc.sh new file mode 100755 index 00000000..7f66a656 --- /dev/null +++ b/provisioning/protoc.sh @@ -0,0 +1,57 @@ +#!/bin/sh + +# This script installs the latest version of protoc (Protocol Buffers Compiler) from the official GitHub repository. 
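+#
+# Usage: provisioning/protoc.sh
+# On Linux this installs into /usr, so run it as root (or adjust TARGET_DIR);
+# on macOS it installs into /opt/homebrew, so no elevated permissions are needed.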
+ +# Print a failure message to stderr and exit +fail() { + MESSAGE=$1 + RED='\033[0;31m' + RESET='\033[;0m' + >&2 echo -e "\n${RED}**ERROR**\n$MESSAGE${RESET}\n" + exit 1 +} + +# Get the OS +case "$(uname)" in + Darwin*) + PROTOC_OS="osx" ; + TARGET_DIR="/opt/homebrew" ; # Emulating a homebrew install so we don't need elevated permissions + # Darwin comes with unzip and curl already + brew install jq ;; + Linux*) + PROTOC_OS="linux" ; + TARGET_DIR="/usr" ; # Assumes the script is run as root or the user can do it manually + apt update && apt install -y unzip curl ca-certificates jq ;; + *) + echo "Unsupported OS: $(uname)" ; + exit 1 ;; +esac + +# Get the architecture +case "$(uname -m)" in + x86_64) PROTOC_ARCH="x86_64" ;; + aarch64) PROTOC_ARCH="aarch_64" ;; + arm64) PROTOC_ARCH="aarch_64" ;; + *) echo "Unsupported architecture: [$(uname -m)]"; exit 1 ;; +esac + +# Get the latest version +PROTOC_RAW_VERSION=$(curl --retry 10 --retry-delay 2 --retry-all-errors -fsL "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | jq -r .tag_name) || fail "Failed to get the latest version of protoc" +if [ "$PROTOC_RAW_VERSION" = "null" ]; then + fail "Failed to get the latest version of protoc" +fi +echo "Latest version of protoc: [$PROTOC_RAW_VERSION]" +PROTOC_VERSION=$(echo $PROTOC_RAW_VERSION | sed 's/^v//') || fail "Failed to parse the latest version of protoc" +if [ -z "$PROTOC_VERSION" ]; then + fail "Latest version of protoc was empty" +fi + +echo "Installing protoc: $PROTOC_VERSION-$PROTOC_OS-$PROTOC_ARCH" + +# Download and install protoc +curl --retry 10 --retry-delay 2 --retry-all-errors -fsLo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-$PROTOC_OS-$PROTOC_ARCH.zip || fail "Failed to download protoc" +unzip -q protoc.zip bin/protoc -d $TARGET_DIR || fail "Failed to unzip protoc" +unzip -q protoc.zip "include/google/*" -d $TARGET_DIR || fail "Failed to unzip protoc includes" +chmod a+x $TARGET_DIR/bin/protoc || fail "Failed to set executable permissions for protoc" +rm -rf protoc.zip || fail "Failed to remove protoc zip file" +echo "protoc ${PROTOC_VERSION} installed successfully for ${PROTOC_OS} ${PROTOC_ARCH}" \ No newline at end of file diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 6c5ac045..f9824e7a 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -1,135 +1,17 @@ -# This will be the main build image -FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED -WORKDIR /app - -# Planner stage -FROM --platform=${BUILDPLATFORM} chef AS planner -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED -COPY . . 
-RUN cargo chef prepare --recipe-path recipe.json - -# Builder stage -FROM --platform=${BUILDPLATFORM} chef AS builder -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED -ENV BUILD_VAR_SCRIPT=/tmp/env.sh -COPY --from=planner /app/recipe.json recipe.json - -# Set up the build environment for cross-compilation if needed -RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ - # We're on x64, cross-compiling for arm64 - rustup target add aarch64-unknown-linux-gnu && \ - apt update && \ - apt install -y gcc-aarch64-linux-gnu && \ - echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ - echo "export RUSTFLAGS=\"-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ - if [ "$OPENSSL_VENDORED" != "true" ]; then \ - # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation - dpkg --add-architecture arm64 && \ - apt update && \ - apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ - echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ - echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - fi; \ - elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ - # We're on arm64, cross-compiling for x64 - rustup target add x86_64-unknown-linux-gnu && \ - apt update && \ - apt install -y gcc-x86-64-linux-gnu && \ - echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ - echo "export RUSTFLAGS=\"-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ - if [ "$OPENSSL_VENDORED" != "true" ]; then \ - # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation - dpkg --add-architecture amd64 && \ - apt update && \ - apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ - echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ - echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - fi; \ - fi - -# Run cook to prep the build -RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ - . 
${BUILD_VAR_SCRIPT} && \ - echo "Cross-compilation environment set up for ${TARGET}"; \ - else \ - echo "No cross-compilation needed"; \ - fi && \ - if [ "$OPENSSL_VENDORED" = "true" ]; then \ - echo "Using vendored OpenSSL" && \ - FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ - else \ - echo "Using system OpenSSL"; \ - fi && \ - export GIT_HASH=$(git rev-parse HEAD) && \ - cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} - -# Now we can copy the source files - chef cook wants to run before this step -COPY . . - -# Get the latest Protoc since the one in the Debian repo is incredibly old -RUN apt update && apt install -y unzip curl ca-certificates && \ - PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') && \ - if [ "$BUILDPLATFORM" = "linux/amd64" ]; then \ - PROTOC_ARCH=x86_64; \ - elif [ "$BUILDPLATFORM" = "linux/arm64" ]; then \ - PROTOC_ARCH=aarch_64; \ - else \ - echo "${BUILDPLATFORM} is not supported."; \ - exit 1; \ - fi && \ - curl -Lo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip && \ - unzip -q protoc.zip bin/protoc -d /usr && \ - unzip -q protoc.zip "include/google/*" -d /usr && \ - chmod a+x /usr/bin/protoc && \ - rm -rf protoc.zip - -# Build the application -RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ - chmod +x ${BUILD_VAR_SCRIPT} && \ - . ${BUILD_VAR_SCRIPT} && \ - echo "Cross-compilation environment set up for ${TARGET}"; \ - else \ - echo "No cross-compilation needed"; \ - fi && \ - if [ "$OPENSSL_VENDORED" = "true" ]; then \ - echo "Using vendored OpenSSL" && \ - FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ - else \ - echo "Using system OpenSSL"; \ - fi && \ - export GIT_HASH=$(git rev-parse HEAD) && \ - cargo build ${TARGET_FLAG} --release --bin commit-boost-signer ${FEATURE_OPENSSL_VENDORED} && \ - if [ ! 
-z "$TARGET" ]; then \ - # If we're cross-compiling, we need to move the binary out of the target dir - mv target/${TARGET}/release/commit-boost-signer target/release/commit-boost-signer; \ - fi - -# Assemble the runner image -FROM debian:bookworm-slim AS runtime -WORKDIR /app - +FROM debian:bookworm-slim +ARG BINARIES_PATH TARGETOS TARGETARCH +COPY ${BINARIES_PATH}/commit-boost-signer-${TARGETOS}-${TARGETARCH} /usr/local/bin RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ libssl3 \ libssl-dev \ - curl \ - && apt-get clean autoclean \ - && rm -rf /var/lib/apt/lists/* - -COPY --from=builder /app/target/release/commit-boost-signer /usr/local/bin + curl && \ + # Cleanup + apt-get clean autoclean && \ + rm -rf /var/lib/apt/lists/* +# Create a non-root user to run the application RUN groupadd -g 10001 commitboost && \ useradd -u 10001 -g commitboost -s /sbin/nologin commitboost USER commitboost From 12c020a20af91f673e348b12f2bc561fe57a6ae4 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 13 May 2025 02:53:02 -0400 Subject: [PATCH 11/67] Fixed the Docker image binary filenames --- provisioning/pbs.Dockerfile | 2 +- provisioning/signer.Dockerfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/provisioning/pbs.Dockerfile b/provisioning/pbs.Dockerfile index 9eff5890..9eb72702 100644 --- a/provisioning/pbs.Dockerfile +++ b/provisioning/pbs.Dockerfile @@ -1,6 +1,6 @@ FROM debian:bookworm-slim ARG BINARIES_PATH TARGETOS TARGETARCH -COPY ${BINARIES_PATH}/commit-boost-pbs-${TARGETOS}-${TARGETARCH} /usr/local/bin +COPY ${BINARIES_PATH}/commit-boost-pbs-${TARGETOS}-${TARGETARCH} /usr/local/bin/commit-boost-pbs RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index f9824e7a..05679762 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -1,6 +1,6 @@ FROM debian:bookworm-slim ARG BINARIES_PATH TARGETOS TARGETARCH -COPY ${BINARIES_PATH}/commit-boost-signer-${TARGETOS}-${TARGETARCH} /usr/local/bin +COPY ${BINARIES_PATH}/commit-boost-signer-${TARGETOS}-${TARGETARCH} /usr/local/bin/commit-boost-signer RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ From 53cafc039a747e61a92b9fb41a9a53a395f1a1a0 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 13 May 2025 17:42:28 -0400 Subject: [PATCH 12/67] Cleaned up the Darwin artifact step --- .github/workflows/release.yml | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 40745fbb..5be42110 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -98,9 +98,11 @@ jobs: - commit-boost-signer include: # - target: x86_64-apple-darwin - # os: macos-latest-large + # os: macos-latest-large + # package-suffix: x86-64 - target: aarch64-apple-darwin os: macos-latest + package-suffix: arm64 runs-on: ${{ matrix.os }} steps: - name: Checkout code @@ -145,18 +147,18 @@ jobs: - name: Build binary (Darwin) run: cargo build --release --target ${{ matrix.target }} --bin ${{ matrix.name }} - - name: Package binary (Unix) + - name: Package binary (Darwin) run: | cd target/${{ matrix.target }}/release - tar -czvf ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz ${{ matrix.name }} - mv ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz ../../../ + tar -czvf ${{ matrix.name }}-${{ github.ref_name }}-darwin_${{ 
matrix.package-suffix }}.tar.gz ${{ matrix.name }} + mv ${{ matrix.name }}-${{ github.ref_name }}-darwin_${{ matrix.package-suffix }}.tar.gz ../../../ - name: Upload artifact uses: actions/upload-artifact@v4 with: - name: ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }} + name: ${{ matrix.name }}-${{ github.ref_name }}-darwin_${{ matrix.package-suffix }} path: | - ${{ matrix.name }}-${{ github.ref_name }}-${{ matrix.target }}.tar.gz + ${{ matrix.name }}-${{ github.ref_name }}-darwin_${{ matrix.package-suffix }}.tar.gz # Builds the PBS Docker image build-and-push-pbs-docker: @@ -208,8 +210,6 @@ jobs: tags: | ghcr.io/commit-boost/pbs:${{ github.ref_name }} ${{ !contains(github.ref_name, 'rc') && 'ghcr.io/commit-boost/pbs:latest' || '' }} - cache-from: type=registry,ref=ghcr.io/commit-boost/pbs:buildcache - cache-to: type=registry,ref=ghcr.io/commit-boost/pbs:buildcache,mode=max file: provisioning/pbs.Dockerfile # Builds the Signer Docker image @@ -262,8 +262,6 @@ jobs: tags: | ghcr.io/commit-boost/signer:${{ github.ref_name }} ${{ !contains(github.ref_name, 'rc') && 'ghcr.io/commit-boost/signer:latest' || '' }} - cache-from: type=registry,ref=ghcr.io/commit-boost/signer:buildcache - cache-to: type=registry,ref=ghcr.io/commit-boost/signer:buildcache,mode=max file: provisioning/signer.Dockerfile # Creates a draft release on GitHub with the binaries From 58c61174c138f61a775031c2c28b00dac5038c64 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 14 May 2025 00:24:35 -0400 Subject: [PATCH 13/67] Made the CI workflow and justfile use the same toolchain as the source --- .github/workflows/ci.yml | 4 ++-- justfile | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 894d13da..ae9bad89 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -26,11 +26,11 @@ jobs: - name: Install Rust toolchain uses: dtolnay/rust-toolchain@master with: - toolchain: nightly-2025-02-26 + toolchain: 1.83 components: clippy, rustfmt - name: Install protoc - run: sudo apt-get install protobuf-compiler + run: sudo provisioning/protoc.sh - name: Setup just uses: extractions/setup-just@v2 diff --git a/justfile b/justfile index e6d11f62..b9250870 100644 --- a/justfile +++ b/justfile @@ -1,5 +1,5 @@ -# Makes sure the nightly-2025-02-26 toolchain is installed -toolchain := "nightly-2025-02-26" +# Makes sure the same toolchain as the source is installed +toolchain := 1.83 fmt: rustup toolchain install {{toolchain}} > /dev/null 2>&1 && \ From 45e581baabbed9ba7987c3260b286a877a22480b Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 14 May 2025 01:06:05 -0400 Subject: [PATCH 14/67] Revert "Made the CI workflow and justfile use the same toolchain as the source" This reverts commit 58c61174c138f61a775031c2c28b00dac5038c64. 
--- .github/workflows/ci.yml | 4 ++-- justfile | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ae9bad89..894d13da 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -26,11 +26,11 @@ jobs: - name: Install Rust toolchain uses: dtolnay/rust-toolchain@master with: - toolchain: 1.83 + toolchain: nightly-2025-02-26 components: clippy, rustfmt - name: Install protoc - run: sudo provisioning/protoc.sh + run: sudo apt-get install protobuf-compiler - name: Setup just uses: extractions/setup-just@v2 diff --git a/justfile b/justfile index b9250870..e6d11f62 100644 --- a/justfile +++ b/justfile @@ -1,5 +1,5 @@ -# Makes sure the same toolchain as the source is installed -toolchain := 1.83 +# Makes sure the nightly-2025-02-26 toolchain is installed +toolchain := "nightly-2025-02-26" fmt: rustup toolchain install {{toolchain}} > /dev/null 2>&1 && \ From 24a10c55f3bd558ad976852255b351bed31ef641 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 14 May 2025 02:26:15 -0400 Subject: [PATCH 15/67] Testing removal of OpenSSL vendored option --- Cargo.lock | 12 -------- Cargo.toml | 3 -- crates/common/Cargo.toml | 4 --- provisioning/build.Dockerfile | 56 +++++++++++------------------------ 4 files changed, 17 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 436d3b65..5ebc811a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1494,8 +1494,6 @@ dependencies = [ "ethereum_ssz_derive", "eyre", "jsonwebtoken", - "k256", - "openssl", "pbkdf2 0.12.2", "rand 0.9.0", "reqwest", @@ -3552,15 +3550,6 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" -[[package]] -name = "openssl-src" -version = "300.5.0+3.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8ce546f549326b0e6052b649198487d91320875da901e7bd11a06d1ee3f9c2f" -dependencies = [ - "cc", -] - [[package]] name = "openssl-sys" version = "0.9.106" @@ -3569,7 +3558,6 @@ checksum = "8bb61ea9811cc39e3c2069f40b8b8e2e70d8569b361f879786cc7ed48b777cdd" dependencies = [ "cc", "libc", - "openssl-src", "pkg-config", "vcpkg", ] diff --git a/Cargo.toml b/Cargo.toml index 14cddf82..aef26a94 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,9 +7,6 @@ edition = "2021" rust-version = "1.83" version = "0.7.0-rc.2" -[workspace.features] -openssl-vendored = ["crates/common/openssl-vendored"] - [workspace.dependencies] aes = "0.8" alloy = { version = "0.12", features = [ diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index 15c0b8d1..df78b046 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -41,7 +41,3 @@ tree_hash_derive.workspace = true unicode-normalization.workspace = true url.workspace = true jsonwebtoken.workspace = true -openssl = { version = "0.10", optional = true, features = ["vendored"] } - -[features] -openssl-vendored = ["openssl/vendored"] diff --git a/provisioning/build.Dockerfile b/provisioning/build.Dockerfile index a4eb3723..34ad27a5 100644 --- a/provisioning/build.Dockerfile +++ b/provisioning/build.Dockerfile @@ -1,15 +1,15 @@ # This will be the main build image FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED TARGET_CRATE +ARG TARGETOS TARGETARCH BUILDPLATFORM TARGET_CRATE WORKDIR /app FROM --platform=${BUILDPLATFORM} chef AS planner -ARG TARGETOS TARGETARCH BUILDPLATFORM 
OPENSSL_VENDORED TARGET_CRATE +ARG TARGETOS TARGETARCH BUILDPLATFORM TARGET_CRATE COPY . . RUN cargo chef prepare --recipe-path recipe.json FROM --platform=${BUILDPLATFORM} chef AS builder -ARG TARGETOS TARGETARCH BUILDPLATFORM OPENSSL_VENDORED TARGET_CRATE +ARG TARGETOS TARGETARCH BUILDPLATFORM TARGET_CRATE RUN test -n "$TARGET_CRATE" || (echo "TARGET_CRATE must be set to the service / binary you want to build" && false) ENV BUILD_VAR_SCRIPT=/tmp/env.sh COPY --from=planner /app/recipe.json recipe.json @@ -18,43 +18,33 @@ COPY --from=planner /app/recipe.json recipe.json RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ # We're on x64, cross-compiling for arm64 rustup target add aarch64-unknown-linux-gnu && \ + dpkg --add-architecture arm64 && \ apt update && \ - apt install -y gcc-aarch64-linux-gnu && \ + apt install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 && \ echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ echo "export RUSTFLAGS=\"-L /usr/aarch64-linux-gnu/lib -L $(dirname $(aarch64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ - if [ "$OPENSSL_VENDORED" != "true" ]; then \ - # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation - dpkg --add-architecture arm64 && \ - apt update && \ - apt install -y libssl-dev:arm64 zlib1g-dev:arm64 && \ - echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ - echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - fi; \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/aarch64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/aarch64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ elif [ "$BUILDPLATFORM" = "linux/arm64" -a "$TARGETARCH" = "amd64" ]; then \ # We're on arm64, cross-compiling for x64 rustup target add x86_64-unknown-linux-gnu && \ + dpkg --add-architecture amd64 && \ apt update && \ - apt install -y gcc-x86-64-linux-gnu && \ + apt install -y gcc-x86-64-linux-gnu libssl-dev:amd64 zlib1g-dev:amd64 && \ echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ echo "export RUSTFLAGS=\"-L /usr/x86_64-linux-gnu/lib -L $(dirname $(x86_64-linux-gnu-gcc -print-libgcc-file-name))\"" >> ${BUILD_VAR_SCRIPT} && \ - if [ "$OPENSSL_VENDORED" != "true" ]; then \ - # If we're linking to OpenSSL dynamically, we have to set it up for cross-compilation - dpkg --add-architecture amd64 && \ - apt update && \ - apt install -y libssl-dev:amd64 zlib1g-dev:amd64 && \ - echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ - echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> 
${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ - echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ - fi; \ + echo "export PKG_CONFIG_ALLOW_CROSS=true" >> ${BUILD_VAR_SCRIPT} && \ + echo "export PKG_CONFIG_LIBDIR=/usr/lib/x86_64-linux-gnu/pkgconfig" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_INCLUDE_DIR=/usr/include/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ + echo "export OPENSSL_LIB_DIR=/usr/lib/x86_64-linux-gnu" >> ${BUILD_VAR_SCRIPT}; \ fi # Run cook to prep the build @@ -64,14 +54,8 @@ RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ else \ echo "No cross-compilation needed"; \ fi && \ - if [ "$OPENSSL_VENDORED" = "true" ]; then \ - echo "Using vendored OpenSSL" && \ - FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ - else \ - echo "Using system OpenSSL"; \ - fi && \ export GIT_HASH=$(git rev-parse HEAD) && \ - cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json ${FEATURE_OPENSSL_VENDORED} + cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json # Get the latest Protoc since the one in the Debian repo is incredibly old COPY provisioning/protoc.sh provisioning/protoc.sh @@ -88,14 +72,8 @@ RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ else \ echo "No cross-compilation needed"; \ fi && \ - if [ "$OPENSSL_VENDORED" = "true" ]; then \ - echo "Using vendored OpenSSL" && \ - FEATURE_OPENSSL_VENDORED='--features openssl-vendored'; \ - else \ - echo "Using system OpenSSL"; \ - fi && \ export GIT_HASH=$(git rev-parse HEAD) && \ - cargo build ${TARGET_FLAG} --release --bin ${TARGET_CRATE} ${FEATURE_OPENSSL_VENDORED} && \ + cargo build ${TARGET_FLAG} --release --bin ${TARGET_CRATE} && \ if [ ! -z "$TARGET" ]; then \ # If we're cross-compiling, we need to move the binary out of the target dir mv target/${TARGET}/release/${TARGET_CRATE} target/release/${TARGET_CRATE}; \ From e36da545b00929146efbfa60eac1df0efb512d5e Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 14 May 2025 02:59:32 -0400 Subject: [PATCH 16/67] Updating just in the CI workflow --- .github/workflows/ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 894d13da..0b15367f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -30,12 +30,12 @@ jobs: components: clippy, rustfmt - name: Install protoc - run: sudo apt-get install protobuf-compiler + run: sudo provisioning/protoc.sh - name: Setup just - uses: extractions/setup-just@v2 + uses: extractions/setup-just@v3 with: - just-version: 1.5.0 + just-version: 1.40.0 - name: Check compilation run: cargo check From e7c6d193b15232dfa51e09f61c075e3c9941a18d Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 21 May 2025 02:56:00 -0400 Subject: [PATCH 17/67] Refactored the signer to support host and port config settings --- crates/cli/src/docker_init.rs | 34 +++++++++++++++++------ crates/common/src/config/constants.rs | 2 +- crates/common/src/config/signer.rs | 37 +++++++++++++++++++------ crates/common/src/signer/constants.rs | 1 + crates/common/src/signer/mod.rs | 2 ++ crates/signer/src/service.rs | 7 ++--- docs/docs/get_started/configuration.md | 8 ++++++ docs/docs/get_started/running/binary.md | 4 +-- 8 files changed, 72 insertions(+), 23 deletions(-) create mode 100644 crates/common/src/signer/constants.rs diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index 4453f597..652e3448 100644 --- 
a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -14,11 +14,11 @@ use cb_common::{ PBS_ENDPOINT_ENV, PBS_MODULE_NAME, PROXY_DIR_DEFAULT, PROXY_DIR_ENV, PROXY_DIR_KEYS_DEFAULT, PROXY_DIR_KEYS_ENV, PROXY_DIR_SECRETS_DEFAULT, PROXY_DIR_SECRETS_ENV, SIGNER_DEFAULT, SIGNER_DIR_KEYS_DEFAULT, SIGNER_DIR_KEYS_ENV, - SIGNER_DIR_SECRETS_DEFAULT, SIGNER_DIR_SECRETS_ENV, SIGNER_JWT_SECRET_ENV, SIGNER_KEYS_ENV, - SIGNER_MODULE_NAME, SIGNER_PORT_ENV, SIGNER_URL_ENV, + SIGNER_DIR_SECRETS_DEFAULT, SIGNER_DIR_SECRETS_ENV, SIGNER_ENDPOINT_ENV, + SIGNER_JWT_SECRET_ENV, SIGNER_KEYS_ENV, SIGNER_MODULE_NAME, SIGNER_URL_ENV, }, pbs::{BUILDER_API_PATH, GET_STATUS_PATH}, - signer::{ProxyStore, SignerLoader}, + signer::{ProxyStore, SignerLoader, DEFAULT_SIGNER_PORT}, types::ModuleId, utils::random_jwt_secret, }; @@ -73,7 +73,11 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re let mut targets = Vec::new(); // address for signer API communication - let signer_port = 20000; + let signer_port = if let Some(signer_config) = &cb_config.signer { + signer_config.port + } else { + DEFAULT_SIGNER_PORT + }; let signer_server = if let Some(SignerConfig { inner: SignerType::Remote { url }, .. }) = &cb_config.signer { url.to_string() @@ -334,10 +338,17 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re let mut signer_envs = IndexMap::from([ get_env_val(CONFIG_ENV, CONFIG_DEFAULT), get_env_same(JWTS_ENV), - get_env_uval(SIGNER_PORT_ENV, signer_port as u64), ]); - let mut ports = vec![]; + // Bind the signer API to 0.0.0.0 + let container_endpoint = + SocketAddr::from((Ipv4Addr::UNSPECIFIED, signer_config.port)); + let (key, val) = get_env_val(SIGNER_ENDPOINT_ENV, &container_endpoint.to_string()); + signer_envs.insert(key, val); + + let host_endpoint = SocketAddr::from((signer_config.host, signer_config.port)); + let mut ports = vec![format!("{}:{}", host_endpoint, signer_config.port)]; + warnings.push(format!("cb_signer has an exported port on {}", signer_config.port)); if let Some((key, val)) = chain_spec_env.clone() { signer_envs.insert(key, val); @@ -459,13 +470,20 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re let mut signer_envs = IndexMap::from([ get_env_val(CONFIG_ENV, CONFIG_DEFAULT), get_env_same(JWTS_ENV), - get_env_uval(SIGNER_PORT_ENV, signer_port as u64), get_env_val(DIRK_CERT_ENV, DIRK_CERT_DEFAULT), get_env_val(DIRK_KEY_ENV, DIRK_KEY_DEFAULT), get_env_val(DIRK_DIR_SECRETS_ENV, DIRK_DIR_SECRETS_DEFAULT), ]); - let mut ports = vec![]; + // Bind the signer API to 0.0.0.0 + let container_endpoint = + SocketAddr::from((Ipv4Addr::UNSPECIFIED, signer_config.port)); + let (key, val) = get_env_val(SIGNER_ENDPOINT_ENV, &container_endpoint.to_string()); + signer_envs.insert(key, val); + + let host_endpoint = SocketAddr::from((signer_config.host, signer_config.port)); + let mut ports = vec![format!("{}:{}", host_endpoint, signer_config.port)]; + warnings.push(format!("cb_signer has an exported port on {}", signer_config.port)); if let Some((key, val)) = chain_spec_env.clone() { signer_envs.insert(key, val); diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs index 422af7e7..d7799146 100644 --- a/crates/common/src/config/constants.rs +++ b/crates/common/src/config/constants.rs @@ -33,7 +33,7 @@ pub const SIGNER_IMAGE_DEFAULT: &str = "ghcr.io/commit-boost/signer:latest"; pub const SIGNER_MODULE_NAME: &str = "signer"; /// Where the signer module should open the 
server -pub const SIGNER_PORT_ENV: &str = "CB_SIGNER_PORT"; +pub const SIGNER_ENDPOINT_ENV: &str = "CB_SIGNER_ENDPOINT"; /// Comma separated list module_id=jwt_secret pub const JWTS_ENV: &str = "CB_JWTS"; diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index 9df6b948..dce97666 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -1,4 +1,8 @@ -use std::{collections::HashMap, path::PathBuf}; +use std::{ + collections::HashMap, + net::{Ipv4Addr, SocketAddr}, + path::PathBuf, +}; use eyre::{bail, OptionExt, Result}; use serde::{Deserialize, Serialize}; @@ -6,18 +10,25 @@ use tonic::transport::{Certificate, Identity}; use url::Url; use super::{ - constants::SIGNER_IMAGE_DEFAULT, load_jwt_secrets, utils::load_env_var, CommitBoostConfig, - SIGNER_PORT_ENV, + load_jwt_secrets, load_optional_env_var, utils::load_env_var, CommitBoostConfig, + SIGNER_ENDPOINT_ENV, SIGNER_IMAGE_DEFAULT, }; use crate::{ config::{DIRK_CA_CERT_ENV, DIRK_CERT_ENV, DIRK_DIR_SECRETS_ENV, DIRK_KEY_ENV}, - signer::{ProxyStore, SignerLoader}, + signer::{ProxyStore, SignerLoader, DEFAULT_SIGNER_PORT}, types::{Chain, ModuleId}, + utils::{default_host, default_u16}, }; #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "snake_case")] pub struct SignerConfig { + /// Host address to listen for signer API calls on + #[serde(default = "default_host")] + pub host: Ipv4Addr, + /// Port to listen for signer API calls on + #[serde(default = "default_u16::")] + pub port: u16, /// Docker image of the module #[serde(default = "default_signer")] pub docker_image: String, @@ -87,7 +98,7 @@ pub struct StartSignerConfig { pub chain: Chain, pub loader: Option, pub store: Option, - pub server_port: u16, + pub endpoint: SocketAddr, pub jwts: HashMap, pub dirk: Option, } @@ -97,7 +108,17 @@ impl StartSignerConfig { let config = CommitBoostConfig::from_env_path()?; let jwts = load_jwt_secrets()?; - let server_port = load_env_var(SIGNER_PORT_ENV)?.parse()?; + + // Load the server endpoint first from the env var, then the config, and finally + // the defaults + let endpoint = if let Some(endpoint) = load_optional_env_var(SIGNER_ENDPOINT_ENV) { + endpoint.parse()? + } else { + match config.signer { + Some(ref signer) => SocketAddr::from((signer.host, signer.port)), + None => SocketAddr::from((default_host(), DEFAULT_SIGNER_PORT)), + } + }; let signer = config.signer.ok_or_eyre("Signer config is missing")?.inner; @@ -105,7 +126,7 @@ impl StartSignerConfig { SignerType::Local { loader, store, .. 
} => Ok(StartSignerConfig { chain: config.chain, loader: Some(loader), - server_port, + endpoint, jwts, store, dirk: None, @@ -133,7 +154,7 @@ impl StartSignerConfig { Ok(StartSignerConfig { chain: config.chain, - server_port, + endpoint, jwts, loader: None, store, diff --git a/crates/common/src/signer/constants.rs b/crates/common/src/signer/constants.rs new file mode 100644 index 00000000..aa834f91 --- /dev/null +++ b/crates/common/src/signer/constants.rs @@ -0,0 +1 @@ +pub const DEFAULT_SIGNER_PORT: u16 = 20000; diff --git a/crates/common/src/signer/mod.rs b/crates/common/src/signer/mod.rs index e0a164a7..b6dce29d 100644 --- a/crates/common/src/signer/mod.rs +++ b/crates/common/src/signer/mod.rs @@ -1,8 +1,10 @@ +mod constants; mod loader; mod schemes; mod store; mod types; +pub use constants::*; pub use loader::*; pub use schemes::*; pub use store::*; diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index 28a1d934..a965f057 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, net::SocketAddr, sync::Arc}; +use std::{collections::HashMap, sync::Arc}; use axum::{ extract::{Request, State}, @@ -67,7 +67,7 @@ impl SigningService { let loaded_consensus = state.manager.read().await.available_consensus_signers(); let loaded_proxies = state.manager.read().await.available_proxy_signers(); - info!(version = COMMIT_BOOST_VERSION, commit_hash = COMMIT_BOOST_COMMIT, modules =? module_ids, port =? config.server_port, loaded_consensus, loaded_proxies, "Starting signing service"); + info!(version = COMMIT_BOOST_VERSION, commit_hash = COMMIT_BOOST_COMMIT, modules =? module_ids, endpoint =? config.endpoint, loaded_consensus, loaded_proxies, "Starting signing service"); SigningService::init_metrics(config.chain)?; @@ -81,8 +81,7 @@ impl SigningService { .route_layer(middleware::from_fn(log_request)) .route(STATUS_PATH, get(handle_status)); - let address = SocketAddr::from(([0, 0, 0, 0], config.server_port)); - let listener = TcpListener::bind(address).await?; + let listener = TcpListener::bind(config.endpoint).await?; axum::serve(listener, app).await.wrap_err("signer server exited") } diff --git a/docs/docs/get_started/configuration.md b/docs/docs/get_started/configuration.md index 4e642205..5d196619 100644 --- a/docs/docs/get_started/configuration.md +++ b/docs/docs/get_started/configuration.md @@ -65,6 +65,8 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml [signer] + port = 20000 + [signer.local.loader] format = "lighthouse" keys_path = "keys" @@ -111,6 +113,8 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml [signer] + port = 20000 + [signer.local.loader] format = "teku" keys_path = "keys" @@ -133,6 +137,8 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml [signer] + port = 20000 + [signer.local.loader] format = "lodestar" keys_path = "keys" @@ -299,6 +305,8 @@ port = 18550 url = "" [signer] +port = 20000 + [signer.loader] format = "lighthouse" keys_path = "/path/to/keys" diff --git a/docs/docs/get_started/running/binary.md b/docs/docs/get_started/running/binary.md index 3708ab19..ea5138c6 100644 --- a/docs/docs/get_started/running/binary.md +++ b/docs/docs/get_started/running/binary.md @@ -22,12 +22,12 @@ Modules need some environment variables to work correctly. 
### PBS Module - `CB_BUILDER_URLS`: optional, comma-separated list of urls to `events` modules where to post builder events. -- `CB_PBS_ENDPOINT`: optional, override the endpoint where the PBS module will open the port for the beacon node. +- `CB_PBS_ENDPOINT`: optional, override to specify the `IP:port` endpoint where the PBS module will open the port for the beacon node. - `CB_MUX_PATH_{ID}`: optional, override where to load mux validator keys for mux with `id=\{ID\}`. ### Signer Module - `CB_SIGNER_JWT_SECRET`: secret to use for JWT authentication with the Signer module. -- `CB_SIGNER_PORT`: required, port to open the signer server on. +- `CB_SIGNER_ENDPOINT`: optional, override to specify the `IP:port` endpoint to bind the signer server to. - For loading keys we currently support: - `CB_SIGNER_LOADER_FILE`: path to a `.json` with plaintext keys (for testing purposes only). - `CB_SIGNER_LOADER_FORMAT`, `CB_SIGNER_LOADER_KEYS_DIR` and `CB_SIGNER_LOADER_SECRETS_DIR`: paths to the `keys` and `secrets` directories or files (ERC-2335 style keystores, see [Signer config](../configuration/#signer-module) for more info). From 6117219d62f6243d263fdbabddc5bb387bfd2857 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 21 May 2025 12:24:02 -0400 Subject: [PATCH 18/67] Updated docs --- docs/docs/get_started/building.md | 1 - docs/docs/get_started/configuration.md | 25 +++++++++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/docs/docs/get_started/building.md b/docs/docs/get_started/building.md index d38b447f..f831de57 100644 --- a/docs/docs/get_started/building.md +++ b/docs/docs/get_started/building.md @@ -166,7 +166,6 @@ This will create a binary in `./target/release/commit-boost-signer`. To verify i The signer needs the following environment variables set: - `CB_CONFIG` = path of your config file. - `CB_JWTS` = a dummy key-value pair of [JWT](https://en.wikipedia.org/wiki/JSON_Web_Token) values for various services. Since we don't need them for the sake of just testing the binary, we can use something like `"test_jwts=dummy"`. -- `CB_SIGNER_PORT` = the network port to listen for signer requests on. Default is `20000`. Set these values, create the `keys` and `secrets` directories listed in the configuration file, and run the binary: diff --git a/docs/docs/get_started/configuration.md b/docs/docs/get_started/configuration.md index 5d196619..efe9da3f 100644 --- a/docs/docs/get_started/configuration.md +++ b/docs/docs/get_started/configuration.md @@ -39,6 +39,13 @@ Commit-Boost supports both local and remote signers. The signer module is respon To start a local signer module, you need to include its parameters in the config file ```toml +[pbs] +... +with_signer = true + +[signer] +port = 20000 + [signer.local.loader] format = "lighthouse" keys_path = "/path/to/keys" @@ -64,6 +71,10 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml + [pbs] + ... + with_signer = true + [signer] port = 20000 @@ -89,7 +100,13 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml + [pbs] + ... + with_signer = true + [signer] + port = 20000 + [signer.local.loader] format = "prysm" keys_path = "wallet/direct/accounts/all-accounts.keystore.json" @@ -112,6 +129,10 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml + [pbs] + ... 
+ with_signer = true + [signer] port = 20000 @@ -136,6 +157,10 @@ We currently support Lighthouse, Prysm, Teku and Lodestar's keystores so it's ea #### Config: ```toml + [pbs] + ... + with_signer = true + [signer] port = 20000 From c0f591d5656aed3f2b705583bcd95d88abe45394 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 21 May 2025 12:41:09 -0400 Subject: [PATCH 19/67] Fixing Clippy in CI workflow --- .github/workflows/ci.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 0b15367f..3be3a7da 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,6 +28,9 @@ jobs: with: toolchain: nightly-2025-02-26 components: clippy, rustfmt + + - name: Install Clippy on prod toolchain + run: rustup component add --toolchain 1.83.0-x86_64-unknown-linux-gnu clippy - name: Install protoc run: sudo provisioning/protoc.sh From adbd34a02d52a86251258cc82be5f1ebf47474fe Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 28 May 2025 01:07:51 -0400 Subject: [PATCH 20/67] Removed obviated CI setup --- .github/workflows/ci.yml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 3be3a7da..0b15367f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,9 +28,6 @@ jobs: with: toolchain: nightly-2025-02-26 components: clippy, rustfmt - - - name: Install Clippy on prod toolchain - run: rustup component add --toolchain 1.83.0-x86_64-unknown-linux-gnu clippy - name: Install protoc run: sudo provisioning/protoc.sh From e3488b34f8629fe65071688165c17579d4b9fd23 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 20 May 2025 15:40:27 -0400 Subject: [PATCH 21/67] Minor dedup of RwLock guard acquisition --- crates/signer/src/service.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index a965f057..cce8038e 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -64,8 +64,14 @@ impl SigningService { jwts: config.jwts.into(), }; - let loaded_consensus = state.manager.read().await.available_consensus_signers(); - let loaded_proxies = state.manager.read().await.available_proxy_signers(); + // Get the signer counts + let loaded_consensus: usize; + let loaded_proxies: usize; + { + let manager = state.manager.read().await; + loaded_consensus = manager.available_consensus_signers(); + loaded_proxies = manager.available_proxy_signers(); + } info!(version = COMMIT_BOOST_VERSION, commit_hash = COMMIT_BOOST_COMMIT, modules =? module_ids, endpoint =? 
config.endpoint, loaded_consensus, loaded_proxies, "Starting signing service"); From c3d7ec40f92a4dc2c4481afd517d81ebf9e9b7cc Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 22 May 2025 00:58:49 -0400 Subject: [PATCH 22/67] Added rate limiting for signer clients with repeated JWT auth failures --- crates/common/src/config/constants.rs | 5 ++ crates/common/src/config/signer.rs | 51 ++++++++++- crates/common/src/signer/constants.rs | 5 ++ crates/common/src/utils.rs | 4 + crates/signer/src/error.rs | 6 ++ crates/signer/src/service.rs | 116 ++++++++++++++++++++++++-- 6 files changed, 176 insertions(+), 11 deletions(-) diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs index d7799146..5941a42b 100644 --- a/crates/common/src/config/constants.rs +++ b/crates/common/src/config/constants.rs @@ -35,6 +35,11 @@ pub const SIGNER_MODULE_NAME: &str = "signer"; /// Where the signer module should open the server pub const SIGNER_ENDPOINT_ENV: &str = "CB_SIGNER_ENDPOINT"; +// JWT authentication settings +pub const SIGNER_JWT_AUTH_FAIL_LIMIT_ENV: &str = "CB_SIGNER_JWT_AUTH_FAIL_LIMIT"; +pub const SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV: &str = + "CB_SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS"; + /// Comma separated list module_id=jwt_secret pub const JWTS_ENV: &str = "CB_JWTS"; /// The JWT secret for the signer to validate the modules requests diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index dce97666..6eb870cf 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -11,13 +11,17 @@ use url::Url; use super::{ load_jwt_secrets, load_optional_env_var, utils::load_env_var, CommitBoostConfig, - SIGNER_ENDPOINT_ENV, SIGNER_IMAGE_DEFAULT, + SIGNER_ENDPOINT_ENV, SIGNER_IMAGE_DEFAULT, SIGNER_JWT_AUTH_FAIL_LIMIT_ENV, + SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV, }; use crate::{ config::{DIRK_CA_CERT_ENV, DIRK_CERT_ENV, DIRK_DIR_SECRETS_ENV, DIRK_KEY_ENV}, - signer::{ProxyStore, SignerLoader, DEFAULT_SIGNER_PORT}, + signer::{ + ProxyStore, SignerLoader, DEFAULT_JWT_AUTH_FAIL_LIMIT, + DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS, DEFAULT_SIGNER_PORT, + }, types::{Chain, ModuleId}, - utils::{default_host, default_u16}, + utils::{default_host, default_u16, default_u32}, }; #[derive(Debug, Serialize, Deserialize, Clone)] @@ -26,12 +30,24 @@ pub struct SignerConfig { /// Host address to listen for signer API calls on #[serde(default = "default_host")] pub host: Ipv4Addr, + /// Port to listen for signer API calls on #[serde(default = "default_u16::")] pub port: u16, + /// Docker image of the module #[serde(default = "default_signer")] pub docker_image: String, + + /// Number of JWT auth failures before rate limiting an endpoint + #[serde(default = "default_u32::")] + pub jwt_auth_fail_limit: u32, + + /// Duration in seconds to rate limit an endpoint after the JWT auth failure + /// limit has been reached + #[serde(default = "default_u32::")] + pub jwt_auth_fail_timeout_seconds: u32, + /// Inner type-specific configuration #[serde(flatten)] pub inner: SignerType, @@ -100,6 +116,8 @@ pub struct StartSignerConfig { pub store: Option, pub endpoint: SocketAddr, pub jwts: HashMap, + pub jwt_auth_fail_limit: u32, + pub jwt_auth_fail_timeout_seconds: u32, pub dirk: Option, } @@ -120,6 +138,29 @@ impl StartSignerConfig { } }; + // Load the JWT auth fail limit the same way + let jwt_auth_fail_limit = + if let Some(limit) = load_optional_env_var(SIGNER_JWT_AUTH_FAIL_LIMIT_ENV) { + limit.parse()? 
+ } else { + match config.signer { + Some(ref signer) => signer.jwt_auth_fail_limit, + None => DEFAULT_JWT_AUTH_FAIL_LIMIT, + } + }; + + // Load the JWT auth fail timeout the same way + let jwt_auth_fail_timeout_seconds = if let Some(timeout) = + load_optional_env_var(SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV) + { + timeout.parse()? + } else { + match config.signer { + Some(ref signer) => signer.jwt_auth_fail_timeout_seconds, + None => DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS, + } + }; + let signer = config.signer.ok_or_eyre("Signer config is missing")?.inner; match signer { @@ -128,6 +169,8 @@ impl StartSignerConfig { loader: Some(loader), endpoint, jwts, + jwt_auth_fail_limit, + jwt_auth_fail_timeout_seconds, store, dirk: None, }), @@ -156,6 +199,8 @@ impl StartSignerConfig { chain: config.chain, endpoint, jwts, + jwt_auth_fail_limit, + jwt_auth_fail_timeout_seconds, loader: None, store, dirk: Some(DirkConfig { diff --git a/crates/common/src/signer/constants.rs b/crates/common/src/signer/constants.rs index aa834f91..45e3ce23 100644 --- a/crates/common/src/signer/constants.rs +++ b/crates/common/src/signer/constants.rs @@ -1 +1,6 @@ pub const DEFAULT_SIGNER_PORT: u16 = 20000; + +// Rate limit signer API requests for 5 minutes after the endpoint has 3 JWT +// auth failures +pub const DEFAULT_JWT_AUTH_FAIL_LIMIT: u32 = 3; +pub const DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS: u32 = 5 * 60; diff --git a/crates/common/src/utils.rs b/crates/common/src/utils.rs index 37119580..a1dcb7cb 100644 --- a/crates/common/src/utils.rs +++ b/crates/common/src/utils.rs @@ -137,6 +137,10 @@ pub const fn default_u64() -> u64 { U } +pub const fn default_u32() -> u32 { + U +} + pub const fn default_u16() -> u16 { U } diff --git a/crates/signer/src/error.rs b/crates/signer/src/error.rs index 477e9e42..a2a113f3 100644 --- a/crates/signer/src/error.rs +++ b/crates/signer/src/error.rs @@ -27,6 +27,9 @@ pub enum SignerModuleError { #[error("internal error: {0}")] Internal(String), + + #[error("rate limited for {0} more seconds")] + RateLimited(f64), } impl IntoResponse for SignerModuleError { @@ -45,6 +48,9 @@ impl IntoResponse for SignerModuleError { (StatusCode::INTERNAL_SERVER_ERROR, "internal error".to_string()) } SignerModuleError::SignerError(err) => (StatusCode::BAD_REQUEST, err.to_string()), + SignerModuleError::RateLimited(duration) => { + (StatusCode::TOO_MANY_REQUESTS, format!("rate limited for {duration:?}")) + } } .into_response() } diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index cce8038e..3ca1d5ac 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -1,7 +1,12 @@ -use std::{collections::HashMap, sync::Arc}; +use std::{ + collections::HashMap, + net::SocketAddr, + sync::Arc, + time::{Duration, Instant}, +}; use axum::{ - extract::{Request, State}, + extract::{ConnectInfo, Request, State}, http::StatusCode, middleware::{self, Next}, response::{IntoResponse, Response}, @@ -41,13 +46,30 @@ use crate::{ /// Implements the Signer API and provides a service for signing requests pub struct SigningService; +// Tracker for a peer's JWT failures +struct JwtAuthFailureInfo { + // Number of auth failures since the first failure was tracked + failure_count: u32, + + // Time of the last auth failure + last_failure: Instant, +} + #[derive(Clone)] struct SigningState { /// Manager handling different signing methods manager: Arc>, + /// Map of modules ids to JWT secrets. 
This also acts as registry of all /// modules running jwts: Arc>, + + /// Map of JWT failures per peer + jwt_auth_failures: Arc>>, + + // JWT auth failure settings + jwt_auth_fail_limit: u32, + jwt_auth_fail_timeout: Duration, } impl SigningService { @@ -62,6 +84,9 @@ impl SigningService { let state = SigningState { manager: Arc::new(RwLock::new(start_manager(config.clone()).await?)), jwts: config.jwts.into(), + jwt_auth_failures: Arc::new(RwLock::new(HashMap::new())), + jwt_auth_fail_limit: config.jwt_auth_fail_limit, + jwt_auth_fail_timeout: Duration::from_secs(config.jwt_auth_fail_timeout_seconds as u64), }; // Get the signer counts @@ -73,7 +98,17 @@ impl SigningService { loaded_proxies = manager.available_proxy_signers(); } - info!(version = COMMIT_BOOST_VERSION, commit_hash = COMMIT_BOOST_COMMIT, modules =? module_ids, endpoint =? config.endpoint, loaded_consensus, loaded_proxies, "Starting signing service"); + info!( + version = COMMIT_BOOST_VERSION, + commit_hash = COMMIT_BOOST_COMMIT, + modules =? module_ids, + endpoint =? config.endpoint, + loaded_consensus, + loaded_proxies, + jwt_auth_fail_limit =? state.jwt_auth_fail_limit, + jwt_auth_fail_timeout =? state.jwt_auth_fail_timeout, + "Starting signing service" + ); SigningService::init_metrics(config.chain)?; @@ -85,7 +120,8 @@ impl SigningService { .route(RELOAD_PATH, post(handle_reload)) .with_state(state.clone()) .route_layer(middleware::from_fn(log_request)) - .route(STATUS_PATH, get(handle_status)); + .route(STATUS_PATH, get(handle_status)) + .into_make_service_with_connect_info::(); let listener = TcpListener::bind(config.endpoint).await?; @@ -101,9 +137,76 @@ impl SigningService { async fn jwt_auth( State(state): State, TypedHeader(auth): TypedHeader>, + addr: ConnectInfo, mut req: Request, next: Next, ) -> Result { + // Check if the request needs to be rate limited + let client_ip = addr.ip().to_string(); + check_jwt_rate_limit(&state, &client_ip).await?; + + // Process JWT authorization + match check_jwt_auth(&auth, &state).await { + Ok(module_id) => { + req.extensions_mut().insert(module_id); + Ok(next.run(req).await) + } + Err(SignerModuleError::Unauthorized) => { + let mut failures = state.jwt_auth_failures.write().await; + let failure_info = failures + .entry(client_ip) + .or_insert(JwtAuthFailureInfo { failure_count: 0, last_failure: Instant::now() }); + failure_info.failure_count += 1; + failure_info.last_failure = Instant::now(); + Err(SignerModuleError::Unauthorized) + } + Err(err) => Err(err), + } +} + +/// Checks if the incoming request needs to be rate limited due to previous JWT +/// authentication failures +async fn check_jwt_rate_limit( + state: &SigningState, + client_ip: &String, +) -> Result<(), SignerModuleError> { + let mut failures = state.jwt_auth_failures.write().await; + + // Ignore clients that don't have any failures + if let Some(failure_info) = failures.get(client_ip) { + // If the last failure was more than the timeout ago, remove this entry so it's + // eligible again + let elapsed = failure_info.last_failure.elapsed(); + if elapsed > state.jwt_auth_fail_timeout { + debug!("Removing {client_ip} from JWT auth failure list"); + failures.remove(client_ip); + return Ok(()); + } + + // If the failure threshold hasn't been met yet, don't rate limit + if failure_info.failure_count < state.jwt_auth_fail_limit { + debug!( + "Client {client_ip} has {}/{} JWT auth failures, no rate limit applied", + failure_info.failure_count, state.jwt_auth_fail_limit + ); + return Ok(()); + } + + // Rate limit the 
request + let remaining = state.jwt_auth_fail_timeout - elapsed; + warn!("Client {client_ip} is rate limited for {remaining:?} more seconds due to JWT auth failures"); + return Err(SignerModuleError::RateLimited(remaining.as_secs_f64())); + } + + debug!("Client {client_ip} has no JWT auth failures, no rate limit applied"); + Ok(()) +} + +/// Checks if a request can successfully authenticate with the JWT secret +async fn check_jwt_auth( + auth: &Authorization, + state: &SigningState, +) -> Result { let jwt: Jwt = auth.token().to_string().into(); // We first need to decode it to get the module id and then validate it @@ -122,10 +225,7 @@ async fn jwt_auth( error!("Unauthorized request. Invalid JWT: {e}"); SignerModuleError::Unauthorized })?; - - req.extensions_mut().insert(module_id); - - Ok(next.run(req).await) + Ok(module_id) } /// Requests logging middleware layer From 9ddad6426a1fcdeb441adb259b6b2408729f1937 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 22 May 2025 02:06:21 -0400 Subject: [PATCH 23/67] Added Signer config validation --- Cargo.lock | 11 +++++++++++ Cargo.toml | 1 + crates/common/Cargo.toml | 1 + crates/common/src/config/mod.rs | 3 +++ crates/common/src/config/signer.rs | 21 ++++++++++++++++++++- tests/tests/config.rs | 12 ++++++------ 6 files changed, 42 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5ebc811a..b80a4542 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1488,6 +1488,7 @@ dependencies = [ "cipher 0.4.4", "ctr 0.9.2", "derive_more 2.0.1", + "docker-image", "eth2_keystore", "ethereum_serde_utils", "ethereum_ssz 0.8.3", @@ -2158,6 +2159,16 @@ dependencies = [ "serde_yaml", ] +[[package]] +name = "docker-image" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ed901b8f2157bafce6e96f39217f7b1a4af32d84266d251ed7c22ce001f0b" +dependencies = [ + "lazy_static", + "regex", +] + [[package]] name = "doctest-file" version = "1.0.0" diff --git a/Cargo.toml b/Cargo.toml index aef26a94..b02ad0da 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,6 +36,7 @@ color-eyre = "0.6.3" ctr = "0.9.2" derive_more = { version = "2.0.1", features = ["deref", "display", "from", "into"] } docker-compose-types = "0.16.0" +docker-image = "0.2.1" eth2_keystore = { git = "https://github.com/sigp/lighthouse", rev = "8d058e4040b765a96aa4968f4167af7571292be2" } ethereum_serde_utils = "0.7.0" ethereum_ssz = "0.8" diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index df78b046..c3955d4a 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -16,6 +16,7 @@ blst.workspace = true cipher.workspace = true ctr.workspace = true derive_more.workspace = true +docker-image.workspace = true eth2_keystore.workspace = true ethereum_serde_utils.workspace = true ethereum_ssz.workspace = true diff --git a/crates/common/src/config/mod.rs b/crates/common/src/config/mod.rs index 75fd3c9d..b782999b 100644 --- a/crates/common/src/config/mod.rs +++ b/crates/common/src/config/mod.rs @@ -41,6 +41,9 @@ impl CommitBoostConfig { /// Validate config pub async fn validate(&self) -> Result<()> { self.pbs.pbs_config.validate(self.chain).await?; + if let Some(signer) = &self.signer { + signer.validate().await?; + } Ok(()) } diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index 6eb870cf..01b50cde 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -4,7 +4,8 @@ use std::{ path::PathBuf, }; -use eyre::{bail, OptionExt, Result}; +use 
docker_image::DockerImage; +use eyre::{bail, ensure, OptionExt, Result}; use serde::{Deserialize, Serialize}; use tonic::transport::{Certificate, Identity}; use url::Url; @@ -40,6 +41,7 @@ pub struct SignerConfig { pub docker_image: String, /// Number of JWT auth failures before rate limiting an endpoint + /// If set to 0, no rate limiting will be applied #[serde(default = "default_u32::")] pub jwt_auth_fail_limit: u32, @@ -53,6 +55,23 @@ pub struct SignerConfig { pub inner: SignerType, } +impl SignerConfig { + /// Validate the signer config + pub async fn validate(&self) -> Result<()> { + // Port must be positive + ensure!(self.port > 0, "Port must be positive"); + + // The Docker tag must parse + ensure!(!self.docker_image.is_empty(), "Docker image is empty"); + ensure!( + DockerImage::parse(&self.docker_image).is_ok(), + format!("Invalid Docker image: {}", self.docker_image) + ); + + Ok(()) + } +} + fn default_signer() -> String { SIGNER_IMAGE_DEFAULT.to_string() } diff --git a/tests/tests/config.rs b/tests/tests/config.rs index dafd96d9..f6f31d96 100644 --- a/tests/tests/config.rs +++ b/tests/tests/config.rs @@ -37,11 +37,11 @@ async fn test_load_pbs_happy() -> Result<()> { // Docker and general settings assert_eq!(config.pbs.docker_image, "ghcr.io/commit-boost/pbs:latest"); - assert_eq!(config.pbs.with_signer, false); + assert!(!config.pbs.with_signer); assert_eq!(config.pbs.pbs_config.host, "127.0.0.1".parse::().unwrap()); assert_eq!(config.pbs.pbs_config.port, 18550); - assert_eq!(config.pbs.pbs_config.relay_check, true); - assert_eq!(config.pbs.pbs_config.wait_all_registrations, true); + assert!(config.pbs.pbs_config.relay_check); + assert!(config.pbs.pbs_config.wait_all_registrations); // Timeouts assert_eq!(config.pbs.pbs_config.timeout_get_header_ms, 950); @@ -49,12 +49,12 @@ async fn test_load_pbs_happy() -> Result<()> { assert_eq!(config.pbs.pbs_config.timeout_register_validator_ms, 3000); // Bid settings and validation - assert_eq!(config.pbs.pbs_config.skip_sigverify, false); + assert!(!config.pbs.pbs_config.skip_sigverify); dbg!(&config.pbs.pbs_config.min_bid_wei); dbg!(&U256::from(0.5)); assert_eq!(config.pbs.pbs_config.min_bid_wei, U256::from((0.5 * WEI_PER_ETH as f64) as u64)); assert_eq!(config.pbs.pbs_config.late_in_slot_time_ms, 2000); - assert_eq!(config.pbs.pbs_config.extra_validation_enabled, false); + assert!(!config.pbs.pbs_config.extra_validation_enabled); assert_eq!( config.pbs.pbs_config.rpc_url, Some("https://ethereum-holesky-rpc.publicnode.com".parse::().unwrap()) @@ -64,7 +64,7 @@ async fn test_load_pbs_happy() -> Result<()> { let relay = &config.relays[0]; assert_eq!(relay.id, Some("example-relay".to_string())); assert_eq!(relay.entry.url, "http://0xa1cec75a3f0661e99299274182938151e8433c61a19222347ea1313d839229cb4ce4e3e5aa2bdeb71c8fcf1b084963c2@abc.xyz".parse::().unwrap()); - assert_eq!(relay.enable_timing_games, false); + assert!(!relay.enable_timing_games); assert_eq!(relay.target_first_request_ms, Some(200)); assert_eq!(relay.frequency_get_header_ms, Some(300)); From c62185e13f301a3abcab32f9a28ed42f1185d7e3 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 22 May 2025 06:54:50 -0400 Subject: [PATCH 24/67] Started unit test setup for the Signer --- Cargo.lock | 6 +++-- Cargo.toml | 1 + tests/Cargo.toml | 2 ++ tests/src/utils.rs | 44 +++++++++++++++++++++++++++++-- tests/tests/pbs_get_header.rs | 2 +- tests/tests/signer_jwt_auth.rs | 47 ++++++++++++++++++++++++++++++++++ 6 files changed, 97 insertions(+), 5 deletions(-) create mode 100644 
tests/tests/signer_jwt_auth.rs diff --git a/Cargo.lock b/Cargo.lock index b80a4542..17d43e3e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1590,9 +1590,11 @@ dependencies = [ "axum 0.8.1", "cb-common", "cb-pbs", + "cb-signer", "eyre", "reqwest", "serde_json", + "tempfile", "tokio", "tracing", "tracing-subscriber", @@ -4874,9 +4876,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.19.0" +version = "3.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "488960f40a3fd53d72c2a29a58722561dee8afdd175bd88e3db4677d7b2ba600" +checksum = "e8a64e3985349f2441a1a9ef0b853f869006c3855f2cda6862a94d26ebb9d6a1" dependencies = [ "fastrand", "getrandom 0.3.1", diff --git a/Cargo.toml b/Cargo.toml index b02ad0da..5294508f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -57,6 +57,7 @@ serde_json = "1.0.117" serde_yaml = "0.9.33" sha2 = "0.10.8" ssz_types = "0.10" +tempfile = "3.20.0" thiserror = "2.0.12" tokio = { version = "1.37.0", features = ["full"] } toml = "0.8.13" diff --git a/tests/Cargo.toml b/tests/Cargo.toml index ce273ae7..f1b5c9d9 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -9,9 +9,11 @@ alloy.workspace = true axum.workspace = true cb-common.workspace = true cb-pbs.workspace = true +cb-signer.workspace = true eyre.workspace = true reqwest.workspace = true serde_json.workspace = true +tempfile.workspace = true tokio.workspace = true tracing.workspace = true tracing-subscriber.workspace = true diff --git a/tests/src/utils.rs b/tests/src/utils.rs index f2ae9157..e8561931 100644 --- a/tests/src/utils.rs +++ b/tests/src/utils.rs @@ -1,13 +1,22 @@ use std::{ + collections::HashMap, net::{Ipv4Addr, SocketAddr}, sync::{Arc, Once}, }; use alloy::{primitives::U256, rpc::types::beacon::BlsPublicKey}; use cb_common::{ - config::{PbsConfig, PbsModuleConfig, RelayConfig}, + config::{ + PbsConfig, PbsModuleConfig, RelayConfig, SignerConfig, SignerType, StartSignerConfig, + SIGNER_IMAGE_DEFAULT, + }, pbs::{RelayClient, RelayEntry}, - types::Chain, + signer::{ + SignerLoader, DEFAULT_JWT_AUTH_FAIL_LIMIT, DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS, + DEFAULT_SIGNER_PORT, + }, + types::{Chain, ModuleId}, + utils::default_host, }; use eyre::Result; @@ -91,3 +100,34 @@ pub fn to_pbs_config( muxes: None, } } + +pub fn get_signer_config(loader: SignerLoader) -> SignerConfig { + SignerConfig { + host: default_host(), + port: DEFAULT_SIGNER_PORT, + docker_image: SIGNER_IMAGE_DEFAULT.to_string(), + jwt_auth_fail_limit: DEFAULT_JWT_AUTH_FAIL_LIMIT, + jwt_auth_fail_timeout_seconds: DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS, + inner: SignerType::Local { loader, store: None }, + } +} + +pub fn get_start_signer_config( + signer_config: SignerConfig, + chain: Chain, + jwts: HashMap, +) -> StartSignerConfig { + match signer_config.inner { + SignerType::Local { loader, .. 
} => StartSignerConfig { + chain, + loader: Some(loader), + store: None, + endpoint: SocketAddr::new(signer_config.host.into(), signer_config.port), + jwts, + jwt_auth_fail_limit: signer_config.jwt_auth_fail_limit, + jwt_auth_fail_timeout_seconds: signer_config.jwt_auth_fail_timeout_seconds, + dirk: None, + }, + _ => panic!("Only local signers are supported in tests"), + } +} diff --git a/tests/tests/pbs_get_header.rs b/tests/tests/pbs_get_header.rs index 422a71a3..747d460c 100644 --- a/tests/tests/pbs_get_header.rs +++ b/tests/tests/pbs_get_header.rs @@ -23,7 +23,7 @@ use tree_hash::TreeHash; async fn test_get_header() -> Result<()> { setup_test_env(); let signer = random_secret(); - let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()).into(); + let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()); let chain = Chain::Holesky; let pbs_port = 3200; diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs new file mode 100644 index 00000000..989cdb61 --- /dev/null +++ b/tests/tests/signer_jwt_auth.rs @@ -0,0 +1,47 @@ +use std::{collections::HashMap, fs, time::Duration}; + +use cb_common::{ + signer::{SignerLoader, ValidatorKeysFormat}, + types::{Chain, ModuleId}, +}; +use cb_signer::service::SigningService; +use cb_tests::utils::{get_signer_config, get_start_signer_config, setup_test_env}; +use eyre::Result; +use tempfile::tempdir; + +#[tokio::test] +async fn test_signer_jwt_auth_success() -> Result<()> { + setup_test_env(); + let chain = Chain::Hoodi; + + // Mock JWT secrets + let mut jwts = HashMap::new(); + jwts.insert(ModuleId("test-module".to_string()), "test-jwt-secret".to_string()); + + // Create a temp folder and key structure + let test_folder = tempdir()?; + let test_path = test_folder.path(); + let keys_path = test_path.join("keys"); + let secrets_path = test_path.join("secrets"); + fs::create_dir_all(&keys_path)?; + fs::create_dir_all(&secrets_path)?; + + // Create a signer config + let loader = SignerLoader::ValidatorsDir { + keys_path, + secrets_path, + format: ValidatorKeysFormat::Lighthouse, + }; + let config = get_signer_config(loader); + let start_config = get_start_signer_config(config, chain, jwts); + + // Run the Signer + tokio::spawn(SigningService::run(start_config)); + + // leave some time to start servers + tokio::time::sleep(Duration::from_millis(100)).await; + + // TODO: simple client to test the JWT auth endpoint + + Ok(()) +} From dc73c6215d604cd6f0165801ac7213b462953ebd Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 28 May 2025 00:52:33 -0400 Subject: [PATCH 25/67] Finished a basic signer module unit test --- tests/tests/signer_jwt_auth.rs | 62 +++++++++++++++++++++++++--------- 1 file changed, 46 insertions(+), 16 deletions(-) diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs index 989cdb61..0e9e97eb 100644 --- a/tests/tests/signer_jwt_auth.rs +++ b/tests/tests/signer_jwt_auth.rs @@ -1,13 +1,19 @@ -use std::{collections::HashMap, fs, time::Duration}; +use std::{collections::HashMap, time::Duration}; +use alloy::{hex, primitives::FixedBytes}; use cb_common::{ + commit::{constants::GET_PUBKEYS_PATH, request::GetPubkeysResponse}, signer::{SignerLoader, ValidatorKeysFormat}, types::{Chain, ModuleId}, + utils::create_jwt, }; use cb_signer::service::SigningService; use cb_tests::utils::{get_signer_config, get_start_signer_config, setup_test_env}; use eyre::Result; -use tempfile::tempdir; +use tracing::info; + +const JWT_MODULE: &str = "test-module"; +const JWT_SECRET: &str = 
"test-jwt-secret"; #[tokio::test] async fn test_signer_jwt_auth_success() -> Result<()> { @@ -15,33 +21,57 @@ async fn test_signer_jwt_auth_success() -> Result<()> { let chain = Chain::Hoodi; // Mock JWT secrets + let module_id = ModuleId(JWT_MODULE.to_string()); let mut jwts = HashMap::new(); - jwts.insert(ModuleId("test-module".to_string()), "test-jwt-secret".to_string()); - - // Create a temp folder and key structure - let test_folder = tempdir()?; - let test_path = test_folder.path(); - let keys_path = test_path.join("keys"); - let secrets_path = test_path.join("secrets"); - fs::create_dir_all(&keys_path)?; - fs::create_dir_all(&secrets_path)?; + jwts.insert(module_id.clone(), JWT_SECRET.to_string()); // Create a signer config let loader = SignerLoader::ValidatorsDir { - keys_path, - secrets_path, + keys_path: "data/keystores/keys".into(), + secrets_path: "data/keystores/secrets".into(), format: ValidatorKeysFormat::Lighthouse, }; let config = get_signer_config(loader); + let host = config.host; + let port = config.port; let start_config = get_start_signer_config(config, chain, jwts); // Run the Signer - tokio::spawn(SigningService::run(start_config)); + let server_handle = tokio::spawn(SigningService::run(start_config)); - // leave some time to start servers + // Make sure the server is running tokio::time::sleep(Duration::from_millis(100)).await; + if server_handle.is_finished() { + return Err(eyre::eyre!( + "Signer service failed to start: {}", + server_handle.await.unwrap_err() + )); + } + + // Create a JWT header + let jwt = create_jwt(&module_id, JWT_SECRET)?; + + // Run a pubkeys request + let client = reqwest::Client::new(); + let url = format!("http://{}:{}{}", host, port, GET_PUBKEYS_PATH); + let response = client.get(&url).bearer_auth(jwt).send().await?; + assert!(response.status().is_success(), "Failed to authenticate with JWT"); + let pubkey_json = response.json::().await?; - // TODO: simple client to test the JWT auth endpoint + // Verify the expected pubkeys are returned + assert_eq!(pubkey_json.keys.len(), 2); + let expected_pubkeys = vec![ + FixedBytes::new(hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4")), + FixedBytes::new(hex!("b3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9")), + ]; + for expected in expected_pubkeys { + assert!( + pubkey_json.keys.iter().any(|k| k.consensus == expected), + "Expected pubkey not found: {:?}", + expected + ); + info!("Server returned expected pubkey: {:?}", expected); + } Ok(()) } From 6c3d9670f4ff7d9e6fa7e5b8b497deea01043347 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 28 May 2025 01:15:44 -0400 Subject: [PATCH 26/67] Added a JWT failure unit test --- tests/tests/signer_jwt_auth.rs | 49 ++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs index 0e9e97eb..fd111814 100644 --- a/tests/tests/signer_jwt_auth.rs +++ b/tests/tests/signer_jwt_auth.rs @@ -75,3 +75,52 @@ async fn test_signer_jwt_auth_success() -> Result<()> { Ok(()) } + +#[tokio::test] +async fn test_signer_jwt_auth_fail() -> Result<()> { + setup_test_env(); + let chain = Chain::Hoodi; + + // Mock JWT secrets + let module_id = ModuleId(JWT_MODULE.to_string()); + let mut jwts = HashMap::new(); + jwts.insert(module_id.clone(), JWT_SECRET.to_string()); + + // Create a signer config + let loader = SignerLoader::ValidatorsDir { + keys_path: "data/keystores/keys".into(), + 
secrets_path: "data/keystores/secrets".into(), + format: ValidatorKeysFormat::Lighthouse, + }; + let config = get_signer_config(loader); + let host = config.host; + let port = config.port; + let start_config = get_start_signer_config(config, chain, jwts); + + // Run the Signer + let server_handle = tokio::spawn(SigningService::run(start_config)); + + // Make sure the server is running + tokio::time::sleep(Duration::from_millis(100)).await; + if server_handle.is_finished() { + return Err(eyre::eyre!( + "Signer service failed to start: {}", + server_handle.await.unwrap_err() + )); + } + + // Create a JWT header + let jwt = create_jwt(&module_id, "incorrect secret")?; + + // Run a pubkeys request + let client = reqwest::Client::new(); + let url = format!("http://{}:{}{}", host, port, GET_PUBKEYS_PATH); + let response = client.get(&url).bearer_auth(jwt).send().await?; + assert!(response.status().is_client_error(), "Failed to authenticate with JWT"); + info!( + "Server returned expected error code {} for invalid JWT: {}", + response.status(), + response.text().await.unwrap_or_else(|_| "No response body".to_string()) + ); + Ok(()) +} From 6464638a443b63e58ed3cfef381210aa13f963b2 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 28 May 2025 02:05:28 -0400 Subject: [PATCH 27/67] Added a rate limit test and cleaned up a bit --- tests/tests/signer_jwt_auth.rs | 145 +++++++++++++++++++-------------- 1 file changed, 82 insertions(+), 63 deletions(-) diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs index fd111814..961afb3e 100644 --- a/tests/tests/signer_jwt_auth.rs +++ b/tests/tests/signer_jwt_auth.rs @@ -3,6 +3,7 @@ use std::{collections::HashMap, time::Duration}; use alloy::{hex, primitives::FixedBytes}; use cb_common::{ commit::{constants::GET_PUBKEYS_PATH, request::GetPubkeysResponse}, + config::StartSignerConfig, signer::{SignerLoader, ValidatorKeysFormat}, types::{Chain, ModuleId}, utils::create_jwt, @@ -10,6 +11,7 @@ use cb_common::{ use cb_signer::service::SigningService; use cb_tests::utils::{get_signer_config, get_start_signer_config, setup_test_env}; use eyre::Result; +use reqwest::{Response, StatusCode}; use tracing::info; const JWT_MODULE: &str = "test-module"; @@ -18,66 +20,75 @@ const JWT_SECRET: &str = "test-jwt-secret"; #[tokio::test] async fn test_signer_jwt_auth_success() -> Result<()> { setup_test_env(); - let chain = Chain::Hoodi; + let module_id = ModuleId(JWT_MODULE.to_string()); + let start_config = start_server().await?; - // Mock JWT secrets + // Run a pubkeys request + let jwt = create_jwt(&module_id, JWT_SECRET)?; + let client = reqwest::Client::new(); + let url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH); + let response = client.get(&url).bearer_auth(&jwt).send().await?; + + // Verify the expected pubkeys are returned + verify_pubkeys(response).await?; + + Ok(()) +} + +#[tokio::test] +async fn test_signer_jwt_auth_fail() -> Result<()> { + setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); - let mut jwts = HashMap::new(); - jwts.insert(module_id.clone(), JWT_SECRET.to_string()); + let start_config = start_server().await?; - // Create a signer config - let loader = SignerLoader::ValidatorsDir { - keys_path: "data/keystores/keys".into(), - secrets_path: "data/keystores/secrets".into(), - format: ValidatorKeysFormat::Lighthouse, - }; - let config = get_signer_config(loader); - let host = config.host; - let port = config.port; - let start_config = get_start_signer_config(config, chain, jwts); + // Run a 
pubkeys request - this should fail due to invalid JWT + let jwt = create_jwt(&module_id, "incorrect secret")?; + let client = reqwest::Client::new(); + let url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH); + let response = client.get(&url).bearer_auth(&jwt).send().await?; + assert!(response.status() == StatusCode::UNAUTHORIZED); + info!( + "Server returned expected error code {} for invalid JWT: {}", + response.status(), + response.text().await.unwrap_or_else(|_| "No response body".to_string()) + ); + Ok(()) +} - // Run the Signer - let server_handle = tokio::spawn(SigningService::run(start_config)); +#[tokio::test] +async fn test_signer_jwt_rate_limit() -> Result<()> { + setup_test_env(); + let module_id = ModuleId(JWT_MODULE.to_string()); + let start_config = start_server().await?; - // Make sure the server is running - tokio::time::sleep(Duration::from_millis(100)).await; - if server_handle.is_finished() { - return Err(eyre::eyre!( - "Signer service failed to start: {}", - server_handle.await.unwrap_err() - )); + // Run as many pubkeys requests as the fail limit + let jwt = create_jwt(&module_id, "incorrect secret")?; + let client = reqwest::Client::new(); + let url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH); + for _ in 0..start_config.jwt_auth_fail_limit { + let response = client.get(&url).bearer_auth(&jwt).send().await?; + assert!(response.status() == StatusCode::UNAUTHORIZED); } - // Create a JWT header + // Run another request - this should fail due to rate limiting now let jwt = create_jwt(&module_id, JWT_SECRET)?; + let response = client.get(&url).bearer_auth(&jwt).send().await?; + assert!(response.status() == StatusCode::TOO_MANY_REQUESTS); - // Run a pubkeys request - let client = reqwest::Client::new(); - let url = format!("http://{}:{}{}", host, port, GET_PUBKEYS_PATH); - let response = client.get(&url).bearer_auth(jwt).send().await?; - assert!(response.status().is_success(), "Failed to authenticate with JWT"); - let pubkey_json = response.json::().await?; + // Wait for the rate limit timeout + tokio::time::sleep(Duration::from_secs(start_config.jwt_auth_fail_timeout_seconds as u64)) + .await; - // Verify the expected pubkeys are returned - assert_eq!(pubkey_json.keys.len(), 2); - let expected_pubkeys = vec![ - FixedBytes::new(hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4")), - FixedBytes::new(hex!("b3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9")), - ]; - for expected in expected_pubkeys { - assert!( - pubkey_json.keys.iter().any(|k| k.consensus == expected), - "Expected pubkey not found: {:?}", - expected - ); - info!("Server returned expected pubkey: {:?}", expected); - } + // Now the next request should succeed + let response = client.get(&url).bearer_auth(&jwt).send().await?; + verify_pubkeys(response).await?; Ok(()) } -#[tokio::test] -async fn test_signer_jwt_auth_fail() -> Result<()> { +// Starts the signer moduler server on a separate task and returns its +// configuration +async fn start_server() -> Result { setup_test_env(); let chain = Chain::Hoodi; @@ -92,13 +103,13 @@ async fn test_signer_jwt_auth_fail() -> Result<()> { secrets_path: "data/keystores/secrets".into(), format: ValidatorKeysFormat::Lighthouse, }; - let config = get_signer_config(loader); - let host = config.host; - let port = config.port; + let mut config = get_signer_config(loader); + config.jwt_auth_fail_limit = 3; // Set a low fail limit for testing 
+ config.jwt_auth_fail_timeout_seconds = 3; // Set a short timeout for testing let start_config = get_start_signer_config(config, chain, jwts); // Run the Signer - let server_handle = tokio::spawn(SigningService::run(start_config)); + let server_handle = tokio::spawn(SigningService::run(start_config.clone())); // Make sure the server is running tokio::time::sleep(Duration::from_millis(100)).await; @@ -108,19 +119,27 @@ async fn test_signer_jwt_auth_fail() -> Result<()> { server_handle.await.unwrap_err() )); } + Ok(start_config) +} - // Create a JWT header - let jwt = create_jwt(&module_id, "incorrect secret")?; - - // Run a pubkeys request - let client = reqwest::Client::new(); - let url = format!("http://{}:{}{}", host, port, GET_PUBKEYS_PATH); - let response = client.get(&url).bearer_auth(jwt).send().await?; - assert!(response.status().is_client_error(), "Failed to authenticate with JWT"); - info!( - "Server returned expected error code {} for invalid JWT: {}", - response.status(), - response.text().await.unwrap_or_else(|_| "No response body".to_string()) - ); +// Verifies that the pubkeys returned by the server match the pubkeys in the +// test data +async fn verify_pubkeys(response: Response) -> Result<()> { + // Verify the expected pubkeys are returned + assert!(response.status() == StatusCode::OK); + let pubkey_json = response.json::().await?; + assert_eq!(pubkey_json.keys.len(), 2); + let expected_pubkeys = vec![ + FixedBytes::new(hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4")), + FixedBytes::new(hex!("b3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9")), + ]; + for expected in expected_pubkeys { + assert!( + pubkey_json.keys.iter().any(|k| k.consensus == expected), + "Expected pubkey not found: {:?}", + expected + ); + info!("Server returned expected pubkey: {:?}", expected); + } Ok(()) } From 0313f18c27880a85d5c9af7698884f27b7cf895e Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 28 May 2025 03:30:34 -0400 Subject: [PATCH 28/67] Added unique ports to unit tests for parallel execution --- tests/tests/signer_jwt_auth.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs index 961afb3e..90a0365f 100644 --- a/tests/tests/signer_jwt_auth.rs +++ b/tests/tests/signer_jwt_auth.rs @@ -21,7 +21,7 @@ const JWT_SECRET: &str = "test-jwt-secret"; async fn test_signer_jwt_auth_success() -> Result<()> { setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); - let start_config = start_server().await?; + let start_config = start_server(20100).await?; // Run a pubkeys request let jwt = create_jwt(&module_id, JWT_SECRET)?; @@ -39,7 +39,7 @@ async fn test_signer_jwt_auth_success() -> Result<()> { async fn test_signer_jwt_auth_fail() -> Result<()> { setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); - let start_config = start_server().await?; + let start_config = start_server(20200).await?; // Run a pubkeys request - this should fail due to invalid JWT let jwt = create_jwt(&module_id, "incorrect secret")?; @@ -59,7 +59,7 @@ async fn test_signer_jwt_auth_fail() -> Result<()> { async fn test_signer_jwt_rate_limit() -> Result<()> { setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); - let start_config = start_server().await?; + let start_config = start_server(20300).await?; // Run as many pubkeys requests as the fail limit let jwt = create_jwt(&module_id, "incorrect 
secret")?; @@ -88,7 +88,7 @@ async fn test_signer_jwt_rate_limit() -> Result<()> { // Starts the signer moduler server on a separate task and returns its // configuration -async fn start_server() -> Result { +async fn start_server(port: u16) -> Result { setup_test_env(); let chain = Chain::Hoodi; @@ -104,6 +104,7 @@ async fn start_server() -> Result { format: ValidatorKeysFormat::Lighthouse, }; let mut config = get_signer_config(loader); + config.port = port; config.jwt_auth_fail_limit = 3; // Set a low fail limit for testing config.jwt_auth_fail_timeout_seconds = 3; // Set a short timeout for testing let start_config = get_start_signer_config(config, chain, jwts); From 346eea4c0ee7c6e7e53ec1f6c950e3289be214dd Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 28 May 2025 16:36:11 -0400 Subject: [PATCH 29/67] Cleaned up the build Dockerfile and removed an extra dependency layer --- provisioning/build.Dockerfile | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/provisioning/build.Dockerfile b/provisioning/build.Dockerfile index 34ad27a5..43713cc5 100644 --- a/provisioning/build.Dockerfile +++ b/provisioning/build.Dockerfile @@ -1,7 +1,10 @@ # This will be the main build image -FROM --platform=${BUILDPLATFORM} lukemathwalker/cargo-chef:latest-rust-1.83 AS chef +FROM --platform=${BUILDPLATFORM} rust:1.83-slim-bookworm AS chef ARG TARGETOS TARGETARCH BUILDPLATFORM TARGET_CRATE +ENV CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse WORKDIR /app +RUN cargo install cargo-chef --locked && \ + rm -rf $CARGO_HOME/registry/ FROM --platform=${BUILDPLATFORM} chef AS planner ARG TARGETOS TARGETARCH BUILDPLATFORM TARGET_CRATE @@ -20,8 +23,8 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ rustup target add aarch64-unknown-linux-gnu && \ dpkg --add-architecture arm64 && \ apt update && \ - apt install -y gcc-aarch64-linux-gnu libssl-dev:arm64 zlib1g-dev:arm64 && \ - echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + apt install -y gcc-aarch64-linux-gnu && \ + echo '#!/bin/sh' > ${BUILD_VAR_SCRIPT} && \ echo "export TARGET=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export TARGET_FLAG=--target=aarch64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/aarch64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ @@ -35,8 +38,8 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ rustup target add x86_64-unknown-linux-gnu && \ dpkg --add-architecture amd64 && \ apt update && \ - apt install -y gcc-x86-64-linux-gnu libssl-dev:amd64 zlib1g-dev:amd64 && \ - echo "#!/bin/sh" > ${BUILD_VAR_SCRIPT} && \ + apt install -y gcc-x86-64-linux-gnu && \ + echo '#!/bin/sh' > ${BUILD_VAR_SCRIPT} && \ echo "export TARGET=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export TARGET_FLAG=--target=x86_64-unknown-linux-gnu" >> ${BUILD_VAR_SCRIPT} && \ echo "export CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=/usr/bin/x86_64-linux-gnu-gcc" >> ${BUILD_VAR_SCRIPT} && \ @@ -49,12 +52,14 @@ RUN if [ "$BUILDPLATFORM" = "linux/amd64" -a "$TARGETARCH" = "arm64" ]; then \ # Run cook to prep the build RUN if [ -f ${BUILD_VAR_SCRIPT} ]; then \ + chmod +x ${BUILD_VAR_SCRIPT} && \ . 
${BUILD_VAR_SCRIPT} && \ echo "Cross-compilation environment set up for ${TARGET}"; \ else \ echo "No cross-compilation needed"; \ fi && \ - export GIT_HASH=$(git rev-parse HEAD) && \ + apt update && \ + apt install -y git libssl-dev:${TARGETARCH} zlib1g-dev:${TARGETARCH} pkg-config && \ cargo chef cook ${TARGET_FLAG} --release --recipe-path recipe.json # Get the latest Protoc since the one in the Debian repo is incredibly old From 7b20d2f885efa8591d834d1deebb7b550d89683d Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 29 May 2025 05:03:25 -0400 Subject: [PATCH 30/67] Ported the build script over to the justfile --- build-linux.sh | 144 ------------------------ docs/docs/get_started/building.md | 28 ++--- justfile | 175 +++++++++++++++++++++++++++++- provisioning/pbs.Dockerfile | 2 +- provisioning/signer.Dockerfile | 2 +- 5 files changed, 185 insertions(+), 166 deletions(-) delete mode 100755 build-linux.sh diff --git a/build-linux.sh b/build-linux.sh deleted file mode 100755 index a7266bd9..00000000 --- a/build-linux.sh +++ /dev/null @@ -1,144 +0,0 @@ -#!/bin/bash - -# This script will build the Commit-Boost applications and modules for local Linux development. - -# ================= -# === Functions === -# ================= - -# Print a failure message to stderr and exit -fail() { - MESSAGE=$1 - RED='\033[0;31m' - RESET='\033[;0m' - >&2 echo -e "\n${RED}**ERROR**\n$MESSAGE${RESET}\n" - exit 1 -} - - -# Builds the CLI binaries for Linux -# NOTE: You must install qemu first; e.g. sudo apt-get install -y qemu qemu-user-static -build_cli() { - echo "Building CLI binaries..." - docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/$VERSION --target output --build-arg TARGET_CRATE=commit-boost-cli . || fail "Error building CLI." - echo "done!" - - # Flatten the folder structure for easier referencing - mv build/$VERSION/linux_amd64/commit-boost-cli build/$VERSION/commit-boost-cli-linux-amd64 - mv build/$VERSION/linux_arm64/commit-boost-cli build/$VERSION/commit-boost-cli-linux-arm64 - - # Clean up the empty directories - rmdir build/$VERSION/linux_amd64 build/$VERSION/linux_arm64 - echo "done!" -} - - -# Builds the PBS module binaries for Linux and the Docker image(s) -# NOTE: You must install qemu first; e.g. sudo apt-get install -y qemu qemu-user-static -build_pbs() { - echo "Building PBS binaries..." - docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/$VERSION --target output --build-arg TARGET_CRATE=commit-boost-pbs . || fail "Error building PBS binaries." - echo "done!" - - # Flatten the folder structure for easier referencing - mv build/$VERSION/linux_amd64/commit-boost-pbs build/$VERSION/commit-boost-pbs-linux-amd64 - mv build/$VERSION/linux_arm64/commit-boost-pbs build/$VERSION/commit-boost-pbs-linux-arm64 - - # Clean up the empty directories - rmdir build/$VERSION/linux_amd64 build/$VERSION/linux_arm64 - - echo "Building PBS Docker image..." - # If uploading, make and push a manifest - if [ "$LOCAL_UPLOAD" = true ]; then - if [ -z "$LOCAL_DOCKER_REGISTRY" ]; then - fail "LOCAL_DOCKER_REGISTRY must be set to upload to a local registry." - fi - docker buildx build --rm --platform=linux/amd64,linux/arm64 --build-arg BINARIES_PATH=build/$VERSION -t $LOCAL_DOCKER_REGISTRY/commit-boost/pbs:$VERSION -f provisioning/pbs.Dockerfile --push . || fail "Error building PBS image." 
- else - docker buildx build --rm --load --build-arg BINARIES_PATH=build/$VERSION -t commit-boost/pbs:$VERSION -f provisioning/pbs.Dockerfile . || fail "Error building PBS image." - fi - echo "done!" -} - - -# Builds the Signer module binaries for Linux and the Docker image(s) -# NOTE: You must install qemu first; e.g. sudo apt-get install -y qemu qemu-user-static -build_signer() { - echo "Building Signer binaries..." - docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/$VERSION --target output --build-arg TARGET_CRATE=commit-boost-signer . || fail "Error building Signer binaries." - echo "done!" - - # Flatten the folder structure for easier referencing - mv build/$VERSION/linux_amd64/commit-boost-signer build/$VERSION/commit-boost-signer-linux-amd64 - mv build/$VERSION/linux_arm64/commit-boost-signer build/$VERSION/commit-boost-signer-linux-arm64 - - # Clean up the empty directories - rmdir build/$VERSION/linux_amd64 build/$VERSION/linux_arm64 - - echo "Building Signer Docker image..." - # If uploading, make and push a manifest - if [ "$LOCAL_UPLOAD" = true ]; then - if [ -z "$LOCAL_DOCKER_REGISTRY" ]; then - fail "LOCAL_DOCKER_REGISTRY must be set to upload to a local registry." - fi - docker buildx build --rm --platform=linux/amd64,linux/arm64 --build-arg BINARIES_PATH=build/$VERSION -t $LOCAL_DOCKER_REGISTRY/commit-boost/signer:$VERSION -f provisioning/signer.Dockerfile --push . || fail "Error building Signer image." - else - docker buildx build --rm --load --build-arg BINARIES_PATH=build/$VERSION -t commit-boost/signer:$VERSION -f provisioning/signer.Dockerfile . || fail "Error building Signer image." - fi - echo "done!" -} - - -# Print usage -usage() { - echo "Usage: build.sh [options] -v " - echo "This script assumes it is in the commit-boost-client repository directory." - echo "Options:" - echo $'\t-a\tBuild all of the artifacts (CLI, PBS, and Signer, along with Docker images)' - echo $'\t-c\tBuild the Commit-Boost CLI binaries' - echo $'\t-p\tBuild the PBS module binary and its Docker container' - echo $'\t-s\tBuild the Signer module binary and its Docker container' - echo $'\t-o\tWhen passed with a build, upload the resulting image tags to a local Docker registry specified in $LOCAL_DOCKER_REGISTRY' - exit 0 -} - - -# ================= -# === Main Body === -# ================= - -# Parse arguments -while getopts "acpsov:" FLAG; do - case "$FLAG" in - a) CLI=true PBS=true SIGNER=true ;; - c) CLI=true ;; - p) PBS=true ;; - s) SIGNER=true ;; - o) LOCAL_UPLOAD=true ;; - v) VERSION="$OPTARG" ;; - *) usage ;; - esac -done -if [ -z "$VERSION" ]; then - usage -fi - -# Cleanup old artifacts -rm -rf build/$VERSION/* -mkdir -p build/$VERSION - -# Make a multiarch builder, ignore if it's already there -docker buildx create --name multiarch-builder --driver docker-container --use > /dev/null 2>&1 -# NOTE: if using a local repo with a private CA, you will have to follow these steps to add the CA to the builder: -# https://stackoverflow.com/a/73585243 - -# Build the artifacts -if [ "$CLI" = true ]; then - build_cli -fi -if [ "$PBS" = true ]; then - build_pbs -fi -if [ "$SIGNER" = true ]; then - build_signer -fi diff --git a/docs/docs/get_started/building.md b/docs/docs/get_started/building.md index d38b447f..edf795b2 100644 --- a/docs/docs/get_started/building.md +++ b/docs/docs/get_started/building.md @@ -5,31 +5,27 @@ Commit-Boost's components are all written in [Rust](https://www.rust-lang.org/). 
## Building via the Docker Builder -For convenience, Commit-Boost has Dockerized the build environment for Linux `x64` and `arm64` platforms. All of the prerequisites, cross-compilation tooling, and configuration are handled by the builder image. If you would like to build the CLI, PBS module, or Signer binaries and Docker images from source, you are welcome to use the Docker builder process. +For convenience, Commit-Boost has Dockerized the build environment for Linux `x64` and `arm64` platforms. It utilizes Docker's powerful [buildx](https://docs.docker.com/reference/cli/docker/buildx/) system. All of the prerequisites, cross-compilation tooling, and configuration are handled by the builder image. If you would like to build the CLI, PBS module, or Signer binaries and Docker images from source, you are welcome to use the Docker builder process. To use the builder, you will need to have [Docker Engine](https://docs.docker.com/engine/install/) installed on your system. Please follow the instructions to install it first. :::note -The build script assumes that you've added your user account to the `docker` group with the Linux [post-install steps](https://docs.docker.com/engine/install/linux-postinstall/). If you haven't, then you'll need to run the build script below as `root` or modify it so each call to `docker` within it is run as the root user (e.g., with `sudo`). +The build system assumes that you've added your user account to the `docker` group with the Linux [post-install steps](https://docs.docker.com/engine/install/linux-postinstall/). If you haven't, then you'll need to run the build script below as `root` or modify it so each call to `docker` within it is run as the root user (e.g., with `sudo`). ::: -We provide a build script called `build-linux.sh` to automate the process: +The Docker builder is built into the project's `justfile` which is used to invoke many facets of Commit Boost development. To use it, you'll need to install [Just](https://github.com/casey/just) on your system. -``` -$ ./build-linux.sh -Usage: build.sh [options] -v -This script assumes it is in the commit-boost-client repository directory. -Options: - -a Build all of the artifacts (CLI, PBS, and Signer, along with Docker images) - -c Build the Commit-Boost CLI binaries - -p Build the PBS module binary and its Docker container - -s Build the Signer module binary and its Docker container - -o When passed with a build, upload the resulting image tags to a local Docker registry specified in $LOCAL_DOCKER_REGISTRY -``` +Use `just --list` to show all of the actions - there are many. The `justfile` provides granular actions, called "recipes", for building just the binaries of a specific crate (such as the CLI, `pbs`, or `signer`), as well as actions to build the Docker images for the PBS and Signer modules. + +Below is a brief summary of the relevant ones for building the Commit-Boost artifacts: + +- `build-all ` will build the `commit-boost-cli`, `commit-boost-pbs`, and `commit-boost-signer` binaries for your local system architecture. It will also create Docker images called `commit-boost/pbs:` and `commit-boost/signer:` and load them into your local Docker registry for use. +- `build-cli-bin `, `build-pbs-bin `, and `build-signer-bin ` can be used to create the `commit-boost-cli`, `commit-boost-pbs`, and `commit-boost-signer` binaries, respectively. +- `build-pbs-img ` and `build-signer-img ` can be used to create the Docker images for the PBS and Signer modules, respectively. 
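For example, building every artifact with a local version tag might look like the following; the tag itself is arbitrary and only determines the output folder under `./build/` and the Docker image tag:

```
# Build the CLI, PBS, and Signer binaries plus the two Docker images
just build-all 1.2.3

# Or build a single artifact, e.g. only the PBS Docker image
just build-pbs-img 1.2.3
```
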
-The script utilizes Docker's [buildx](https://docs.docker.com/reference/cli/docker/buildx/) system to both create a multiarch-capable builder and cross-compile for both Linux architectures. You are free to modify it to produce only the artifacts relevant to you if so desired. +The `version` provided will be used to house the output binaries in `./build/`, and act as the version tag for the Docker images when they're added to your local system or uploaded to your local Docker repository. -The `version` provided will be used to house the output binaries in `./build/$VERSION`, and act as the version tag for the Docker images when they're added to your local system or uploaded to your local Docker repository. +If you're interested in building the binaries and/or Docker images for multiple architectures (currently Linux `amd64` and `arm64`), use the variants of those recipes that have the `-multiarch` suffix. Note that building a multiarch Docker image manifest will require the use of a [custom Docker registry](https://www.digitalocean.com/community/tutorials/how-to-set-up-a-private-docker-registry-on-ubuntu-20-04), as the local registry built into Docker does not have multiarch manifest support. ## Building Manually diff --git a/justfile b/justfile index d13e76ae..ac1314fc 100644 --- a/justfile +++ b/justfile @@ -12,16 +12,183 @@ fmt-check: clippy: cargo +{{toolchain}} clippy --all-features --no-deps -- -D warnings -docker-build-pbs: - docker build -t commitboost_pbs_default . -f ./provisioning/pbs.Dockerfile +# =================================== +# === Build Commands for Services === +# =================================== -docker-build-signer: - docker build -t commitboost_signer . -f ./provisioning/signer.Dockerfile +[doc(""" + Builds the commit-boost-cli binary to './build/'. +""")] +build-cli version: \ + (_docker-build-binary version "cli") + +[doc(""" + Builds amd64 and arm64 binaries for the commit-boost-cli crate to './build//', where '' is + the OS / arch platform of the binary (linux_amd64 and linux_arm64). +""")] +build-cli-multiarch version: \ + (_docker-build-binary-multiarch version "cli") + +[doc(""" + Builds the commit-boost-pbs binary to './build/'. +""")] +build-pbs-bin version: \ + (_docker-build-binary version "pbs") + +[doc(""" + Creates a Docker image named 'commit-boost/pbs:' and loads it to the local Docker repository. + Requires the binary to be built first, but this command won't build it automatically if you just need to build the + Docker image without recompiling the binary. +""")] +build-pbs-img version: \ + (_docker-build-image version "pbs") + +[doc(""" + Builds the commit-boost-pbs binary to './build/' and creates a Docker image named 'commit-boost/pbs:'. +""")] +build-pbs version: \ + (build-pbs-bin version) \ + (build-pbs-img version) + +[doc(""" + Builds amd64 and arm64 binaries for the commit-boost-pbs crate to './build//', where '' is the + OS / arch platform of the binary (linux_amd64 and linux_arm64). + Used when creating the pbs Docker image. +""")] +build-pbs-bin-multiarch version: \ + (_docker-build-binary-multiarch version "pbs") + +[doc(""" + Creates a multiarch Docker image manifest named 'commit-boost/pbs:' and pushes it to a custom Docker registry + (such as '192.168.1.10:5000'). + Used for testing multiarch images locally instead of using a public registry like GHCR or Docker Hub. 
+""")] +build-pbs-img-multiarch version local-docker-registry: \ + (_docker-build-image-multiarch version "pbs" local-docker-registry) + +[doc(""" + Builds amd64 and arm64 binaries for the commit-boost-pbs crate to './build//', where '' is the + OS / arch platform of the binary (linux_amd64 and linux_arm64). + Creates a multiarch Docker image manifest named 'commit-boost/pbs:' and pushes it to a custom Docker registry + (such as '192.168.1.10:5000'). + Used for testing multiarch images locally instead of using a public registry like GHCR or Docker Hub. +""")] +build-pbs-multiarch version local-docker-registry: \ + (build-pbs-bin-multiarch version) \ + (build-pbs-img-multiarch version local-docker-registry) + +[doc(""" + Builds the commit-boost-signer binary to './build/'. +""")] +build-signer-bin version: \ + (_docker-build-binary version "signer") + +[doc(""" + Creates a Docker image named 'commit-boost/signer:' and loads it to the local Docker repository. + Requires the binary to be built first, but this command won't build it automatically if you just need to build the + Docker image without recompiling the binary. +""")] +build-signer-img version: \ + (_docker-build-image version "signer") + +[doc(""" + Builds the commit-boost-signer binary to './build/' and creates a Docker image named 'commit-boost/signer:'. +""")] +build-signer version: \ + (build-signer-bin version) \ + (build-signer-img version) + +[doc(""" + Builds amd64 and arm64 binaries for the commit-boost-signer crate to './build//', where '' is + the OS / arch platform of the binary (linux_amd64 and linux_arm64). + Used when creating the signer Docker image. +""")] +build-signer-bin-multiarch version: \ + (_docker-build-binary-multiarch version "signer") + +[doc(""" + Creates a multiarch Docker image manifest named 'commit-boost/signer:' and pushes it to a custom Docker registry + (such as '192.168.1.10:5000'). + Used for testing multiarch images locally instead of using a public registry like GHCR or Docker Hub. +""")] +build-signer-img-multiarch version local-docker-registry: \ + (_docker-build-image-multiarch version "signer" local-docker-registry) + +[doc(""" + Builds amd64 and arm64 binaries for the commit-boost-signer crate to './build//', where '' is + the OS / arch platform of the binary (linux_amd64 and linux_arm64). + Creates a multiarch Docker image manifest named 'commit-boost/signer:' and pushes it to a custom Docker registry + (such as '192.168.1.10:5000'). + Used for testing multiarch images locally instead of using a public registry like GHCR or Docker Hub. +""")] +build-signer-multiarch version local-docker-registry: \ + (build-signer-bin-multiarch version) \ + (build-signer-img-multiarch version local-docker-registry) + +[doc(""" + Builds the CLI, PBS, and Signer binaries and Docker images for the specified version. + The binaries will be placed in './build/'. + The Docker images will be named 'commit-boost/cli:', 'commit-boost/pbs:', and + 'commit-boost/signer:'. +""")] +build-all version: \ + (build-cli version) \ + (build-pbs version) \ + (build-signer version) + +[doc(""" + Builds amd64 and arm64 flavors of the CLI, PBS, and Signer binaries and Docker images for the specified version. + The binaries will be placed in './build//', where '' is the + OS / arch platform of the binary (linux_amd64 and linux_arm64). + Also creates multiarch Docker image manifests for each crate and pushes them to a custom Docker registry + (such as '192.168.1.10:5000'). 
+ Used for testing multiarch images locally instead of using a public registry like GHCR or Docker Hub. +""")] +build-all-multiarch version local-docker-registry: \ + (build-cli-multiarch version) \ + (build-pbs-multiarch version local-docker-registry) \ + (build-signer-multiarch version local-docker-registry) + +# =============================== +# === Builder Implementations === +# =============================== + +# Creates a Docker buildx builder if it doesn't already exist +_create-docker-builder: + docker buildx create --name multiarch-builder --driver docker-container --use > /dev/null 2>&1 || true + +# Builds a binary for a specific crate and version +_docker-build-binary version crate: _create-docker-builder + export PLATFORM=$(docker buildx inspect --bootstrap | awk -F': ' '/Platforms/ {print $2}' | cut -d',' -f1 | xargs | tr '/' '_'); \ + docker buildx build --rm --platform=local -f provisioning/build.Dockerfile --output "build/{{version}}/$PLATFORM" --target output --build-arg TARGET_CRATE=commit-boost-{{crate}} . + +# Builds a Docker image for a specific crate and version +_docker-build-image version crate: _create-docker-builder + docker buildx build --rm --load --build-arg BINARIES_PATH=build/{{version}} -t commit-boost/{{crate}}:{{version}} -f provisioning/{{crate}}.Dockerfile . + +# Builds multiple binaries (for Linux amd64 and arm64 architectures) for a specific crate and version +_docker-build-binary-multiarch version crate: _create-docker-builder + docker buildx build --rm --platform=linux/amd64,linux/arm64 -f provisioning/build.Dockerfile --output build/{{version}} --target output --build-arg TARGET_CRATE=commit-boost-{{crate}} . + +# Builds a multi-architecture (Linux amd64 and arm64) Docker manifest for a specific crate and version. +# Uploads to the custom Docker registry (such as '192.168.1.10:5000') instead of a public registry like GHCR or Docker Hub. +_docker-build-image-multiarch version crate local-docker-registry: _create-docker-builder + docker buildx build --rm --platform=linux/amd64,linux/arm64 --build-arg BINARIES_PATH=build/{{version}} -t {{local-docker-registry}}/commit-boost/{{crate}}:{{version}} -f provisioning/{{crate}}.Dockerfile --push . + +# ================= +# === Utilities === +# ================= docker-build-test-modules: docker build -t test_da_commit . -f examples/da_commit/Dockerfile docker build -t test_builder_log . -f examples/builder_log/Dockerfile docker build -t test_status_api . -f examples/status_api/Dockerfile +# Cleans the build directory, removing all built binaries. +# Docker images are not removed by this command. +clean: + rm -rf build + +# Runs the suite of tests for all commit-boost crates. 
test: cargo test --all-features \ No newline at end of file diff --git a/provisioning/pbs.Dockerfile b/provisioning/pbs.Dockerfile index 9eb72702..6b9496ec 100644 --- a/provisioning/pbs.Dockerfile +++ b/provisioning/pbs.Dockerfile @@ -1,6 +1,6 @@ FROM debian:bookworm-slim ARG BINARIES_PATH TARGETOS TARGETARCH -COPY ${BINARIES_PATH}/commit-boost-pbs-${TARGETOS}-${TARGETARCH} /usr/local/bin/commit-boost-pbs +COPY ${BINARIES_PATH}/${TARGETOS}_${TARGETARCH}/commit-boost-pbs /usr/local/bin/commit-boost-pbs RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ diff --git a/provisioning/signer.Dockerfile b/provisioning/signer.Dockerfile index 05679762..5ea619b2 100644 --- a/provisioning/signer.Dockerfile +++ b/provisioning/signer.Dockerfile @@ -1,6 +1,6 @@ FROM debian:bookworm-slim ARG BINARIES_PATH TARGETOS TARGETARCH -COPY ${BINARIES_PATH}/commit-boost-signer-${TARGETOS}-${TARGETARCH} /usr/local/bin/commit-boost-signer +COPY ${BINARIES_PATH}/${TARGETOS}_${TARGETARCH}/commit-boost-signer /usr/local/bin/commit-boost-signer RUN apt-get update && apt-get install -y \ openssl \ ca-certificates \ From ca9f4a1997103e81427d3c9ca04a54317ce9fb2b Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 29 May 2025 16:08:50 -0400 Subject: [PATCH 31/67] Added a justfile recipe for installing protoc --- docs/docs/get_started/building.md | 19 ++++++------------- justfile | 3 +++ provisioning/protoc.sh | 11 +++++++---- 3 files changed, 16 insertions(+), 17 deletions(-) diff --git a/docs/docs/get_started/building.md b/docs/docs/get_started/building.md index edf795b2..a00b36cf 100644 --- a/docs/docs/get_started/building.md +++ b/docs/docs/get_started/building.md @@ -53,24 +53,17 @@ sudo apt update && sudo apt install -y openssl ca-certificates libssl3 libssl-de Install the Protobuf compiler: :::note -While many package repositories provide a `protobuf-compiler` package in lieu of manually installing protoc, we've found at the time of this writing that most of them use v3.21 which is quite out of date. We recommend getting the latest version manually. +While many package repositories provide a `protobuf-compiler` package in lieu of manually installing protoc, we've found at the time of this writing that Debian-based ones use v3.21 which is quite out of date. We recommend getting the latest version manually. ::: +We provide a convenient recipe to install the latest version directly from the GitHub releases page: + ```bash -PROTOC_VERSION=$(curl -s "https://api.github.com/repos/protocolbuffers/protobuf/releases/latest" | grep -Po '"tag_name": "v\K[0-9.]+') -MACHINE_ARCH=$(uname -m) -case "${MACHINE_ARCH}" in - aarch64) PROTOC_ARCH=aarch_64;; - x86_64) PROTOC_ARCH=x86_64;; - *) echo "${MACHINE_ARCH} is not supported."; exit 1;; -esac -curl -sLo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-linux-$PROTOC_ARCH.zip -sudo unzip -q protoc.zip bin/protoc -d /usr -sudo unzip -q protoc.zip "include/google/*" -d /usr -sudo chmod a+x /usr/bin/protoc -rm -rf protoc.zip +just install-protoc ``` +This works on OSX and Linux systems, but you are welcome to download and install it manually as well. 
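+
+Whichever route you take, a quick way to confirm that the compiler is on your `PATH` and reasonably up to date is to check the version it reports:
+
+```bash
+protoc --version
+```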
+ With the prerequisites set up, pull the repository: ```bash git clone https://github.com/Commit-Boost/commit-boost-client diff --git a/justfile b/justfile index ac1314fc..ee5f4c2d 100644 --- a/justfile +++ b/justfile @@ -179,6 +179,9 @@ _docker-build-image-multiarch version crate local-docker-registry: _create-docke # === Utilities === # ================= +install-protoc: + provisioning/protoc.sh + docker-build-test-modules: docker build -t test_da_commit . -f examples/da_commit/Dockerfile docker build -t test_builder_log . -f examples/builder_log/Dockerfile diff --git a/provisioning/protoc.sh b/provisioning/protoc.sh index 7f66a656..a727a7c1 100755 --- a/provisioning/protoc.sh +++ b/provisioning/protoc.sh @@ -21,7 +21,10 @@ case "$(uname)" in Linux*) PROTOC_OS="linux" ; TARGET_DIR="/usr" ; # Assumes the script is run as root or the user can do it manually - apt update && apt install -y unzip curl ca-certificates jq ;; + if [ $(id -u) != "0" ]; then + CMD_PREFIX="sudo " ; + fi + ${CMD_PREFIX}apt update && ${CMD_PREFIX}apt install -y unzip curl ca-certificates jq ;; *) echo "Unsupported OS: $(uname)" ; exit 1 ;; @@ -50,8 +53,8 @@ echo "Installing protoc: $PROTOC_VERSION-$PROTOC_OS-$PROTOC_ARCH" # Download and install protoc curl --retry 10 --retry-delay 2 --retry-all-errors -fsLo protoc.zip https://github.com/protocolbuffers/protobuf/releases/latest/download/protoc-$PROTOC_VERSION-$PROTOC_OS-$PROTOC_ARCH.zip || fail "Failed to download protoc" -unzip -q protoc.zip bin/protoc -d $TARGET_DIR || fail "Failed to unzip protoc" -unzip -q protoc.zip "include/google/*" -d $TARGET_DIR || fail "Failed to unzip protoc includes" -chmod a+x $TARGET_DIR/bin/protoc || fail "Failed to set executable permissions for protoc" +${CMD_PREFIX}unzip -qo protoc.zip bin/protoc -d $TARGET_DIR || fail "Failed to unzip protoc" +${CMD_PREFIX}unzip -qo protoc.zip "include/google/*" -d $TARGET_DIR || fail "Failed to unzip protoc includes" +${CMD_PREFIX}chmod a+x $TARGET_DIR/bin/protoc || fail "Failed to set executable permissions for protoc" rm -rf protoc.zip || fail "Failed to remove protoc zip file" echo "protoc ${PROTOC_VERSION} installed successfully for ${PROTOC_OS} ${PROTOC_ARCH}" \ No newline at end of file From d53728821c88045dd9f6f87c37a9ad076647d601 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 9 Jun 2025 13:21:12 -0400 Subject: [PATCH 32/67] Update crates/cli/src/docker_init.rs Co-authored-by: ltitanb <163874448+ltitanb@users.noreply.github.com> --- crates/cli/src/docker_init.rs | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index 652e3448..c6fcd533 100644 --- a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -73,11 +73,7 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re let mut targets = Vec::new(); // address for signer API communication - let signer_port = if let Some(signer_config) = &cb_config.signer { - signer_config.port - } else { - DEFAULT_SIGNER_PORT - }; + let signer_port = cb_config.signer.as_ref().map(|s| s.port).unwrap_or(DEFAULT_SIGNER_PORT); let signer_server = if let Some(SignerConfig { inner: SignerType::Remote { url }, .. 
}) = &cb_config.signer { url.to_string() From 7afb7633fb3f75baacb88eb3d1600bd15c2e2cc6 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 9 Jun 2025 13:22:15 -0400 Subject: [PATCH 33/67] Added example signer config params --- config.example.toml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/config.example.toml b/config.example.toml index ae69c3ff..89d472c1 100644 --- a/config.example.toml +++ b/config.example.toml @@ -148,6 +148,13 @@ url = "http://0xa119589bb33ef52acbb8116832bec2b58fca590fe5c85eac5d3230b44d5bc09f # Docker image to use for the Signer module. # OPTIONAL, DEFAULT: ghcr.io/commit-boost/signer:latest # docker_image = "ghcr.io/commit-boost/signer:latest" +# Host to bind the Signer API server to +# OPTIONAL, DEFAULT: 127.0.0.1 +host = "127.0.0.1" +# Port to listen for Signer API calls on +# OPTIONAL, DEFAULT: 20000 +port = 20000 + # For Remote signer: # [signer.remote] # URL of the Web3Signer instance From 09ac8217f686b378dda48a86fd2b78bad9493b92 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 9 Jun 2025 13:22:31 -0400 Subject: [PATCH 34/67] Cleaned up signer config loading from feedback --- crates/common/src/config/signer.rs | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index dce97666..5618f3ae 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -109,20 +109,17 @@ impl StartSignerConfig { let jwts = load_jwt_secrets()?; - // Load the server endpoint first from the env var, then the config, and finally - // the defaults + let signer_config = config.signer.ok_or_eyre("Signer config is missing")?; + + // Load the server endpoint first from the env var if present, otherwise the + // config let endpoint = if let Some(endpoint) = load_optional_env_var(SIGNER_ENDPOINT_ENV) { endpoint.parse()? } else { - match config.signer { - Some(ref signer) => SocketAddr::from((signer.host, signer.port)), - None => SocketAddr::from((default_host(), DEFAULT_SIGNER_PORT)), - } + SocketAddr::from((signer_config.host, signer_config.port)) }; - let signer = config.signer.ok_or_eyre("Signer config is missing")?.inner; - - match signer { + match signer_config.inner { SignerType::Local { loader, store, .. 
} => Ok(StartSignerConfig { chain: config.chain, loader: Some(loader), From ccaf97dc48b94583cd90a20a4ac14ef3bf204d33 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 10 Jun 2025 14:30:13 -0400 Subject: [PATCH 35/67] Added JWT auth fields to the example config --- config.example.toml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/config.example.toml b/config.example.toml index d32dfbf9..899c6a10 100644 --- a/config.example.toml +++ b/config.example.toml @@ -154,6 +154,12 @@ host = "127.0.0.1" # Port to listen for Signer API calls on # OPTIONAL, DEFAULT: 20000 port = 20000 +# Number of JWT authentication attempts a client can fail before blocking that client temporarily from Signer access +# OPTIONAL, DEFAULT: 3 +jwt_auth_fail_limit: 3 +# How long to block a client from Signer access, in seconds, if it failed JWT authentication too many times +# OPTIONAL, DEFAULT: 300 +jwt_auth_fail_timeout_seconds: 300 # For Remote signer: # [signer.remote] From 145ebe8a90395a01b06e7a407c886901aa6be1c8 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 12 Jun 2025 00:39:21 -0400 Subject: [PATCH 36/67] Started building the JWT config file --- crates/common/src/config/jwt.rs | 224 ++++++++++++++++++++++++++++++++ crates/common/src/config/mod.rs | 2 + 2 files changed, 226 insertions(+) create mode 100644 crates/common/src/config/jwt.rs diff --git a/crates/common/src/config/jwt.rs b/crates/common/src/config/jwt.rs new file mode 100644 index 00000000..138a04eb --- /dev/null +++ b/crates/common/src/config/jwt.rs @@ -0,0 +1,224 @@ +use std::{ + collections::HashMap, + io::Read, + path::{Path, PathBuf}, +}; + +use alloy::primitives::B256; +use eyre::{bail, Result}; +use serde::Deserialize; + +use crate::types::{Jwt, ModuleId}; + +/// Underlying implementation of the JWT configuration that's deserialized from +/// disk. +#[derive(Deserialize)] +struct JwtConfigOnDisk { + module_name: ModuleId, + + // One of these must be provided - they're listed here in order of precedence + jwt_env: Option, + jwt_file: Option, + jwt_secret: Option, + + signing_id: B256, +} + +impl JwtConfigOnDisk { + /// Load the JWT secret from the provides sources, in order of precedence. + async fn load_jwt_secret(&self) -> Result { + // Start with the environment variable + let jwt_string = if let Some(jwt_env) = &self.jwt_env { + // Load JWT secret from environment variable + std::env::var(jwt_env).map_err(|e| { + eyre::eyre!( + "Failed to read JWT secret from environment variable '{jwt_env}': {}", + e + ) + })? + } else if let Some(jwt_file) = &self.jwt_file { + // Load JWT secret from file + std::fs::read_to_string(jwt_file).map_err(|e| { + eyre::eyre!("Failed to read JWT secret from file '{}': {}", jwt_file.display(), e) + })? + } else if let Some(jwt_secret) = &self.jwt_secret { + // Use the provided JWT secret directly + jwt_secret.clone() + } else { + bail!("No JWT secret provided"); + }; + + Ok(Jwt(jwt_string)) + } +} + +#[derive(Deserialize)] +struct JwtConfigFile { + modules: Vec, +} + +#[derive(Debug, PartialEq)] +pub struct JwtConfig { + /// Human-readable name of the module. + pub module_name: ModuleId, + + /// The JWT secret for the module to communicate with the signer module. + pub jwt_secret: Jwt, + + /// A unique identifier for the module, which is used when signing requests + /// to generate signatures for this module. Must be a 32-byte hex string. + /// A leading 0x prefix is optional. 
+ pub signing_id: B256, +} + +impl JwtConfig { + // + pub async fn validate(&self) -> Result<()> { + // Ensure the JWT secret is not empty + if self.jwt_secret.is_empty() { + bail!("JWT secret cannot be empty"); + } + + // Ensure the signing ID is a valid B256 + if self.signing_id.is_zero() { + bail!("Signing ID cannot be zero"); + } + + Ok(()) + } +} + +/// Load the JWT configuration from a file. +pub async fn load(config_file_path: &Path) -> Result> { + // Make sure the file is legal + if !config_file_path.is_absolute() { + bail!("JWT config file '{}' must be an absolute path", config_file_path.display()); + } + let config_file_path = config_file_path.canonicalize().map_err(|e| { + eyre::eyre!( + "Failed to canonicalize JWT config path '{}': {}", + config_file_path.display(), + e + ) + })?; + if config_file_path.extension().map_or(true, |ext| ext != "toml") { + bail!("JWT config file '{}' must have a .toml extension", config_file_path.display()); + } + if !config_file_path.exists() { + bail!("JWT config file '{}' does not exist", config_file_path.display()); + } + if !config_file_path.is_file() { + bail!("JWT config file '{}' is not a regular file", config_file_path.display()); + } + + // Parse the JWT config file + let mut file = std::fs::File::open(&config_file_path).map_err(|e| { + eyre::eyre!("Failed to open JWT config file '{}': {}", config_file_path.display(), e) + })?; + let mut contents = String::new(); + file.read_to_string(&mut contents)?; + + let jwt_configs: JwtConfigFile = toml::from_str(&contents).map_err(|e| { + eyre::eyre!("Failed to parse JWT config '{}': {}", config_file_path.display(), e) + })?; + + load_impl(jwt_configs).await +} + +/// Implementation for loading a JWT configuration from a file. +async fn load_impl(config_file: JwtConfigFile) -> Result> { + // Load the JWT secrets and validate them + let mut jwt_configs = HashMap::new(); + for raw_config in config_file.modules { + let jwt_secret = raw_config.load_jwt_secret().await?; + let jwt_config = JwtConfig { + module_name: raw_config.module_name.clone(), + jwt_secret, + signing_id: raw_config.signing_id, + }; + jwt_config.validate().await?; + + // Make sure there are no duplicate module names + if jwt_configs.contains_key(&raw_config.module_name) { + bail!("Duplicate JWT configuration for module '{}'", raw_config.module_name); + } + + // Make sure the signing ID hasn't been used before + if jwt_configs + .values() + .any(|existing_config: &JwtConfig| existing_config.signing_id == jwt_config.signing_id) + { + bail!( + "Duplicate signing ID '{}' for module '{}'", + jwt_config.signing_id, + raw_config.module_name + ); + } + + // Safe to use + jwt_configs.insert(raw_config.module_name, jwt_config); + } + + Ok(jwt_configs) +} + +#[cfg(test)] +mod tests { + use alloy::primitives::b256; + + use super::*; + + #[tokio::test] + async fn test_good_config() -> Result<()> { + let toml_str = r#" + [[modules]] + module_name = "test_module" + jwt_secret = "supersecret" + signing_id = "0101010101010101010101010101010101010101010101010101010101010101" + + [[modules]] + module_name = "2nd_test_module" + jwt_secret = "another-secret" + signing_id = "0202020202020202020202020202020202020202020202020202020202020202" + "#; + + // Load the JWT configuration + let jwt_config_file: JwtConfigFile = + toml::from_str(toml_str).expect("Failed to deserialize JWT config"); + let jwts = load_impl(jwt_config_file).await?; + assert!(jwts.len() == 2, "Expected 2 JWT configurations"); + + // Check the first module + let module_id_1 = 
ModuleId("test_module".to_string()); + let module_1 = jwts.get(&module_id_1).expect("Missing 'test_module' in JWT configs"); + assert_eq!(module_1.module_name, module_id_1, "Module name mismatch for 'test_module'"); + assert_eq!( + module_1.jwt_secret, + Jwt("supersecret".to_string()), + "JWT secret mismatch for 'test_module'" + ); + assert_eq!( + module_1.signing_id, + b256!("0101010101010101010101010101010101010101010101010101010101010101"), + "Signing ID mismatch for 'test_module'" + ); + + // Check the second module + let module_id_2 = ModuleId("2nd_test_module".to_string()); + assert!(jwts.contains_key(&module_id_2), "Missing '2nd_test_module' in JWT configs"); + let module_2 = jwts.get(&module_id_2).expect("Missing '2nd_test_module' in JWT configs"); + assert_eq!(module_2.module_name, module_id_2, "Module name mismatch for '2nd_test_module'"); + assert_eq!( + module_2.jwt_secret, + Jwt("another-secret".to_string()), + "JWT secret mismatch for '2nd_test_module'" + ); + assert_eq!( + module_2.signing_id, + b256!("0202020202020202020202020202020202020202020202020202020202020202"), + "Signing ID mismatch for '2nd_test_module'" + ); + + Ok(()) + } +} diff --git a/crates/common/src/config/mod.rs b/crates/common/src/config/mod.rs index b782999b..5f1144e3 100644 --- a/crates/common/src/config/mod.rs +++ b/crates/common/src/config/mod.rs @@ -6,6 +6,7 @@ use serde::{Deserialize, Serialize}; use crate::types::{load_chain_from_file, Chain, ChainLoader, ForkVersion}; mod constants; +mod jwt; mod log; mod metrics; mod module; @@ -15,6 +16,7 @@ mod signer; mod utils; pub use constants::*; +pub use jwt::*; pub use log::*; pub use metrics::*; pub use module::*; From bb0e023a05dbbafde2824af912e74a98a939a437 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 12 Jun 2025 03:51:28 -0400 Subject: [PATCH 37/67] Added tests --- crates/common/src/config/jwt.rs | 185 +++++++++++++++++++++++++++++++- tests/data/module-jwt.txt | 1 + 2 files changed, 185 insertions(+), 1 deletion(-) create mode 100644 tests/data/module-jwt.txt diff --git a/crates/common/src/config/jwt.rs b/crates/common/src/config/jwt.rs index 138a04eb..c266f439 100644 --- a/crates/common/src/config/jwt.rs +++ b/crates/common/src/config/jwt.rs @@ -179,7 +179,7 @@ mod tests { [[modules]] module_name = "2nd_test_module" jwt_secret = "another-secret" - signing_id = "0202020202020202020202020202020202020202020202020202020202020202" + signing_id = "0x0202020202020202020202020202020202020202020202020202020202020202" "#; // Load the JWT configuration @@ -221,4 +221,187 @@ mod tests { Ok(()) } + + #[tokio::test] + async fn test_jwt_from_env() -> Result<()> { + let jwt = "supersecret-env"; + let jwt_env = "CB_TEST_MODULE_JTW"; + let toml_str = r#" + [[modules]] + module_name = "test_module" + jwt_env = "CB_TEST_MODULE_JTW" + signing_id = "0101010101010101010101010101010101010101010101010101010101010101" + "#; + + // Set the environment variable + std::env::set_var(jwt_env, jwt); + struct EnvVarGuard { + env_name: &'static str, + } + impl Drop for EnvVarGuard { + fn drop(&mut self) { + std::env::remove_var(self.env_name); + } + } + + // Load the JWT configuration + let jwts: HashMap; + { + let _env_guard = EnvVarGuard { env_name: jwt_env }; + let jwt_config_file: JwtConfigFile = + toml::from_str(toml_str).expect("Failed to deserialize JWT config"); + jwts = load_impl(jwt_config_file).await?; + } + assert!(jwts.len() == 1, "Expected 1 JWT configuration"); + + // Check the module + let module_id = ModuleId("test_module".to_string()); + let module = 
jwts.get(&module_id).expect("Missing 'test_module' in JWT configs"); + assert_eq!(module.module_name, module_id, "Module name mismatch for 'test_module'"); + assert_eq!( + module.jwt_secret, + Jwt(jwt.to_string()), + "JWT secret mismatch for 'test_module'" + ); + assert_eq!( + module.signing_id, + b256!("0101010101010101010101010101010101010101010101010101010101010101"), + "Signing ID mismatch for 'test_module'" + ); + Ok(()) + } + + #[tokio::test] + async fn test_jwt_from_file() -> Result<()> { + let jwt = "supersecret-file"; + let cwd = std::env::current_dir()?; + let mut jwt_file_path = cwd.join("../../tests/data/module-jwt.txt"); + jwt_file_path = jwt_file_path.canonicalize()?; + let toml_str = format!( + r#" + [[modules]] + module_name = "test_module" + jwt_file = "{}" + signing_id = "0101010101010101010101010101010101010101010101010101010101010101" + "#, + jwt_file_path.display() + ); + + // Load the JWT configuration + let jwt_config_file: JwtConfigFile = + toml::from_str(&toml_str).expect("Failed to deserialize JWT config"); + let jwts = load_impl(jwt_config_file).await?; + assert!(jwts.len() == 1, "Expected 1 JWT configuration"); + + // Check the module + let module_id = ModuleId("test_module".to_string()); + let module = jwts.get(&module_id).expect("Missing 'test_module' in JWT configs"); + assert_eq!(module.module_name, module_id, "Module name mismatch for 'test_module'"); + assert_eq!( + module.jwt_secret, + Jwt(jwt.to_string()), + "JWT secret mismatch for 'test_module'" + ); + assert_eq!( + module.signing_id, + b256!("0101010101010101010101010101010101010101010101010101010101010101"), + "Signing ID mismatch for 'test_module'" + ); + Ok(()) + } + + #[tokio::test] + async fn test_duplicate_module_names() -> Result<()> { + let toml_str = r#" + [[modules]] + module_name = "test_module" + jwt_secret = "supersecret" + signing_id = "0101010101010101010101010101010101010101010101010101010101010101" + + [[modules]] + module_name = "test_module" # Duplicate name + jwt_secret = "another-secret" + signing_id = "0202020202020202020202020202020202020202020202020202020202020202" + "#; + let jwt_config_file: JwtConfigFile = + toml::from_str(toml_str).expect("Failed to deserialize JWT config"); + let result = load_impl(jwt_config_file).await; + assert!(result.is_err(), "Expected error due to duplicate module names"); + if let Err(e) = result { + assert_eq!(&e.to_string(), "Duplicate JWT configuration for module 'test_module'"); + } + Ok(()) + } + + #[tokio::test] + async fn test_duplicate_signing_ids() -> Result<()> { + let toml_str = r#" + [[modules]] + module_name = "test_module" + jwt_secret = "supersecret" + signing_id = "0101010101010101010101010101010101010101010101010101010101010101" + + [[modules]] + module_name = "2nd_test_module" + jwt_secret = "another-secret" + signing_id = "0101010101010101010101010101010101010101010101010101010101010101" # Duplicate signing ID + "#; + let jwt_config_file: JwtConfigFile = + toml::from_str(toml_str).expect("Failed to deserialize JWT config"); + let result = load_impl(jwt_config_file).await; + assert!(result.is_err(), "Expected error due to duplicate signing IDs"); + if let Err(e) = result { + assert_eq!(&e.to_string(),"Duplicate signing ID '0x0101010101010101010101010101010101010101010101010101010101010101' for module '2nd_test_module'"); + } + Ok(()) + } + + #[tokio::test] + async fn test_missing_jwt_secret() -> Result<()> { + let toml_str = r#" + [[modules]] + module_name = "test_module" + signing_id = 
"0101010101010101010101010101010101010101010101010101010101010101" + "#; + let jwt_config_file: JwtConfigFile = + toml::from_str(toml_str).expect("Failed to deserialize JWT config"); + let result = load_impl(jwt_config_file).await; + assert!(result.is_err(), "Expected error due to missing JWT secret"); + if let Err(e) = result { + assert_eq!(&e.to_string(), "No JWT secret provided"); + } + Ok(()) + } + + #[tokio::test] + async fn test_empty_jwt_secret() -> Result<()> { + let cfg = JwtConfig { + module_name: ModuleId("test_module".to_string()), + jwt_secret: Jwt("".to_string()), + signing_id: b256!("0101010101010101010101010101010101010101010101010101010101010101"), + }; + + let result = cfg.validate().await; + assert!(result.is_err(), "Expected error due to empty JWT secret"); + if let Err(e) = result { + assert_eq!(&e.to_string(), "JWT secret cannot be empty"); + } + + Ok(()) + } + + #[tokio::test] + async fn test_zero_signing_id() -> Result<()> { + let cfg = JwtConfig { + module_name: ModuleId("test_module".to_string()), + jwt_secret: Jwt("supersecret".to_string()), + signing_id: b256!("0000000000000000000000000000000000000000000000000000000000000000"), + }; + let result = cfg.validate().await; + assert!(result.is_err(), "Expected error due to zero signing ID"); + if let Err(e) = result { + assert_eq!(&e.to_string(), "Signing ID cannot be zero"); + } + Ok(()) + } } diff --git a/tests/data/module-jwt.txt b/tests/data/module-jwt.txt new file mode 100644 index 00000000..f837695a --- /dev/null +++ b/tests/data/module-jwt.txt @@ -0,0 +1 @@ +supersecret-file \ No newline at end of file From 71a7605ee9bb853e3c0196d6018bfa745e654232 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 12 Jun 2025 16:45:50 -0400 Subject: [PATCH 38/67] Started migration from JWTS_ENV to the config file --- crates/cli/src/docker_init.rs | 16 ++++---- crates/common/src/config/constants.rs | 11 ++++-- crates/common/src/config/jwt.rs | 57 +++++++++++---------------- crates/common/src/config/signer.rs | 44 +++++++++++++-------- crates/common/src/config/utils.rs | 9 +---- crates/common/src/signer/constants.rs | 6 --- crates/common/src/signer/mod.rs | 2 - crates/signer/src/service.rs | 12 +++--- tests/src/utils.rs | 20 +++++----- tests/tests/signer_jwt_auth.rs | 15 +++++-- 10 files changed, 98 insertions(+), 94 deletions(-) delete mode 100644 crates/common/src/signer/constants.rs diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index 7f418e97..14f5f30e 100644 --- a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -9,16 +9,16 @@ use cb_common::{ CommitBoostConfig, LogsSettings, ModuleKind, SignerConfig, SignerType, BUILDER_PORT_ENV, BUILDER_URLS_ENV, CHAIN_SPEC_ENV, CONFIG_DEFAULT, CONFIG_ENV, DIRK_CA_CERT_DEFAULT, DIRK_CA_CERT_ENV, DIRK_CERT_DEFAULT, DIRK_CERT_ENV, DIRK_DIR_SECRETS_DEFAULT, - DIRK_DIR_SECRETS_ENV, DIRK_KEY_DEFAULT, DIRK_KEY_ENV, JWTS_ENV, LOGS_DIR_DEFAULT, - LOGS_DIR_ENV, METRICS_PORT_ENV, MODULE_ID_ENV, MODULE_JWT_ENV, PBS_ENDPOINT_ENV, - PBS_MODULE_NAME, PROXY_DIR_DEFAULT, PROXY_DIR_ENV, PROXY_DIR_KEYS_DEFAULT, - PROXY_DIR_KEYS_ENV, PROXY_DIR_SECRETS_DEFAULT, PROXY_DIR_SECRETS_ENV, SIGNER_DEFAULT, - SIGNER_DIR_KEYS_DEFAULT, SIGNER_DIR_KEYS_ENV, SIGNER_DIR_SECRETS_DEFAULT, - SIGNER_DIR_SECRETS_ENV, SIGNER_ENDPOINT_ENV, SIGNER_KEYS_ENV, SIGNER_MODULE_NAME, + DIRK_DIR_SECRETS_ENV, DIRK_KEY_DEFAULT, DIRK_KEY_ENV, LOGS_DIR_DEFAULT, LOGS_DIR_ENV, + METRICS_PORT_ENV, MODULE_ID_ENV, MODULE_JWT_ENV, PBS_ENDPOINT_ENV, PBS_MODULE_NAME, + PROXY_DIR_DEFAULT, 
PROXY_DIR_ENV, PROXY_DIR_KEYS_DEFAULT, PROXY_DIR_KEYS_ENV, + PROXY_DIR_SECRETS_DEFAULT, PROXY_DIR_SECRETS_ENV, SIGNER_DEFAULT, SIGNER_DIR_KEYS_DEFAULT, + SIGNER_DIR_KEYS_ENV, SIGNER_DIR_SECRETS_DEFAULT, SIGNER_DIR_SECRETS_ENV, + SIGNER_ENDPOINT_ENV, SIGNER_KEYS_ENV, SIGNER_MODULE_NAME, SIGNER_PORT_DEFAULT, SIGNER_URL_ENV, }, pbs::{BUILDER_API_PATH, GET_STATUS_PATH}, - signer::{ProxyStore, SignerLoader, DEFAULT_SIGNER_PORT}, + signer::{ProxyStore, SignerLoader}, types::ModuleId, utils::random_jwt_secret, }; @@ -73,7 +73,7 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re let mut targets = Vec::new(); // address for signer API communication - let signer_port = cb_config.signer.as_ref().map(|s| s.port).unwrap_or(DEFAULT_SIGNER_PORT); + let signer_port = cb_config.signer.as_ref().map(|s| s.port).unwrap_or(SIGNER_PORT_DEFAULT); let signer_server = if let Some(SignerConfig { inner: SignerType::Remote { url }, .. }) = &cb_config.signer { url.to_string() diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs index 39773cf6..a2f6124b 100644 --- a/crates/common/src/config/constants.rs +++ b/crates/common/src/config/constants.rs @@ -34,14 +34,19 @@ pub const SIGNER_MODULE_NAME: &str = "signer"; /// Where the signer module should open the server pub const SIGNER_ENDPOINT_ENV: &str = "CB_SIGNER_ENDPOINT"; +pub const SIGNER_PORT_DEFAULT: u16 = 20000; -// JWT authentication settings +/// Number of auth failures before rate limiting the client pub const SIGNER_JWT_AUTH_FAIL_LIMIT_ENV: &str = "CB_SIGNER_JWT_AUTH_FAIL_LIMIT"; +pub const SIGNER_JWT_AUTH_FAIL_LIMIT_DEFAULT: u32 = 3; + +/// How long to rate limit the client after auth failures pub const SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV: &str = "CB_SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS"; +pub const SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT: u32 = 5 * 60; -/// Comma separated list module_id=jwt_secret -pub const JWTS_ENV: &str = "CB_JWTS"; +/// JWT file settings +pub const SIGNER_JWT_CONFIG_FILE_ENV: &str = "CB_SIGNER_JWT_CONFIG_FILE"; /// Path to json file with plaintext keys (testing only) pub const SIGNER_KEYS_ENV: &str = "CB_SIGNER_LOADER_FILE"; diff --git a/crates/common/src/config/jwt.rs b/crates/common/src/config/jwt.rs index c266f439..c72b5617 100644 --- a/crates/common/src/config/jwt.rs +++ b/crates/common/src/config/jwt.rs @@ -26,7 +26,7 @@ struct JwtConfigOnDisk { impl JwtConfigOnDisk { /// Load the JWT secret from the provides sources, in order of precedence. - async fn load_jwt_secret(&self) -> Result { + fn load_jwt_secret(&self) -> Result { // Start with the environment variable let jwt_string = if let Some(jwt_env) = &self.jwt_env { // Load JWT secret from environment variable @@ -48,7 +48,7 @@ impl JwtConfigOnDisk { bail!("No JWT secret provided"); }; - Ok(Jwt(jwt_string)) + Ok(jwt_string) } } @@ -57,13 +57,13 @@ struct JwtConfigFile { modules: Vec, } -#[derive(Debug, PartialEq)] +#[derive(Clone, Debug, PartialEq)] pub struct JwtConfig { /// Human-readable name of the module. pub module_name: ModuleId, /// The JWT secret for the module to communicate with the signer module. - pub jwt_secret: Jwt, + pub jwt_secret: String, /// A unique identifier for the module, which is used when signing requests /// to generate signatures for this module. Must be a 32-byte hex string. 
@@ -72,8 +72,7 @@ pub struct JwtConfig { } impl JwtConfig { - // - pub async fn validate(&self) -> Result<()> { + pub fn validate(&self) -> Result<()> { // Ensure the JWT secret is not empty if self.jwt_secret.is_empty() { bail!("JWT secret cannot be empty"); @@ -89,7 +88,7 @@ impl JwtConfig { } /// Load the JWT configuration from a file. -pub async fn load(config_file_path: &Path) -> Result> { +pub fn load(config_file_path: &Path) -> Result> { // Make sure the file is legal if !config_file_path.is_absolute() { bail!("JWT config file '{}' must be an absolute path", config_file_path.display()); @@ -122,21 +121,21 @@ pub async fn load(config_file_path: &Path) -> Result Result> { +fn load_impl(config_file: JwtConfigFile) -> Result> { // Load the JWT secrets and validate them let mut jwt_configs = HashMap::new(); for raw_config in config_file.modules { - let jwt_secret = raw_config.load_jwt_secret().await?; + let jwt_secret = raw_config.load_jwt_secret()?; let jwt_config = JwtConfig { module_name: raw_config.module_name.clone(), jwt_secret, signing_id: raw_config.signing_id, }; - jwt_config.validate().await?; + jwt_config.validate()?; // Make sure there are no duplicate module names if jwt_configs.contains_key(&raw_config.module_name) { @@ -185,7 +184,7 @@ mod tests { // Load the JWT configuration let jwt_config_file: JwtConfigFile = toml::from_str(toml_str).expect("Failed to deserialize JWT config"); - let jwts = load_impl(jwt_config_file).await?; + let jwts = load_impl(jwt_config_file)?; assert!(jwts.len() == 2, "Expected 2 JWT configurations"); // Check the first module @@ -194,7 +193,7 @@ mod tests { assert_eq!(module_1.module_name, module_id_1, "Module name mismatch for 'test_module'"); assert_eq!( module_1.jwt_secret, - Jwt("supersecret".to_string()), + "supersecret".to_string(), "JWT secret mismatch for 'test_module'" ); assert_eq!( @@ -210,7 +209,7 @@ mod tests { assert_eq!(module_2.module_name, module_id_2, "Module name mismatch for '2nd_test_module'"); assert_eq!( module_2.jwt_secret, - Jwt("another-secret".to_string()), + "another-secret".to_string(), "JWT secret mismatch for '2nd_test_module'" ); assert_eq!( @@ -250,7 +249,7 @@ mod tests { let _env_guard = EnvVarGuard { env_name: jwt_env }; let jwt_config_file: JwtConfigFile = toml::from_str(toml_str).expect("Failed to deserialize JWT config"); - jwts = load_impl(jwt_config_file).await?; + jwts = load_impl(jwt_config_file)?; } assert!(jwts.len() == 1, "Expected 1 JWT configuration"); @@ -258,11 +257,7 @@ mod tests { let module_id = ModuleId("test_module".to_string()); let module = jwts.get(&module_id).expect("Missing 'test_module' in JWT configs"); assert_eq!(module.module_name, module_id, "Module name mismatch for 'test_module'"); - assert_eq!( - module.jwt_secret, - Jwt(jwt.to_string()), - "JWT secret mismatch for 'test_module'" - ); + assert_eq!(module.jwt_secret, jwt.to_string(), "JWT secret mismatch for 'test_module'"); assert_eq!( module.signing_id, b256!("0101010101010101010101010101010101010101010101010101010101010101"), @@ -290,18 +285,14 @@ mod tests { // Load the JWT configuration let jwt_config_file: JwtConfigFile = toml::from_str(&toml_str).expect("Failed to deserialize JWT config"); - let jwts = load_impl(jwt_config_file).await?; + let jwts = load_impl(jwt_config_file)?; assert!(jwts.len() == 1, "Expected 1 JWT configuration"); // Check the module let module_id = ModuleId("test_module".to_string()); let module = jwts.get(&module_id).expect("Missing 'test_module' in JWT configs"); assert_eq!(module.module_name, 
module_id, "Module name mismatch for 'test_module'"); - assert_eq!( - module.jwt_secret, - Jwt(jwt.to_string()), - "JWT secret mismatch for 'test_module'" - ); + assert_eq!(module.jwt_secret, jwt.to_string(), "JWT secret mismatch for 'test_module'"); assert_eq!( module.signing_id, b256!("0101010101010101010101010101010101010101010101010101010101010101"), @@ -325,7 +316,7 @@ mod tests { "#; let jwt_config_file: JwtConfigFile = toml::from_str(toml_str).expect("Failed to deserialize JWT config"); - let result = load_impl(jwt_config_file).await; + let result = load_impl(jwt_config_file); assert!(result.is_err(), "Expected error due to duplicate module names"); if let Err(e) = result { assert_eq!(&e.to_string(), "Duplicate JWT configuration for module 'test_module'"); @@ -348,7 +339,7 @@ mod tests { "#; let jwt_config_file: JwtConfigFile = toml::from_str(toml_str).expect("Failed to deserialize JWT config"); - let result = load_impl(jwt_config_file).await; + let result = load_impl(jwt_config_file); assert!(result.is_err(), "Expected error due to duplicate signing IDs"); if let Err(e) = result { assert_eq!(&e.to_string(),"Duplicate signing ID '0x0101010101010101010101010101010101010101010101010101010101010101' for module '2nd_test_module'"); @@ -365,7 +356,7 @@ mod tests { "#; let jwt_config_file: JwtConfigFile = toml::from_str(toml_str).expect("Failed to deserialize JWT config"); - let result = load_impl(jwt_config_file).await; + let result = load_impl(jwt_config_file); assert!(result.is_err(), "Expected error due to missing JWT secret"); if let Err(e) = result { assert_eq!(&e.to_string(), "No JWT secret provided"); @@ -377,11 +368,11 @@ mod tests { async fn test_empty_jwt_secret() -> Result<()> { let cfg = JwtConfig { module_name: ModuleId("test_module".to_string()), - jwt_secret: Jwt("".to_string()), + jwt_secret: "".to_string(), signing_id: b256!("0101010101010101010101010101010101010101010101010101010101010101"), }; - let result = cfg.validate().await; + let result = cfg.validate(); assert!(result.is_err(), "Expected error due to empty JWT secret"); if let Err(e) = result { assert_eq!(&e.to_string(), "JWT secret cannot be empty"); @@ -394,10 +385,10 @@ mod tests { async fn test_zero_signing_id() -> Result<()> { let cfg = JwtConfig { module_name: ModuleId("test_module".to_string()), - jwt_secret: Jwt("supersecret".to_string()), + jwt_secret: "supersecret".to_string(), signing_id: b256!("0000000000000000000000000000000000000000000000000000000000000000"), }; - let result = cfg.validate().await; + let result = cfg.validate(); assert!(result.is_err(), "Expected error due to zero signing ID"); if let Err(e) = result { assert_eq!(&e.to_string(), "Signing ID cannot be zero"); diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index 7e5fbd58..367a67a5 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -5,22 +5,23 @@ use std::{ }; use docker_image::DockerImage; -use eyre::{bail, ensure, OptionExt, Result}; +use eyre::{bail, ensure, Context, OptionExt, Result}; use serde::{Deserialize, Serialize}; use tonic::transport::{Certificate, Identity}; use url::Url; use super::{ - load_jwt_secrets, load_optional_env_var, utils::load_env_var, CommitBoostConfig, - SIGNER_ENDPOINT_ENV, SIGNER_IMAGE_DEFAULT, SIGNER_JWT_AUTH_FAIL_LIMIT_ENV, - SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV, + load_optional_env_var, utils::load_env_var, CommitBoostConfig, SIGNER_ENDPOINT_ENV, + SIGNER_IMAGE_DEFAULT, SIGNER_JWT_AUTH_FAIL_LIMIT_DEFAULT, 
SIGNER_JWT_AUTH_FAIL_LIMIT_ENV, + SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT, SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV, + SIGNER_PORT_DEFAULT, }; use crate::{ - config::{DIRK_CA_CERT_ENV, DIRK_CERT_ENV, DIRK_DIR_SECRETS_ENV, DIRK_KEY_ENV}, - signer::{ - ProxyStore, SignerLoader, DEFAULT_JWT_AUTH_FAIL_LIMIT, - DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS, DEFAULT_SIGNER_PORT, + config::{ + jwt, JwtConfig, DIRK_CA_CERT_ENV, DIRK_CERT_ENV, DIRK_DIR_SECRETS_ENV, DIRK_KEY_ENV, + SIGNER_JWT_CONFIG_FILE_ENV, }, + signer::{ProxyStore, SignerLoader}, types::{Chain, ModuleId}, utils::{default_host, default_u16, default_u32}, }; @@ -32,22 +33,25 @@ pub struct SignerConfig { #[serde(default = "default_host")] pub host: Ipv4Addr, /// Port to listen for signer API calls on - #[serde(default = "default_u16::")] + #[serde(default = "default_u16::")] pub port: u16, /// Docker image of the module - #[serde(default = "default_signer")] + #[serde(default = "default_signer_image")] pub docker_image: String, /// Number of JWT auth failures before rate limiting an endpoint /// If set to 0, no rate limiting will be applied - #[serde(default = "default_u32::")] + #[serde(default = "default_u32::")] pub jwt_auth_fail_limit: u32, /// Duration in seconds to rate limit an endpoint after the JWT auth failure /// limit has been reached - #[serde(default = "default_u32::")] + #[serde(default = "default_u32::")] pub jwt_auth_fail_timeout_seconds: u32, + /// Path to the JWT config file - must be set explicitly + pub jwt_config_file: PathBuf, + /// Inner type-specific configuration #[serde(flatten)] pub inner: SignerType, @@ -70,7 +74,7 @@ impl SignerConfig { } } -fn default_signer() -> String { +fn default_signer_image() -> String { SIGNER_IMAGE_DEFAULT.to_string() } @@ -132,7 +136,7 @@ pub struct StartSignerConfig { pub loader: Option, pub store: Option, pub endpoint: SocketAddr, - pub jwts: HashMap, + pub jwts: HashMap, pub jwt_auth_fail_limit: u32, pub jwt_auth_fail_timeout_seconds: u32, pub dirk: Option, @@ -142,8 +146,6 @@ impl StartSignerConfig { pub fn load_from_env() -> Result { let config = CommitBoostConfig::from_env_path()?; - let jwts = load_jwt_secrets()?; - let signer_config = config.signer.ok_or_eyre("Signer config is missing")?; // Load the server endpoint first from the env var if present, otherwise the @@ -171,6 +173,16 @@ impl StartSignerConfig { signer_config.jwt_auth_fail_timeout_seconds }; + // Load the JWT config file + let jwt_config_path = if let Some(path) = load_optional_env_var(SIGNER_JWT_CONFIG_FILE_ENV) + { + PathBuf::from(path) + } else { + signer_config.jwt_config_file + }; + let jwts = jwt::load(&jwt_config_path) + .wrap_err_with(|| format!("Failed to load JWT config from {jwt_config_path:?}"))?; + match signer_config.inner { SignerType::Local { loader, store, .. 
} => Ok(StartSignerConfig { chain: config.chain, diff --git a/crates/common/src/config/utils.rs b/crates/common/src/config/utils.rs index 67c367c5..86b3be0d 100644 --- a/crates/common/src/config/utils.rs +++ b/crates/common/src/config/utils.rs @@ -3,7 +3,6 @@ use std::{collections::HashMap, path::Path}; use eyre::{bail, Context, Result}; use serde::de::DeserializeOwned; -use super::JWTS_ENV; use crate::types::ModuleId; pub fn load_env_var(env: &str) -> Result { @@ -24,12 +23,7 @@ pub fn load_file_from_env(env: &str) -> Result { load_from_file(&path) } -/// Loads a map of module id -> jwt secret from a json env -pub fn load_jwt_secrets() -> Result> { - let jwt_secrets = std::env::var(JWTS_ENV).wrap_err(format!("{JWTS_ENV} is not set"))?; - decode_string_to_map(&jwt_secrets) -} - +/// TODO: This was only used by the old JWT loader, can it be removed now? fn decode_string_to_map(raw: &str) -> Result> { // trim the string and split for comma raw.trim() @@ -48,6 +42,7 @@ fn decode_string_to_map(raw: &str) -> Result> { mod tests { use super::*; + /// TODO: This was only used by the old JWT loader, can it be removed now? #[test] fn test_decode_string_to_map() { let raw = " KEY=VALUE , KEY2=value2 "; diff --git a/crates/common/src/signer/constants.rs b/crates/common/src/signer/constants.rs deleted file mode 100644 index 45e3ce23..00000000 --- a/crates/common/src/signer/constants.rs +++ /dev/null @@ -1,6 +0,0 @@ -pub const DEFAULT_SIGNER_PORT: u16 = 20000; - -// Rate limit signer API requests for 5 minutes after the endpoint has 3 JWT -// auth failures -pub const DEFAULT_JWT_AUTH_FAIL_LIMIT: u32 = 3; -pub const DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS: u32 = 5 * 60; diff --git a/crates/common/src/signer/mod.rs b/crates/common/src/signer/mod.rs index b6dce29d..e0a164a7 100644 --- a/crates/common/src/signer/mod.rs +++ b/crates/common/src/signer/mod.rs @@ -1,10 +1,8 @@ -mod constants; mod loader; mod schemes; mod store; mod types; -pub use constants::*; pub use loader::*; pub use schemes::*; pub use store::*; diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index 3ca1d5ac..08bb13cd 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -25,7 +25,7 @@ use cb_common::{ SignProxyRequest, SignRequest, }, }, - config::StartSignerConfig, + config::{JwtConfig, StartSignerConfig}, constants::{COMMIT_BOOST_COMMIT, COMMIT_BOOST_VERSION}, types::{Chain, Jwt, ModuleId}, utils::{decode_jwt, validate_jwt}, @@ -60,9 +60,9 @@ struct SigningState { /// Manager handling different signing methods manager: Arc>, - /// Map of modules ids to JWT secrets. This also acts as registry of all - /// modules running - jwts: Arc>, + /// Map of modules ids to JWT configurations. This also acts as registry of + /// all modules running + jwts: Arc>, /// Map of JWT failures per peer jwt_auth_failures: Arc>>, @@ -216,12 +216,12 @@ async fn check_jwt_auth( SignerModuleError::Unauthorized })?; - let jwt_secret = state.jwts.get(&module_id).ok_or_else(|| { + let jwt_config = state.jwts.get(&module_id).ok_or_else(|| { error!("Unauthorized request. Was the module started correctly?"); SignerModuleError::Unauthorized })?; - validate_jwt(jwt, jwt_secret).map_err(|e| { + validate_jwt(jwt, &jwt_config.jwt_secret).map_err(|e| { error!("Unauthorized request. 
Invalid JWT: {e}"); SignerModuleError::Unauthorized })?; diff --git a/tests/src/utils.rs b/tests/src/utils.rs index e8561931..af412f42 100644 --- a/tests/src/utils.rs +++ b/tests/src/utils.rs @@ -1,20 +1,19 @@ use std::{ collections::HashMap, net::{Ipv4Addr, SocketAddr}, + path::PathBuf, sync::{Arc, Once}, }; use alloy::{primitives::U256, rpc::types::beacon::BlsPublicKey}; use cb_common::{ config::{ - PbsConfig, PbsModuleConfig, RelayConfig, SignerConfig, SignerType, StartSignerConfig, - SIGNER_IMAGE_DEFAULT, + JwtConfig, PbsConfig, PbsModuleConfig, RelayConfig, SignerConfig, SignerType, + StartSignerConfig, SIGNER_IMAGE_DEFAULT, SIGNER_JWT_AUTH_FAIL_LIMIT_DEFAULT, + SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT, SIGNER_PORT_DEFAULT, }, pbs::{RelayClient, RelayEntry}, - signer::{ - SignerLoader, DEFAULT_JWT_AUTH_FAIL_LIMIT, DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS, - DEFAULT_SIGNER_PORT, - }, + signer::SignerLoader, types::{Chain, ModuleId}, utils::default_host, }; @@ -104,10 +103,11 @@ pub fn to_pbs_config( pub fn get_signer_config(loader: SignerLoader) -> SignerConfig { SignerConfig { host: default_host(), - port: DEFAULT_SIGNER_PORT, + port: SIGNER_PORT_DEFAULT, docker_image: SIGNER_IMAGE_DEFAULT.to_string(), - jwt_auth_fail_limit: DEFAULT_JWT_AUTH_FAIL_LIMIT, - jwt_auth_fail_timeout_seconds: DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS, + jwt_auth_fail_limit: SIGNER_JWT_AUTH_FAIL_LIMIT_DEFAULT, + jwt_auth_fail_timeout_seconds: SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT, + jwt_config_file: PathBuf::from(""), inner: SignerType::Local { loader, store: None }, } } @@ -115,7 +115,7 @@ pub fn get_signer_config(loader: SignerLoader) -> SignerConfig { pub fn get_start_signer_config( signer_config: SignerConfig, chain: Chain, - jwts: HashMap, + jwts: HashMap, ) -> StartSignerConfig { match signer_config.inner { SignerType::Local { loader, .. 
} => StartSignerConfig { diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs index 90a0365f..a75b1976 100644 --- a/tests/tests/signer_jwt_auth.rs +++ b/tests/tests/signer_jwt_auth.rs @@ -1,9 +1,12 @@ use std::{collections::HashMap, time::Duration}; -use alloy::{hex, primitives::FixedBytes}; +use alloy::{ + hex, + primitives::{b256, FixedBytes, B256}, +}; use cb_common::{ commit::{constants::GET_PUBKEYS_PATH, request::GetPubkeysResponse}, - config::StartSignerConfig, + config::{JwtConfig, StartSignerConfig}, signer::{SignerLoader, ValidatorKeysFormat}, types::{Chain, ModuleId}, utils::create_jwt, @@ -16,6 +19,8 @@ use tracing::info; const JWT_MODULE: &str = "test-module"; const JWT_SECRET: &str = "test-jwt-secret"; +const JWT_SIGNING_ID: B256 = + b256!("0x0123456789012345678901234567890123456789012345678901234567890123"); #[tokio::test] async fn test_signer_jwt_auth_success() -> Result<()> { @@ -95,7 +100,11 @@ async fn start_server(port: u16) -> Result { // Mock JWT secrets let module_id = ModuleId(JWT_MODULE.to_string()); let mut jwts = HashMap::new(); - jwts.insert(module_id.clone(), JWT_SECRET.to_string()); + jwts.insert(module_id.clone(), JwtConfig { + module_name: module_id, + jwt_secret: JWT_SECRET.to_string(), + signing_id: JWT_SIGNING_ID, + }); // Create a signer config let loader = SignerLoader::ValidatorsDir { From 615774e551239bef29d14e5826934e4b4fa70045 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 16 Jun 2025 16:20:05 -0400 Subject: [PATCH 39/67] Signing requests now uses the module's signing ID --- crates/cli/src/docker_init.rs | 16 ++---- crates/common/src/config/jwt.rs | 2 +- crates/signer/src/hasher/keccak.rs | 26 ++++++++++ crates/signer/src/hasher/mod.rs | 11 ++++ crates/signer/src/lib.rs | 1 + crates/signer/src/manager/dirk.rs | 10 ++-- crates/signer/src/service.rs | 81 ++++++++++++++++++------------ 7 files changed, 96 insertions(+), 51 deletions(-) create mode 100644 crates/signer/src/hasher/keccak.rs create mode 100644 crates/signer/src/hasher/mod.rs diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index 14f5f30e..93ecdaa8 100644 --- a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -14,8 +14,8 @@ use cb_common::{ PROXY_DIR_DEFAULT, PROXY_DIR_ENV, PROXY_DIR_KEYS_DEFAULT, PROXY_DIR_KEYS_ENV, PROXY_DIR_SECRETS_DEFAULT, PROXY_DIR_SECRETS_ENV, SIGNER_DEFAULT, SIGNER_DIR_KEYS_DEFAULT, SIGNER_DIR_KEYS_ENV, SIGNER_DIR_SECRETS_DEFAULT, SIGNER_DIR_SECRETS_ENV, - SIGNER_ENDPOINT_ENV, SIGNER_KEYS_ENV, SIGNER_MODULE_NAME, SIGNER_PORT_DEFAULT, - SIGNER_URL_ENV, + SIGNER_ENDPOINT_ENV, SIGNER_JWT_AUTH_FAIL_LIMIT_ENV, SIGNER_JWT_CONFIG_FILE_ENV, + SIGNER_KEYS_ENV, SIGNER_MODULE_NAME, SIGNER_PORT_DEFAULT, SIGNER_URL_ENV, }, pbs::{BUILDER_API_PATH, GET_STATUS_PATH}, signer::{ProxyStore, SignerLoader}, @@ -330,10 +330,7 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re match signer_config.inner { SignerType::Local { loader, store } => { - let mut signer_envs = IndexMap::from([ - get_env_val(CONFIG_ENV, CONFIG_DEFAULT), - get_env_same(JWTS_ENV), - ]); + let mut signer_envs = IndexMap::from([get_env_val(CONFIG_ENV, CONFIG_DEFAULT)]); // Bind the signer API to 0.0.0.0 let container_endpoint = @@ -364,9 +361,6 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re signer_envs.insert(key, val); } - // write jwts to env - envs.insert(JWTS_ENV.into(), format_comma_separated(&jwts)); - // volumes let mut volumes = vec![config_volume.clone()]; 
volumes.extend(chain_spec_volume.clone()); @@ -464,7 +458,6 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re SignerType::Dirk { cert_path, key_path, secrets_path, ca_cert_path, store, .. } => { let mut signer_envs = IndexMap::from([ get_env_val(CONFIG_ENV, CONFIG_DEFAULT), - get_env_same(JWTS_ENV), get_env_val(DIRK_CERT_ENV, DIRK_CERT_DEFAULT), get_env_val(DIRK_KEY_ENV, DIRK_KEY_DEFAULT), get_env_val(DIRK_DIR_SECRETS_ENV, DIRK_DIR_SECRETS_DEFAULT), @@ -499,9 +492,6 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re signer_envs.insert(key, val); } - // write jwts to env - envs.insert(JWTS_ENV.into(), format_comma_separated(&jwts)); - // volumes let mut volumes = vec![ config_volume.clone(), diff --git a/crates/common/src/config/jwt.rs b/crates/common/src/config/jwt.rs index c72b5617..ab6baef4 100644 --- a/crates/common/src/config/jwt.rs +++ b/crates/common/src/config/jwt.rs @@ -8,7 +8,7 @@ use alloy::primitives::B256; use eyre::{bail, Result}; use serde::Deserialize; -use crate::types::{Jwt, ModuleId}; +use crate::types::ModuleId; /// Underlying implementation of the JWT configuration that's deserialized from /// disk. diff --git a/crates/signer/src/hasher/keccak.rs b/crates/signer/src/hasher/keccak.rs new file mode 100644 index 00000000..80a2ff54 --- /dev/null +++ b/crates/signer/src/hasher/keccak.rs @@ -0,0 +1,26 @@ +use alloy::primitives::{Keccak256, B256}; + +use super::SigningHasher; + +/// A hasher that uses Keccak256 for signing request hashes. +#[derive(Clone)] +pub struct KeccakHasher {} + +impl KeccakHasher { + /// Creates a new KeccakHasher instance. + pub fn new() -> Self { + Self {} + } +} + +impl SigningHasher for KeccakHasher { + /// Hashes an object root from a signing request and the unique signing ID + /// for the requesting module into a hash that can be used to sign the + /// request. + fn hash(&self, object_root: &B256, signing_id: &B256) -> B256 { + let mut hasher = Keccak256::new(); + hasher.update(object_root); + hasher.update(signing_id); + hasher.finalize() + } +} diff --git a/crates/signer/src/hasher/mod.rs b/crates/signer/src/hasher/mod.rs new file mode 100644 index 00000000..d124448a --- /dev/null +++ b/crates/signer/src/hasher/mod.rs @@ -0,0 +1,11 @@ +use alloy::primitives::B256; + +pub mod keccak; + +/// A trait for hashers that can provide unique signing hashes for incoming +/// signing requests. +pub trait SigningHasher: Clone { + /// Hashes an object root from a signing request and the unique signing ID + /// for the requesting module. 
+ fn hash(&self, object_root: &B256, signing_id: &B256) -> B256; +} diff --git a/crates/signer/src/lib.rs b/crates/signer/src/lib.rs index 4b5e1451..cfe48407 100644 --- a/crates/signer/src/lib.rs +++ b/crates/signer/src/lib.rs @@ -1,5 +1,6 @@ mod constants; pub mod error; +pub mod hasher; pub mod manager; mod metrics; mod proto; diff --git a/crates/signer/src/manager/dirk.rs b/crates/signer/src/manager/dirk.rs index 4c2d909f..760e6640 100644 --- a/crates/signer/src/manager/dirk.rs +++ b/crates/signer/src/manager/dirk.rs @@ -192,7 +192,7 @@ impl DirkManager { pub async fn request_consensus_signature( &self, pubkey: &BlsPublicKey, - object_root: [u8; 32], + object_root: &[u8; 32], ) -> Result { match self.consensus_accounts.get(pubkey) { Some(Account::Simple(account)) => { @@ -209,7 +209,7 @@ impl DirkManager { pub async fn request_proxy_signature( &self, pubkey: &BlsPublicKey, - object_root: [u8; 32], + object_root: &[u8; 32], ) -> Result { match self.proxy_accounts.get(pubkey) { Some(ProxyAccount { inner: Account::Simple(account), .. }) => { @@ -226,7 +226,7 @@ impl DirkManager { async fn request_simple_signature( &self, account: &SimpleAccount, - object_root: [u8; 32], + object_root: &[u8; 32], ) -> Result { let domain = compute_domain(self.chain, COMMIT_BOOST_DOMAIN); @@ -256,7 +256,7 @@ impl DirkManager { async fn request_distributed_signature( &self, account: &DistributedAccount, - object_root: [u8; 32], + object_root: &[u8; 32], ) -> Result { let mut partials = Vec::with_capacity(account.participants.len()); let mut requests = Vec::with_capacity(account.participants.len()); @@ -336,7 +336,7 @@ impl DirkManager { let message = ProxyDelegation { delegator: consensus, proxy: proxy_account.inner.public_key() }; let delegation_signature = - self.request_consensus_signature(&consensus, message.tree_hash_root().0).await?; + self.request_consensus_signature(&consensus, &message.tree_hash_root().0).await?; let delegation = SignedProxyDelegation { message, signature: delegation_signature }; diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index 08bb13cd..d0cc1ad6 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -5,6 +5,7 @@ use std::{ time::{Duration, Instant}, }; +use alloy::primitives::B256; use axum::{ extract::{ConnectInfo, Request, State}, http::StatusCode, @@ -39,6 +40,7 @@ use uuid::Uuid; use crate::{ error::SignerModuleError, + hasher::{keccak::KeccakHasher, SigningHasher}, manager::{dirk::DirkManager, local::LocalSigningManager, SigningManager}, metrics::{uri_to_tag, SIGNER_METRICS_REGISTRY, SIGNER_STATUS}, }; @@ -56,10 +58,13 @@ struct JwtAuthFailureInfo { } #[derive(Clone)] -struct SigningState { +struct SigningState { /// Manager handling different signing methods manager: Arc>, + /// Hasher used to create unique hashes for signing requests + hasher: H, + /// Map of modules ids to JWT configurations. 
This also acts as registry of /// all modules running jwts: Arc>, @@ -83,6 +88,7 @@ impl SigningService { let state = SigningState { manager: Arc::new(RwLock::new(start_manager(config.clone()).await?)), + hasher: KeccakHasher::new(), jwts: config.jwts.into(), jwt_auth_failures: Arc::new(RwLock::new(HashMap::new())), jwt_auth_fail_limit: config.jwt_auth_fail_limit, @@ -134,8 +140,8 @@ impl SigningService { } /// Authentication middleware layer -async fn jwt_auth( - State(state): State, +async fn jwt_auth( + State(state): State>, TypedHeader(auth): TypedHeader>, addr: ConnectInfo, mut req: Request, @@ -166,8 +172,8 @@ async fn jwt_auth( /// Checks if the incoming request needs to be rate limited due to previous JWT /// authentication failures -async fn check_jwt_rate_limit( - state: &SigningState, +async fn check_jwt_rate_limit( + state: &SigningState, client_ip: &String, ) -> Result<(), SignerModuleError> { let mut failures = state.jwt_auth_failures.write().await; @@ -203,9 +209,9 @@ async fn check_jwt_rate_limit( } /// Checks if a request can successfully authenticate with the JWT secret -async fn check_jwt_auth( +async fn check_jwt_auth( auth: &Authorization, - state: &SigningState, + state: &SigningState, ) -> Result { let jwt: Jwt = auth.token().to_string().into(); @@ -242,9 +248,9 @@ async fn handle_status() -> Result { } /// Implements get_pubkeys from the Signer API -async fn handle_get_pubkeys( +async fn handle_get_pubkeys( Extension(module_id): Extension, - State(state): State, + State(state): State>, ) -> Result { let req_id = Uuid::new_v4(); @@ -263,44 +269,55 @@ async fn handle_get_pubkeys( } /// Implements request_signature from the Signer API -async fn handle_request_signature( +async fn handle_request_signature( Extension(module_id): Extension, - State(state): State, + State(state): State>, Json(request): Json, ) -> Result { let req_id = Uuid::new_v4(); - + let signing_id = &state.jwts[&module_id].signing_id; debug!(event = "request_signature", ?module_id, %request, ?req_id, "New request"); let manager = state.manager.read().await; let res = match &*manager { SigningManager::Local(local_manager) => match request { - SignRequest::Consensus(SignConsensusRequest { object_root, pubkey }) => local_manager - .sign_consensus(&pubkey, &object_root) - .await - .map(|sig| Json(sig).into_response()), - SignRequest::ProxyBls(SignProxyRequest { object_root, proxy: bls_key }) => { + SignRequest::Consensus(SignConsensusRequest { ref object_root, ref pubkey }) => { + let hash = state.hasher.hash(object_root, signing_id); + local_manager + .sign_consensus(pubkey, &hash) + .await + .map(|sig| Json(sig).into_response()) + } + SignRequest::ProxyBls(SignProxyRequest { ref object_root, proxy: ref bls_key }) => { + let hash = state.hasher.hash(object_root, signing_id); local_manager - .sign_proxy_bls(&bls_key, &object_root) + .sign_proxy_bls(bls_key, &hash) .await .map(|sig| Json(sig).into_response()) } - SignRequest::ProxyEcdsa(SignProxyRequest { object_root, proxy: ecdsa_key }) => { + SignRequest::ProxyEcdsa(SignProxyRequest { ref object_root, proxy: ref ecdsa_key }) => { + let hash = state.hasher.hash(object_root, signing_id); local_manager - .sign_proxy_ecdsa(&ecdsa_key, &object_root) + .sign_proxy_ecdsa(ecdsa_key, &hash) .await .map(|sig| Json(sig).into_response()) } }, SigningManager::Dirk(dirk_manager) => match request { - SignRequest::Consensus(SignConsensusRequest { object_root, pubkey }) => dirk_manager - .request_consensus_signature(&pubkey, *object_root) - .await - .map(|sig| 
Json(sig).into_response()), - SignRequest::ProxyBls(SignProxyRequest { object_root, proxy: bls_key }) => dirk_manager - .request_proxy_signature(&bls_key, *object_root) - .await - .map(|sig| Json(sig).into_response()), + SignRequest::Consensus(SignConsensusRequest { ref object_root, ref pubkey }) => { + let hash = state.hasher.hash(object_root, signing_id); + dirk_manager + .request_consensus_signature(pubkey, &hash) + .await + .map(|sig| Json(sig).into_response()) + } + SignRequest::ProxyBls(SignProxyRequest { ref object_root, proxy: ref bls_key }) => { + let hash = state.hasher.hash(object_root, signing_id); + dirk_manager + .request_proxy_signature(bls_key, &hash) + .await + .map(|sig| Json(sig).into_response()) + } SignRequest::ProxyEcdsa(_) => { error!( event = "request_signature", @@ -320,9 +337,9 @@ async fn handle_request_signature( res } -async fn handle_generate_proxy( +async fn handle_generate_proxy( Extension(module_id): Extension, - State(state): State, + State(state): State>, Json(request): Json, ) -> Result { let req_id = Uuid::new_v4(); @@ -360,8 +377,8 @@ async fn handle_generate_proxy( res } -async fn handle_reload( - State(mut state): State, +async fn handle_reload( + State(mut state): State>, ) -> Result { let req_id = Uuid::new_v4(); From af6076d1bb94cdc5588031adbf2ed18aadae2563 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 17 Jun 2025 17:44:09 -0400 Subject: [PATCH 40/67] Finished added signing ID support and a quick test --- config.example.toml | 13 ++-- crates/common/src/config/jwt.rs | 2 +- crates/common/src/config/signer.rs | 2 +- crates/common/src/config/utils.rs | 10 +-- crates/signer/src/service.rs | 7 +- tests/data/configs/jwt.happy.toml | 4 ++ tests/src/lib.rs | 1 + tests/src/signer_service.rs | 71 ++++++++++++++++++++ tests/src/utils.rs | 16 +++-- tests/tests/signer_jwt_auth.rs | 101 +++++------------------------ tests/tests/signer_request_sig.rs | 55 ++++++++++++++++ 11 files changed, 180 insertions(+), 102 deletions(-) create mode 100644 tests/data/configs/jwt.happy.toml create mode 100644 tests/src/signer_service.rs create mode 100644 tests/tests/signer_request_sig.rs diff --git a/config.example.toml b/config.example.toml index 899c6a10..7a6ae502 100644 --- a/config.example.toml +++ b/config.example.toml @@ -144,10 +144,10 @@ url = "http://0xa119589bb33ef52acbb8116832bec2b58fca590fe5c85eac5d3230b44d5bc09f # - Dirk: a remote Dirk instance # - Local: a local Signer module # More details on the docs (https://commit-boost.github.io/commit-boost-client/get_started/configuration/#signer-module) -# [signer] +[signer] # Docker image to use for the Signer module. # OPTIONAL, DEFAULT: ghcr.io/commit-boost/signer:latest -# docker_image = "ghcr.io/commit-boost/signer:latest" +docker_image = "ghcr.io/commit-boost/signer:latest" # Host to bind the Signer API server to # OPTIONAL, DEFAULT: 127.0.0.1 host = "127.0.0.1" @@ -156,10 +156,15 @@ host = "127.0.0.1" port = 20000 # Number of JWT authentication attempts a client can fail before blocking that client temporarily from Signer access # OPTIONAL, DEFAULT: 3 -jwt_auth_fail_limit: 3 +jwt_auth_fail_limit = 3 # How long to block a client from Signer access, in seconds, if it failed JWT authentication too many times # OPTIONAL, DEFAULT: 300 -jwt_auth_fail_timeout_seconds: 300 +jwt_auth_fail_timeout_seconds = 300 +# The path of the file on-disk with the JWT and signing ID configuration for each module. +# If you use modules in your setup, you'll need this for them to authenticate with Commit Boost properly. 
+# If you don't, you can leave this blank. +# See for more information on the module JWT configuration file. +jwt_config_file = "./tests/data/configs/jwt.happy.toml" # For Remote signer: # [signer.remote] diff --git a/crates/common/src/config/jwt.rs b/crates/common/src/config/jwt.rs index ab6baef4..9daa0c95 100644 --- a/crates/common/src/config/jwt.rs +++ b/crates/common/src/config/jwt.rs @@ -88,7 +88,7 @@ impl JwtConfig { } /// Load the JWT configuration from a file. -pub fn load(config_file_path: &Path) -> Result> { +pub fn load_jwt_config_file(config_file_path: &Path) -> Result> { // Make sure the file is legal if !config_file_path.is_absolute() { bail!("JWT config file '{}' must be an absolute path", config_file_path.display()); diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index 367a67a5..2bb24476 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -180,7 +180,7 @@ impl StartSignerConfig { } else { signer_config.jwt_config_file }; - let jwts = jwt::load(&jwt_config_path) + let jwts = jwt::load_jwt_config_file(&jwt_config_path) .wrap_err_with(|| format!("Failed to load JWT config from {jwt_config_path:?}"))?; match signer_config.inner { diff --git a/crates/common/src/config/utils.rs b/crates/common/src/config/utils.rs index 86b3be0d..34c649e3 100644 --- a/crates/common/src/config/utils.rs +++ b/crates/common/src/config/utils.rs @@ -1,10 +1,8 @@ -use std::{collections::HashMap, path::Path}; +use std::path::Path; -use eyre::{bail, Context, Result}; +use eyre::{Context, Result}; use serde::de::DeserializeOwned; -use crate::types::ModuleId; - pub fn load_env_var(env: &str) -> Result { std::env::var(env).wrap_err(format!("{env} is not set")) } @@ -23,7 +21,8 @@ pub fn load_file_from_env(env: &str) -> Result { load_from_file(&path) } -/// TODO: This was only used by the old JWT loader, can it be removed now? +// TODO: This was only used by the old JWT loader, can it be removed now? 
+/* fn decode_string_to_map(raw: &str) -> Result> { // trim the string and split for comma raw.trim() @@ -53,3 +52,4 @@ mod tests { assert_eq!(map.get(&ModuleId("KEY2".into())), Some(&"value2".to_string())); } } +*/ diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index d0cc1ad6..74fac631 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -5,7 +5,6 @@ use std::{ time::{Duration, Instant}, }; -use alloy::primitives::B256; use axum::{ extract::{ConnectInfo, Request, State}, http::StatusCode, @@ -58,7 +57,10 @@ struct JwtAuthFailureInfo { } #[derive(Clone)] -struct SigningState { +struct SigningState +where + H: SigningHasher, +{ /// Manager handling different signing methods manager: Arc>, @@ -283,6 +285,7 @@ async fn handle_request_signature( SigningManager::Local(local_manager) => match request { SignRequest::Consensus(SignConsensusRequest { ref object_root, ref pubkey }) => { let hash = state.hasher.hash(object_root, signing_id); + info!("Signing hash: {hash:?}"); local_manager .sign_consensus(pubkey, &hash) .await diff --git a/tests/data/configs/jwt.happy.toml b/tests/data/configs/jwt.happy.toml new file mode 100644 index 00000000..c8739fde --- /dev/null +++ b/tests/data/configs/jwt.happy.toml @@ -0,0 +1,4 @@ +[[modules]] +module_name = "test-module" +jwt_secret = "supersecret" +signing_id = "0x6a33a23ef26a4836979edff86c493a69b26ccf0b4a16491a815a13787657431b" \ No newline at end of file diff --git a/tests/src/lib.rs b/tests/src/lib.rs index a4fbbb6a..54aedc46 100644 --- a/tests/src/lib.rs +++ b/tests/src/lib.rs @@ -1,3 +1,4 @@ pub mod mock_relay; pub mod mock_validator; +pub mod signer_service; pub mod utils; diff --git a/tests/src/signer_service.rs b/tests/src/signer_service.rs new file mode 100644 index 00000000..e87ef314 --- /dev/null +++ b/tests/src/signer_service.rs @@ -0,0 +1,71 @@ +use std::{collections::HashMap, time::Duration}; + +use alloy::{hex, primitives::FixedBytes}; +use cb_common::{ + commit::request::GetPubkeysResponse, + config::{JwtConfig, StartSignerConfig}, + signer::{SignerLoader, ValidatorKeysFormat}, + types::{Chain, ModuleId}, +}; +use cb_signer::service::SigningService; +use eyre::Result; +use reqwest::{Response, StatusCode}; +use tracing::info; + +use crate::utils::{get_signer_config, get_start_signer_config}; + +// Starts the signer moduler server on a separate task and returns its +// configuration +pub async fn start_server( + port: u16, + jwts: &HashMap, +) -> Result { + let chain = Chain::Hoodi; + + // Create a signer config + let loader = SignerLoader::ValidatorsDir { + keys_path: "data/keystores/keys".into(), + secrets_path: "data/keystores/secrets".into(), + format: ValidatorKeysFormat::Lighthouse, + }; + let mut config = get_signer_config(loader); + config.port = port; + config.jwt_auth_fail_limit = 3; // Set a low fail limit for testing + config.jwt_auth_fail_timeout_seconds = 3; // Set a short timeout for testing + let start_config = get_start_signer_config(config, chain, jwts); + + // Run the Signer + let server_handle = tokio::spawn(SigningService::run(start_config.clone())); + + // Make sure the server is running + tokio::time::sleep(Duration::from_millis(100)).await; + if server_handle.is_finished() { + return Err(eyre::eyre!( + "Signer service failed to start: {}", + server_handle.await.unwrap_err() + )); + } + Ok(start_config) +} + +// Verifies that the pubkeys returned by the server match the pubkeys in the +// test data +pub async fn verify_pubkeys(response: Response) -> Result<()> { + // 
Verify the expected pubkeys are returned + assert!(response.status() == StatusCode::OK); + let pubkey_json = response.json::().await?; + assert_eq!(pubkey_json.keys.len(), 2); + let expected_pubkeys = vec![ + FixedBytes::new(hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4")), + FixedBytes::new(hex!("b3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9")), + ]; + for expected in expected_pubkeys { + assert!( + pubkey_json.keys.iter().any(|k| k.consensus == expected), + "Expected pubkey not found: {:?}", + expected + ); + info!("Server returned expected pubkey: {:?}", expected); + } + Ok(()) +} diff --git a/tests/src/utils.rs b/tests/src/utils.rs index af412f42..dc962f5f 100644 --- a/tests/src/utils.rs +++ b/tests/src/utils.rs @@ -8,8 +8,8 @@ use std::{ use alloy::{primitives::U256, rpc::types::beacon::BlsPublicKey}; use cb_common::{ config::{ - JwtConfig, PbsConfig, PbsModuleConfig, RelayConfig, SignerConfig, SignerType, - StartSignerConfig, SIGNER_IMAGE_DEFAULT, SIGNER_JWT_AUTH_FAIL_LIMIT_DEFAULT, + load_jwt_config_file, JwtConfig, PbsConfig, PbsModuleConfig, RelayConfig, SignerConfig, + SignerType, StartSignerConfig, SIGNER_IMAGE_DEFAULT, SIGNER_JWT_AUTH_FAIL_LIMIT_DEFAULT, SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT, SIGNER_PORT_DEFAULT, }, pbs::{RelayClient, RelayEntry}, @@ -115,7 +115,7 @@ pub fn get_signer_config(loader: SignerLoader) -> SignerConfig { pub fn get_start_signer_config( signer_config: SignerConfig, chain: Chain, - jwts: HashMap, + jwts: &HashMap, ) -> StartSignerConfig { match signer_config.inner { SignerType::Local { loader, .. } => StartSignerConfig { @@ -123,7 +123,7 @@ pub fn get_start_signer_config( loader: Some(loader), store: None, endpoint: SocketAddr::new(signer_config.host.into(), signer_config.port), - jwts, + jwts: jwts.clone(), jwt_auth_fail_limit: signer_config.jwt_auth_fail_limit, jwt_auth_fail_timeout_seconds: signer_config.jwt_auth_fail_timeout_seconds, dirk: None, @@ -131,3 +131,11 @@ pub fn get_start_signer_config( _ => panic!("Only local signers are supported in tests"), } } + +/// Loads the JWT config from the test file +pub fn get_jwt_config() -> HashMap { + let cwd = std::env::current_dir().unwrap(); + let mut jwt_file_path = cwd.join("data/configs/jwt.happy.toml"); + jwt_file_path = jwt_file_path.canonicalize().unwrap(); + load_jwt_config_file(&jwt_file_path).expect("Failed to load JWT config") +} diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs index a75b1976..06a2ea32 100644 --- a/tests/tests/signer_jwt_auth.rs +++ b/tests/tests/signer_jwt_auth.rs @@ -1,35 +1,26 @@ -use std::{collections::HashMap, time::Duration}; +use std::time::Duration; -use alloy::{ - hex, - primitives::{b256, FixedBytes, B256}, +use cb_common::{commit::constants::GET_PUBKEYS_PATH, types::ModuleId, utils::create_jwt}; +use cb_tests::{ + signer_service::{start_server, verify_pubkeys}, + utils::{self, setup_test_env}, }; -use cb_common::{ - commit::{constants::GET_PUBKEYS_PATH, request::GetPubkeysResponse}, - config::{JwtConfig, StartSignerConfig}, - signer::{SignerLoader, ValidatorKeysFormat}, - types::{Chain, ModuleId}, - utils::create_jwt, -}; -use cb_signer::service::SigningService; -use cb_tests::utils::{get_signer_config, get_start_signer_config, setup_test_env}; use eyre::Result; -use reqwest::{Response, StatusCode}; +use reqwest::StatusCode; use tracing::info; const JWT_MODULE: &str = "test-module"; -const JWT_SECRET: &str = "test-jwt-secret"; 
-const JWT_SIGNING_ID: B256 = - b256!("0x0123456789012345678901234567890123456789012345678901234567890123"); #[tokio::test] async fn test_signer_jwt_auth_success() -> Result<()> { setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); - let start_config = start_server(20100).await?; + let jwts = utils::get_jwt_config(); + let start_config = start_server(20100, &jwts).await?; + let jwt_config = jwts.get(&module_id).expect("JWT config for test module not found"); // Run a pubkeys request - let jwt = create_jwt(&module_id, JWT_SECRET)?; + let jwt = create_jwt(&module_id, &jwt_config.jwt_secret)?; let client = reqwest::Client::new(); let url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH); let response = client.get(&url).bearer_auth(&jwt).send().await?; @@ -44,7 +35,8 @@ async fn test_signer_jwt_auth_success() -> Result<()> { async fn test_signer_jwt_auth_fail() -> Result<()> { setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); - let start_config = start_server(20200).await?; + let jwts = utils::get_jwt_config(); + let start_config = start_server(20101, &jwts).await?; // Run a pubkeys request - this should fail due to invalid JWT let jwt = create_jwt(&module_id, "incorrect secret")?; @@ -64,7 +56,9 @@ async fn test_signer_jwt_auth_fail() -> Result<()> { async fn test_signer_jwt_rate_limit() -> Result<()> { setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); - let start_config = start_server(20300).await?; + let jwts = utils::get_jwt_config(); + let start_config = start_server(20102, &jwts).await?; + let jwt_config = jwts.get(&module_id).expect("JWT config for test module not found"); // Run as many pubkeys requests as the fail limit let jwt = create_jwt(&module_id, "incorrect secret")?; @@ -76,7 +70,7 @@ async fn test_signer_jwt_rate_limit() -> Result<()> { } // Run another request - this should fail due to rate limiting now - let jwt = create_jwt(&module_id, JWT_SECRET)?; + let jwt = create_jwt(&module_id, &jwt_config.jwt_secret)?; let response = client.get(&url).bearer_auth(&jwt).send().await?; assert!(response.status() == StatusCode::TOO_MANY_REQUESTS); @@ -90,66 +84,3 @@ async fn test_signer_jwt_rate_limit() -> Result<()> { Ok(()) } - -// Starts the signer moduler server on a separate task and returns its -// configuration -async fn start_server(port: u16) -> Result { - setup_test_env(); - let chain = Chain::Hoodi; - - // Mock JWT secrets - let module_id = ModuleId(JWT_MODULE.to_string()); - let mut jwts = HashMap::new(); - jwts.insert(module_id.clone(), JwtConfig { - module_name: module_id, - jwt_secret: JWT_SECRET.to_string(), - signing_id: JWT_SIGNING_ID, - }); - - // Create a signer config - let loader = SignerLoader::ValidatorsDir { - keys_path: "data/keystores/keys".into(), - secrets_path: "data/keystores/secrets".into(), - format: ValidatorKeysFormat::Lighthouse, - }; - let mut config = get_signer_config(loader); - config.port = port; - config.jwt_auth_fail_limit = 3; // Set a low fail limit for testing - config.jwt_auth_fail_timeout_seconds = 3; // Set a short timeout for testing - let start_config = get_start_signer_config(config, chain, jwts); - - // Run the Signer - let server_handle = tokio::spawn(SigningService::run(start_config.clone())); - - // Make sure the server is running - tokio::time::sleep(Duration::from_millis(100)).await; - if server_handle.is_finished() { - return Err(eyre::eyre!( - "Signer service failed to start: {}", - server_handle.await.unwrap_err() - )); - } - Ok(start_config) -} - -// Verifies 
that the pubkeys returned by the server match the pubkeys in the -// test data -async fn verify_pubkeys(response: Response) -> Result<()> { - // Verify the expected pubkeys are returned - assert!(response.status() == StatusCode::OK); - let pubkey_json = response.json::().await?; - assert_eq!(pubkey_json.keys.len(), 2); - let expected_pubkeys = vec![ - FixedBytes::new(hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4")), - FixedBytes::new(hex!("b3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9")), - ]; - for expected in expected_pubkeys { - assert!( - pubkey_json.keys.iter().any(|k| k.consensus == expected), - "Expected pubkey not found: {:?}", - expected - ); - info!("Server returned expected pubkey: {:?}", expected); - } - Ok(()) -} diff --git a/tests/tests/signer_request_sig.rs b/tests/tests/signer_request_sig.rs new file mode 100644 index 00000000..19b81ebc --- /dev/null +++ b/tests/tests/signer_request_sig.rs @@ -0,0 +1,55 @@ +use alloy::{ + hex, + primitives::{b256, FixedBytes}, +}; +use cb_common::{ + commit::{ + constants::REQUEST_SIGNATURE_PATH, + request::{SignConsensusRequest, SignRequest}, + }, + types::ModuleId, + utils::create_jwt, +}; +use cb_tests::{ + signer_service::start_server, + utils::{self, setup_test_env}, +}; +use eyre::Result; +use reqwest::StatusCode; +use tracing::info; + +const JWT_MODULE: &str = "test-module"; + +/// Makes sure the signer service signs requests correctly, using the module's +/// signing ID +#[tokio::test] +async fn test_signer_sign_request_good() -> Result<()> { + setup_test_env(); + let module_id = ModuleId(JWT_MODULE.to_string()); + let jwts = utils::get_jwt_config(); + let start_config = start_server(20200, &jwts).await?; + let jwt_config = jwts.get(&module_id).expect("JWT config for test module not found"); + + let pubkey = FixedBytes::new(hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4")); + let object_root = b256!("0x0123456789012345678901234567890123456789012345678901234567890123"); + + // Send a signing request + let request = SignRequest::Consensus(SignConsensusRequest { pubkey, object_root }); + let jwt = create_jwt(&module_id, &jwt_config.jwt_secret)?; + let client = reqwest::Client::new(); + let url = format!("http://{}{}", start_config.endpoint, REQUEST_SIGNATURE_PATH); + let response = client.post(&url).json(&request).bearer_auth(&jwt).send().await?; + + // Verify the response is successful + assert!(response.status() == StatusCode::OK); + info!("Server returned expected success code {} for signing request", response.status()); + + // Verify the signature is returned + let signature = response.text().await?; + assert!(!signature.is_empty(), "Signature should not be empty"); + + let expected_signature = "\"0x992e6fc29ba219e6afeceb91df3f58ebaa6c82ea8d00b3f4564a4d47cfd886c076ade87c6df765ba3fdcc5ba71513d8f0f12b17c76e4859126ab902a3ae5e8789eb3c9c49e8e9c5cd70ef0a93c76ca16763a940b991192eaba97dcc8c060ff7a\""; + assert_eq!(signature, expected_signature, "Signature does not match expected value"); + + Ok(()) +} From cc13a6fa2ee95e4f0e9f1fbefee7afb037c9ac34 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 17 Jun 2025 23:15:00 -0400 Subject: [PATCH 41/67] Fixed some example config parameters --- config.example.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config.example.toml b/config.example.toml index 899c6a10..176b6b31 100644 --- a/config.example.toml +++ 
b/config.example.toml @@ -156,10 +156,10 @@ host = "127.0.0.1" port = 20000 # Number of JWT authentication attempts a client can fail before blocking that client temporarily from Signer access # OPTIONAL, DEFAULT: 3 -jwt_auth_fail_limit: 3 +jwt_auth_fail_limit = 3 # How long to block a client from Signer access, in seconds, if it failed JWT authentication too many times # OPTIONAL, DEFAULT: 300 -jwt_auth_fail_timeout_seconds: 300 +jwt_auth_fail_timeout_seconds = 300 # For Remote signer: # [signer.remote] From 488547a42f4464a930d171dad639c34744b33823 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 18 Jun 2025 00:16:17 -0400 Subject: [PATCH 42/67] Added a test to ensure modules can't create the same sigs --- tests/data/configs/jwt.happy.toml | 7 ++++- tests/tests/signer_request_sig.rs | 49 ++++++++++++++++++++++++++----- 2 files changed, 48 insertions(+), 8 deletions(-) diff --git a/tests/data/configs/jwt.happy.toml b/tests/data/configs/jwt.happy.toml index c8739fde..8df699ed 100644 --- a/tests/data/configs/jwt.happy.toml +++ b/tests/data/configs/jwt.happy.toml @@ -1,4 +1,9 @@ [[modules]] module_name = "test-module" jwt_secret = "supersecret" -signing_id = "0x6a33a23ef26a4836979edff86c493a69b26ccf0b4a16491a815a13787657431b" \ No newline at end of file +signing_id = "0x6a33a23ef26a4836979edff86c493a69b26ccf0b4a16491a815a13787657431b" + +[[modules]] +module_name = "another-module" +jwt_secret = "secondsecret" +signing_id = "0x61fe00135d7b4912a8c63ada215ac2e62326e6e7b30f49a29fcf9779d7ad800d" \ No newline at end of file diff --git a/tests/tests/signer_request_sig.rs b/tests/tests/signer_request_sig.rs index 19b81ebc..a69648a6 100644 --- a/tests/tests/signer_request_sig.rs +++ b/tests/tests/signer_request_sig.rs @@ -18,23 +18,27 @@ use eyre::Result; use reqwest::StatusCode; use tracing::info; -const JWT_MODULE: &str = "test-module"; +const MODULE_ID_1: &str = "test-module"; +const MODULE_ID_2: &str = "another-module"; +const PUBKEY_1: [u8; 48] = + hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4"); +const PUBKEY_2: [u8; 48] = + hex!("b3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9"); /// Makes sure the signer service signs requests correctly, using the module's /// signing ID #[tokio::test] async fn test_signer_sign_request_good() -> Result<()> { setup_test_env(); - let module_id = ModuleId(JWT_MODULE.to_string()); + let module_id = ModuleId(MODULE_ID_1.to_string()); let jwts = utils::get_jwt_config(); let start_config = start_server(20200, &jwts).await?; let jwt_config = jwts.get(&module_id).expect("JWT config for test module not found"); - let pubkey = FixedBytes::new(hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4")); - let object_root = b256!("0x0123456789012345678901234567890123456789012345678901234567890123"); - // Send a signing request - let request = SignRequest::Consensus(SignConsensusRequest { pubkey, object_root }); + let object_root = b256!("0x0123456789012345678901234567890123456789012345678901234567890123"); + let request = + SignRequest::Consensus(SignConsensusRequest { pubkey: FixedBytes(PUBKEY_1), object_root }); let jwt = create_jwt(&module_id, &jwt_config.jwt_secret)?; let client = reqwest::Client::new(); let url = format!("http://{}{}", start_config.endpoint, REQUEST_SIGNATURE_PATH); @@ -42,7 +46,6 @@ async fn test_signer_sign_request_good() -> Result<()> { // Verify the response is successful 
assert!(response.status() == StatusCode::OK); - info!("Server returned expected success code {} for signing request", response.status()); // Verify the signature is returned let signature = response.text().await?; @@ -53,3 +56,35 @@ async fn test_signer_sign_request_good() -> Result<()> { Ok(()) } + +/// Makes sure the signer service returns a signature that is different for each +/// module +#[tokio::test] +async fn test_signer_sign_request_different_module() -> Result<()> { + setup_test_env(); + let module_id = ModuleId(MODULE_ID_2.to_string()); + let jwts = utils::get_jwt_config(); + let start_config = start_server(20201, &jwts).await?; + let jwt_config = jwts.get(&module_id).expect("JWT config for 2nd test module not found"); + + // Send a signing request + let object_root = b256!("0x0123456789012345678901234567890123456789012345678901234567890123"); + let request = + SignRequest::Consensus(SignConsensusRequest { pubkey: FixedBytes(PUBKEY_1), object_root }); + let jwt = create_jwt(&module_id, &jwt_config.jwt_secret)?; + let client = reqwest::Client::new(); + let url = format!("http://{}{}", start_config.endpoint, REQUEST_SIGNATURE_PATH); + let response = client.post(&url).json(&request).bearer_auth(&jwt).send().await?; + + // Verify the response is successful + assert!(response.status() == StatusCode::OK); + + // Verify the signature is returned + let signature = response.text().await?; + assert!(!signature.is_empty(), "Signature should not be empty"); + + let incorrect_signature = "\"0x992e6fc29ba219e6afeceb91df3f58ebaa6c82ea8d00b3f4564a4d47cfd886c076ade87c6df765ba3fdcc5ba71513d8f0f12b17c76e4859126ab902a3ae5e8789eb3c9c49e8e9c5cd70ef0a93c76ca16763a940b991192eaba97dcc8c060ff7a\""; + assert_ne!(signature, incorrect_signature, "Signature does not match expected value"); + + Ok(()) +} From 6fd4327e98dd826d76f3c670d1d3134384446964 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 18 Jun 2025 14:55:22 -0400 Subject: [PATCH 43/67] Made the jwt_config_file optional --- crates/common/src/config/signer.rs | 25 +++++++++++++++---------- tests/src/utils.rs | 2 +- tests/tests/signer_request_sig.rs | 1 - 3 files changed, 16 insertions(+), 12 deletions(-) diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index 2bb24476..4d10a189 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -49,8 +49,8 @@ pub struct SignerConfig { #[serde(default = "default_u32::")] pub jwt_auth_fail_timeout_seconds: u32, - /// Path to the JWT config file - must be set explicitly - pub jwt_config_file: PathBuf, + /// Path to the JWT config file if the signer is used with modules + pub jwt_config_file: Option, /// Inner type-specific configuration #[serde(flatten)] @@ -173,15 +173,20 @@ impl StartSignerConfig { signer_config.jwt_auth_fail_timeout_seconds }; - // Load the JWT config file - let jwt_config_path = if let Some(path) = load_optional_env_var(SIGNER_JWT_CONFIG_FILE_ENV) - { - PathBuf::from(path) - } else { - signer_config.jwt_config_file + // Load the JWT config file if set - if not set or empty, use an empty JWT + // config so there won't be any modules + let mut jwts = HashMap::new(); + if let Some(path) = load_optional_env_var(SIGNER_JWT_CONFIG_FILE_ENV) { + if !path.is_empty() { + jwts = jwt::load_jwt_config_file(&PathBuf::from(path.clone())) + .wrap_err_with(|| format!("Failed to load JWT config from '{path:?}'"))?; + } + } else if let Some(path) = &signer_config.jwt_config_file { + if !path.as_os_str().is_empty() { + jwts = 
jwt::load_jwt_config_file(path) + .wrap_err_with(|| format!("Failed to load JWT config from '{path:?}'"))?; + } }; - let jwts = jwt::load_jwt_config_file(&jwt_config_path) - .wrap_err_with(|| format!("Failed to load JWT config from {jwt_config_path:?}"))?; match signer_config.inner { SignerType::Local { loader, store, .. } => Ok(StartSignerConfig { diff --git a/tests/src/utils.rs b/tests/src/utils.rs index dc962f5f..8f402d9a 100644 --- a/tests/src/utils.rs +++ b/tests/src/utils.rs @@ -107,7 +107,7 @@ pub fn get_signer_config(loader: SignerLoader) -> SignerConfig { docker_image: SIGNER_IMAGE_DEFAULT.to_string(), jwt_auth_fail_limit: SIGNER_JWT_AUTH_FAIL_LIMIT_DEFAULT, jwt_auth_fail_timeout_seconds: SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT, - jwt_config_file: PathBuf::from(""), + jwt_config_file: None, inner: SignerType::Local { loader, store: None }, } } diff --git a/tests/tests/signer_request_sig.rs b/tests/tests/signer_request_sig.rs index a69648a6..f3af9672 100644 --- a/tests/tests/signer_request_sig.rs +++ b/tests/tests/signer_request_sig.rs @@ -16,7 +16,6 @@ use cb_tests::{ }; use eyre::Result; use reqwest::StatusCode; -use tracing::info; const MODULE_ID_1: &str = "test-module"; const MODULE_ID_2: &str = "another-module"; From d9ef82f594324ae886700616a7ac759b15a2c941 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 24 Jun 2025 02:40:51 -0400 Subject: [PATCH 44/67] Started working on docs --- config.example.toml | 5 ----- crates/common/src/config/signer.rs | 31 +++++++++----------------- docs/docs/get_started/configuration.md | 29 ++++++++++++++++++++++++ tests/src/utils.rs | 2 -- 4 files changed, 40 insertions(+), 27 deletions(-) diff --git a/config.example.toml b/config.example.toml index 7a6ae502..7e3d1ab0 100644 --- a/config.example.toml +++ b/config.example.toml @@ -160,11 +160,6 @@ jwt_auth_fail_limit = 3 # How long to block a client from Signer access, in seconds, if it failed JWT authentication too many times # OPTIONAL, DEFAULT: 300 jwt_auth_fail_timeout_seconds = 300 -# The path of the file on-disk with the JWT and signing ID configuration for each module. -# If you use modules in your setup, you'll need this for them to authenticate with Commit Boost properly. -# If you don't, you can leave this blank. -# See for more information on the module JWT configuration file. 
-jwt_config_file = "./tests/data/configs/jwt.happy.toml" # For Remote signer: # [signer.remote] diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index 4d10a189..886ab36e 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -18,8 +18,8 @@ use super::{ }; use crate::{ config::{ - jwt, JwtConfig, DIRK_CA_CERT_ENV, DIRK_CERT_ENV, DIRK_DIR_SECRETS_ENV, DIRK_KEY_ENV, - SIGNER_JWT_CONFIG_FILE_ENV, + jwt, load_file_from_env, JwtConfig, DIRK_CA_CERT_ENV, DIRK_CERT_ENV, DIRK_DIR_SECRETS_ENV, + DIRK_KEY_ENV, SIGNER_JWT_CONFIG_FILE_ENV, }, signer::{ProxyStore, SignerLoader}, types::{Chain, ModuleId}, @@ -49,9 +49,6 @@ pub struct SignerConfig { #[serde(default = "default_u32::")] pub jwt_auth_fail_timeout_seconds: u32, - /// Path to the JWT config file if the signer is used with modules - pub jwt_config_file: Option, - /// Inner type-specific configuration #[serde(flatten)] pub inner: SignerType, @@ -148,6 +145,15 @@ impl StartSignerConfig { let signer_config = config.signer.ok_or_eyre("Signer config is missing")?; + // Load the JWT config file + let jwt_config_path = load_env_var(SIGNER_JWT_CONFIG_FILE_ENV) + .wrap_err("Failed to load JWT config file from environment")?; + if jwt_config_path.is_empty() { + bail!("JWT config file path is empty"); + } + let jwts = jwt::load_jwt_config_file(&PathBuf::from(&jwt_config_path)) + .wrap_err_with(|| format!("Failed to load JWT config from '{jwt_config_path:?}'"))?; + // Load the server endpoint first from the env var if present, otherwise the // config let endpoint = if let Some(endpoint) = load_optional_env_var(SIGNER_ENDPOINT_ENV) { @@ -173,21 +179,6 @@ impl StartSignerConfig { signer_config.jwt_auth_fail_timeout_seconds }; - // Load the JWT config file if set - if not set or empty, use an empty JWT - // config so there won't be any modules - let mut jwts = HashMap::new(); - if let Some(path) = load_optional_env_var(SIGNER_JWT_CONFIG_FILE_ENV) { - if !path.is_empty() { - jwts = jwt::load_jwt_config_file(&PathBuf::from(path.clone())) - .wrap_err_with(|| format!("Failed to load JWT config from '{path:?}'"))?; - } - } else if let Some(path) = &signer_config.jwt_config_file { - if !path.as_os_str().is_empty() { - jwts = jwt::load_jwt_config_file(path) - .wrap_err_with(|| format!("Failed to load JWT config from '{path:?}'"))?; - } - }; - match signer_config.inner { SignerType::Local { loader, store, .. } => Ok(StartSignerConfig { chain: config.chain, diff --git a/docs/docs/get_started/configuration.md b/docs/docs/get_started/configuration.md index 5dd46329..7bc49405 100644 --- a/docs/docs/get_started/configuration.md +++ b/docs/docs/get_started/configuration.md @@ -310,6 +310,7 @@ Delegation signatures will be stored in files with the format `/deleg A full example of a config file with Dirk can be found [here](https://github.com/Commit-Boost/commit-boost-client/blob/main/examples/configs/dirk_signer.toml). + ## Custom module We currently provide a test module that needs to be built locally. To build the module run: ```bash @@ -358,6 +359,34 @@ A few things to note: To learn more about developing modules, check out [here](/category/developing). + +## JWT Config File + +The Signer service's API is not configured to be used publically by arbitrary clients - access to it is whitelisted to applications (modules) that you permit in the Signer's configuration files. 
Each module that interacts with the Signer service must authenticate via a [JWT](https://en.wikipedia.org/wiki/JSON_Web_Token) included in its HTTP request headers. The secret authentication string for these JWTs is a unique value assigned to each module that you permit. Configuration for these secrets is done in the **JWT configuration file**. Any module that attempts to access the Signer API, but is not able to provide a JWT with an authentication secret, will be denied access. + +The JWT configuration file is a TOML file, similar to the Commit Boost configuration file, but is kept separate for isolation when using Docker containers. It has the following structure: + +```toml +[[modules]] +module_name = "test-module" +jwt_secret = "supersecret" +signing_id = "0x6a33a23ef26a4836979edff86c493a69b26ccf0b4a16491a815a13787657431b" + +[[modules]] +module_name = "another-module" +jwt_secret = "secondsecret" +signing_id = "0x61fe00135d7b4912a8c63ada215ac2e62326e6e7b30f49a29fcf9779d7ad800d" +``` + +Each module that should be allowed to access the Signer API must have an entry in this file, prefixed with the `[[modules]]` line. Each one must have the following fields: + +- `module_name`: The unique name (preferably human-readable) to assign to the module. This is primarily used as an identifier for it in things like logging messages. +- `jwt_secret`: The unique secret string that the module must provide in its JWT header for authentication. The module must have this same secret embedded in its own configuration. If using Commit Boost's Docker Compose generator, the generated container files will provide this value to each respective module's Docker container as the `CB_SIGNER_JWT` environment variable. +- `signing_id`: A 32-byte hex string unique to the module that will be used by the Signer service during signing requests from the module to generate signatures that are unique to the requesting module. This should *not* change and will typically come from the documentation provided by the module authors. If using Commit Boost's Docker Compose generator, it will *not* be provided to the module in the environment variable; the module's own code must have this value built into it ahead of time. See the [Module Signing ID](../developing/commit-module.md#module-signing-id) section below for more details. + +This file can be named anything, as long as it ends with a `.toml` extension, and saved anywhere accessible by the Commit Boost CLI and Docker daemon (if using the Docker Compose generator and Docker mode). The location for the file must be specified in the `CB_SIGNER_JWT_CONFIG_FILE` environment variable of the machine (or container) running the Signer service. + + ## Vouch [Vouch](https://github.com/attestantio/vouch) is a multi-node validator client built by [Attestant](https://www.attestant.io/). Vouch is particular in that it also integrates an MEV-Boost client to interact with relays. The Commit-Boost PBS module is compatible with the Vouch `blockrelay` since it implements the same Builder-API as relays. For example, depending on your setup and preference, you may want to fetch headers from a given relay using Commit-Boost vs using the built-in Vouch `blockrelay`. 
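The signing-ID behavior documented above can be illustrated with a small, self-contained sketch (not part of this patch set). It mirrors the `KeccakHasher` added in `crates/signer/src/hasher/keccak.rs`: the signer hashes the request's object root together with the requesting module's signing ID, so two modules asking for a signature over the same object root end up signing different messages. The signing IDs below are taken from the example `jwt.happy.toml` above; the helper function name and the rest of the scaffolding are illustrative only.

```rust
use alloy::primitives::{b256, Keccak256, B256};

/// Derive the per-module signing hash the same way the signer's KeccakHasher
/// does: keccak256(object_root || signing_id).
fn module_signing_hash(object_root: &B256, signing_id: &B256) -> B256 {
    let mut hasher = Keccak256::new();
    hasher.update(object_root);
    hasher.update(signing_id);
    hasher.finalize()
}

fn main() {
    // Object root used by the signer_request_sig.rs tests above
    let object_root =
        b256!("0x0123456789012345678901234567890123456789012345678901234567890123");

    // Signing IDs from the example JWT config file
    let test_module =
        b256!("0x6a33a23ef26a4836979edff86c493a69b26ccf0b4a16491a815a13787657431b");
    let another_module =
        b256!("0x61fe00135d7b4912a8c63ada215ac2e62326e6e7b30f49a29fcf9779d7ad800d");

    // The same object root produces different hashes (and therefore different
    // signatures) for different modules.
    assert_ne!(
        module_signing_hash(&object_root, &test_module),
        module_signing_hash(&object_root, &another_module)
    );
}
```

This is also what the `signer_request_sig.rs` tests above verify end-to-end: a second module requesting a signature over the same object root must receive a signature different from the first module's.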
diff --git a/tests/src/utils.rs b/tests/src/utils.rs index 8f402d9a..b3bff878 100644 --- a/tests/src/utils.rs +++ b/tests/src/utils.rs @@ -1,7 +1,6 @@ use std::{ collections::HashMap, net::{Ipv4Addr, SocketAddr}, - path::PathBuf, sync::{Arc, Once}, }; @@ -107,7 +106,6 @@ pub fn get_signer_config(loader: SignerLoader) -> SignerConfig { docker_image: SIGNER_IMAGE_DEFAULT.to_string(), jwt_auth_fail_limit: SIGNER_JWT_AUTH_FAIL_LIMIT_DEFAULT, jwt_auth_fail_timeout_seconds: SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT, - jwt_config_file: None, inner: SignerType::Local { loader, store: None }, } } From 83db727ce704c83db1001883b63db7d2e0fbc719 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 26 Jun 2025 00:25:01 -0400 Subject: [PATCH 45/67] Redid implementation with the original JWTS env var --- config.example.toml | 2 + crates/cli/src/docker_init.rs | 26 +- crates/common/src/config/constants.rs | 4 +- crates/common/src/config/jwt.rs | 398 ------------------------ crates/common/src/config/mod.rs | 2 - crates/common/src/config/module.rs | 3 + crates/common/src/config/signer.rs | 407 ++++++++++++++++++++++++- crates/common/src/config/utils.rs | 15 +- crates/signer/src/service.rs | 11 +- tests/data/configs/jwt.happy.toml | 9 - tests/data/configs/signer.happy.toml | 52 ++++ tests/data/module-jwt.txt | 1 - tests/src/signer_service.rs | 6 +- tests/src/utils.rs | 51 +++- tests/tests/pbs_get_header.rs | 8 +- tests/tests/pbs_get_status.rs | 6 +- tests/tests/pbs_mux.rs | 4 +- tests/tests/pbs_post_blinded_blocks.rs | 6 +- tests/tests/pbs_post_validators.rs | 6 +- tests/tests/signer_jwt_auth.rs | 42 ++- tests/tests/signer_request_sig.rs | 39 ++- 21 files changed, 604 insertions(+), 494 deletions(-) delete mode 100644 crates/common/src/config/jwt.rs delete mode 100644 tests/data/configs/jwt.happy.toml create mode 100644 tests/data/configs/signer.happy.toml delete mode 100644 tests/data/module-jwt.txt diff --git a/config.example.toml b/config.example.toml index 7e3d1ab0..15d80599 100644 --- a/config.example.toml +++ b/config.example.toml @@ -239,6 +239,8 @@ proxy_dir = "./proxies" [[modules]] # Unique ID of the module id = "DA_COMMIT" +# Unique hash that the Signer service will combine with the incoming data in signing requests to generate a signature specific to this module +signing_id = "0x6a33a23ef26a4836979edff86c493a69b26ccf0b4a16491a815a13787657431b" # Type of the module. 
Supported values: commit, events type = "commit" # Docker image of the module diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index 93ecdaa8..706e863e 100644 --- a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -9,13 +9,13 @@ use cb_common::{ CommitBoostConfig, LogsSettings, ModuleKind, SignerConfig, SignerType, BUILDER_PORT_ENV, BUILDER_URLS_ENV, CHAIN_SPEC_ENV, CONFIG_DEFAULT, CONFIG_ENV, DIRK_CA_CERT_DEFAULT, DIRK_CA_CERT_ENV, DIRK_CERT_DEFAULT, DIRK_CERT_ENV, DIRK_DIR_SECRETS_DEFAULT, - DIRK_DIR_SECRETS_ENV, DIRK_KEY_DEFAULT, DIRK_KEY_ENV, LOGS_DIR_DEFAULT, LOGS_DIR_ENV, - METRICS_PORT_ENV, MODULE_ID_ENV, MODULE_JWT_ENV, PBS_ENDPOINT_ENV, PBS_MODULE_NAME, - PROXY_DIR_DEFAULT, PROXY_DIR_ENV, PROXY_DIR_KEYS_DEFAULT, PROXY_DIR_KEYS_ENV, - PROXY_DIR_SECRETS_DEFAULT, PROXY_DIR_SECRETS_ENV, SIGNER_DEFAULT, SIGNER_DIR_KEYS_DEFAULT, - SIGNER_DIR_KEYS_ENV, SIGNER_DIR_SECRETS_DEFAULT, SIGNER_DIR_SECRETS_ENV, - SIGNER_ENDPOINT_ENV, SIGNER_JWT_AUTH_FAIL_LIMIT_ENV, SIGNER_JWT_CONFIG_FILE_ENV, - SIGNER_KEYS_ENV, SIGNER_MODULE_NAME, SIGNER_PORT_DEFAULT, SIGNER_URL_ENV, + DIRK_DIR_SECRETS_ENV, DIRK_KEY_DEFAULT, DIRK_KEY_ENV, JWTS_ENV, LOGS_DIR_DEFAULT, + LOGS_DIR_ENV, METRICS_PORT_ENV, MODULE_ID_ENV, MODULE_JWT_ENV, PBS_ENDPOINT_ENV, + PBS_MODULE_NAME, PROXY_DIR_DEFAULT, PROXY_DIR_ENV, PROXY_DIR_KEYS_DEFAULT, + PROXY_DIR_KEYS_ENV, PROXY_DIR_SECRETS_DEFAULT, PROXY_DIR_SECRETS_ENV, SIGNER_DEFAULT, + SIGNER_DIR_KEYS_DEFAULT, SIGNER_DIR_KEYS_ENV, SIGNER_DIR_SECRETS_DEFAULT, + SIGNER_DIR_SECRETS_ENV, SIGNER_ENDPOINT_ENV, SIGNER_KEYS_ENV, SIGNER_MODULE_NAME, + SIGNER_PORT_DEFAULT, SIGNER_URL_ENV, }, pbs::{BUILDER_API_PATH, GET_STATUS_PATH}, signer::{ProxyStore, SignerLoader}, @@ -330,7 +330,10 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re match signer_config.inner { SignerType::Local { loader, store } => { - let mut signer_envs = IndexMap::from([get_env_val(CONFIG_ENV, CONFIG_DEFAULT)]); + let mut signer_envs = IndexMap::from([ + get_env_val(CONFIG_ENV, CONFIG_DEFAULT), + get_env_same(JWTS_ENV), + ]); // Bind the signer API to 0.0.0.0 let container_endpoint = @@ -361,6 +364,9 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re signer_envs.insert(key, val); } + // write jwts to env + envs.insert(JWTS_ENV.into(), format_comma_separated(&jwts)); + // volumes let mut volumes = vec![config_volume.clone()]; volumes.extend(chain_spec_volume.clone()); @@ -458,6 +464,7 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re SignerType::Dirk { cert_path, key_path, secrets_path, ca_cert_path, store, .. 
} => { let mut signer_envs = IndexMap::from([ get_env_val(CONFIG_ENV, CONFIG_DEFAULT), + get_env_same(JWTS_ENV), get_env_val(DIRK_CERT_ENV, DIRK_CERT_DEFAULT), get_env_val(DIRK_KEY_ENV, DIRK_KEY_DEFAULT), get_env_val(DIRK_DIR_SECRETS_ENV, DIRK_DIR_SECRETS_DEFAULT), @@ -492,6 +499,9 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re signer_envs.insert(key, val); } + // write jwts to env + envs.insert(JWTS_ENV.into(), format_comma_separated(&jwts)); + // volumes let mut volumes = vec![ config_volume.clone(), diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs index a2f6124b..65ef1c1c 100644 --- a/crates/common/src/config/constants.rs +++ b/crates/common/src/config/constants.rs @@ -45,8 +45,8 @@ pub const SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV: &str = "CB_SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS"; pub const SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT: u32 = 5 * 60; -/// JWT file settings -pub const SIGNER_JWT_CONFIG_FILE_ENV: &str = "CB_SIGNER_JWT_CONFIG_FILE"; +/// Comma separated list module_id=jwt_secret +pub const JWTS_ENV: &str = "CB_JWTS"; /// Path to json file with plaintext keys (testing only) pub const SIGNER_KEYS_ENV: &str = "CB_SIGNER_LOADER_FILE"; diff --git a/crates/common/src/config/jwt.rs b/crates/common/src/config/jwt.rs deleted file mode 100644 index 9daa0c95..00000000 --- a/crates/common/src/config/jwt.rs +++ /dev/null @@ -1,398 +0,0 @@ -use std::{ - collections::HashMap, - io::Read, - path::{Path, PathBuf}, -}; - -use alloy::primitives::B256; -use eyre::{bail, Result}; -use serde::Deserialize; - -use crate::types::ModuleId; - -/// Underlying implementation of the JWT configuration that's deserialized from -/// disk. -#[derive(Deserialize)] -struct JwtConfigOnDisk { - module_name: ModuleId, - - // One of these must be provided - they're listed here in order of precedence - jwt_env: Option, - jwt_file: Option, - jwt_secret: Option, - - signing_id: B256, -} - -impl JwtConfigOnDisk { - /// Load the JWT secret from the provides sources, in order of precedence. - fn load_jwt_secret(&self) -> Result { - // Start with the environment variable - let jwt_string = if let Some(jwt_env) = &self.jwt_env { - // Load JWT secret from environment variable - std::env::var(jwt_env).map_err(|e| { - eyre::eyre!( - "Failed to read JWT secret from environment variable '{jwt_env}': {}", - e - ) - })? - } else if let Some(jwt_file) = &self.jwt_file { - // Load JWT secret from file - std::fs::read_to_string(jwt_file).map_err(|e| { - eyre::eyre!("Failed to read JWT secret from file '{}': {}", jwt_file.display(), e) - })? - } else if let Some(jwt_secret) = &self.jwt_secret { - // Use the provided JWT secret directly - jwt_secret.clone() - } else { - bail!("No JWT secret provided"); - }; - - Ok(jwt_string) - } -} - -#[derive(Deserialize)] -struct JwtConfigFile { - modules: Vec, -} - -#[derive(Clone, Debug, PartialEq)] -pub struct JwtConfig { - /// Human-readable name of the module. - pub module_name: ModuleId, - - /// The JWT secret for the module to communicate with the signer module. - pub jwt_secret: String, - - /// A unique identifier for the module, which is used when signing requests - /// to generate signatures for this module. Must be a 32-byte hex string. - /// A leading 0x prefix is optional. 
- pub signing_id: B256, -} - -impl JwtConfig { - pub fn validate(&self) -> Result<()> { - // Ensure the JWT secret is not empty - if self.jwt_secret.is_empty() { - bail!("JWT secret cannot be empty"); - } - - // Ensure the signing ID is a valid B256 - if self.signing_id.is_zero() { - bail!("Signing ID cannot be zero"); - } - - Ok(()) - } -} - -/// Load the JWT configuration from a file. -pub fn load_jwt_config_file(config_file_path: &Path) -> Result> { - // Make sure the file is legal - if !config_file_path.is_absolute() { - bail!("JWT config file '{}' must be an absolute path", config_file_path.display()); - } - let config_file_path = config_file_path.canonicalize().map_err(|e| { - eyre::eyre!( - "Failed to canonicalize JWT config path '{}': {}", - config_file_path.display(), - e - ) - })?; - if config_file_path.extension().map_or(true, |ext| ext != "toml") { - bail!("JWT config file '{}' must have a .toml extension", config_file_path.display()); - } - if !config_file_path.exists() { - bail!("JWT config file '{}' does not exist", config_file_path.display()); - } - if !config_file_path.is_file() { - bail!("JWT config file '{}' is not a regular file", config_file_path.display()); - } - - // Parse the JWT config file - let mut file = std::fs::File::open(&config_file_path).map_err(|e| { - eyre::eyre!("Failed to open JWT config file '{}': {}", config_file_path.display(), e) - })?; - let mut contents = String::new(); - file.read_to_string(&mut contents)?; - - let jwt_configs: JwtConfigFile = toml::from_str(&contents).map_err(|e| { - eyre::eyre!("Failed to parse JWT config '{}': {}", config_file_path.display(), e) - })?; - - load_impl(jwt_configs) -} - -/// Implementation for loading a JWT configuration from a file. -fn load_impl(config_file: JwtConfigFile) -> Result> { - // Load the JWT secrets and validate them - let mut jwt_configs = HashMap::new(); - for raw_config in config_file.modules { - let jwt_secret = raw_config.load_jwt_secret()?; - let jwt_config = JwtConfig { - module_name: raw_config.module_name.clone(), - jwt_secret, - signing_id: raw_config.signing_id, - }; - jwt_config.validate()?; - - // Make sure there are no duplicate module names - if jwt_configs.contains_key(&raw_config.module_name) { - bail!("Duplicate JWT configuration for module '{}'", raw_config.module_name); - } - - // Make sure the signing ID hasn't been used before - if jwt_configs - .values() - .any(|existing_config: &JwtConfig| existing_config.signing_id == jwt_config.signing_id) - { - bail!( - "Duplicate signing ID '{}' for module '{}'", - jwt_config.signing_id, - raw_config.module_name - ); - } - - // Safe to use - jwt_configs.insert(raw_config.module_name, jwt_config); - } - - Ok(jwt_configs) -} - -#[cfg(test)] -mod tests { - use alloy::primitives::b256; - - use super::*; - - #[tokio::test] - async fn test_good_config() -> Result<()> { - let toml_str = r#" - [[modules]] - module_name = "test_module" - jwt_secret = "supersecret" - signing_id = "0101010101010101010101010101010101010101010101010101010101010101" - - [[modules]] - module_name = "2nd_test_module" - jwt_secret = "another-secret" - signing_id = "0x0202020202020202020202020202020202020202020202020202020202020202" - "#; - - // Load the JWT configuration - let jwt_config_file: JwtConfigFile = - toml::from_str(toml_str).expect("Failed to deserialize JWT config"); - let jwts = load_impl(jwt_config_file)?; - assert!(jwts.len() == 2, "Expected 2 JWT configurations"); - - // Check the first module - let module_id_1 = ModuleId("test_module".to_string()); - let 
module_1 = jwts.get(&module_id_1).expect("Missing 'test_module' in JWT configs"); - assert_eq!(module_1.module_name, module_id_1, "Module name mismatch for 'test_module'"); - assert_eq!( - module_1.jwt_secret, - "supersecret".to_string(), - "JWT secret mismatch for 'test_module'" - ); - assert_eq!( - module_1.signing_id, - b256!("0101010101010101010101010101010101010101010101010101010101010101"), - "Signing ID mismatch for 'test_module'" - ); - - // Check the second module - let module_id_2 = ModuleId("2nd_test_module".to_string()); - assert!(jwts.contains_key(&module_id_2), "Missing '2nd_test_module' in JWT configs"); - let module_2 = jwts.get(&module_id_2).expect("Missing '2nd_test_module' in JWT configs"); - assert_eq!(module_2.module_name, module_id_2, "Module name mismatch for '2nd_test_module'"); - assert_eq!( - module_2.jwt_secret, - "another-secret".to_string(), - "JWT secret mismatch for '2nd_test_module'" - ); - assert_eq!( - module_2.signing_id, - b256!("0202020202020202020202020202020202020202020202020202020202020202"), - "Signing ID mismatch for '2nd_test_module'" - ); - - Ok(()) - } - - #[tokio::test] - async fn test_jwt_from_env() -> Result<()> { - let jwt = "supersecret-env"; - let jwt_env = "CB_TEST_MODULE_JTW"; - let toml_str = r#" - [[modules]] - module_name = "test_module" - jwt_env = "CB_TEST_MODULE_JTW" - signing_id = "0101010101010101010101010101010101010101010101010101010101010101" - "#; - - // Set the environment variable - std::env::set_var(jwt_env, jwt); - struct EnvVarGuard { - env_name: &'static str, - } - impl Drop for EnvVarGuard { - fn drop(&mut self) { - std::env::remove_var(self.env_name); - } - } - - // Load the JWT configuration - let jwts: HashMap; - { - let _env_guard = EnvVarGuard { env_name: jwt_env }; - let jwt_config_file: JwtConfigFile = - toml::from_str(toml_str).expect("Failed to deserialize JWT config"); - jwts = load_impl(jwt_config_file)?; - } - assert!(jwts.len() == 1, "Expected 1 JWT configuration"); - - // Check the module - let module_id = ModuleId("test_module".to_string()); - let module = jwts.get(&module_id).expect("Missing 'test_module' in JWT configs"); - assert_eq!(module.module_name, module_id, "Module name mismatch for 'test_module'"); - assert_eq!(module.jwt_secret, jwt.to_string(), "JWT secret mismatch for 'test_module'"); - assert_eq!( - module.signing_id, - b256!("0101010101010101010101010101010101010101010101010101010101010101"), - "Signing ID mismatch for 'test_module'" - ); - Ok(()) - } - - #[tokio::test] - async fn test_jwt_from_file() -> Result<()> { - let jwt = "supersecret-file"; - let cwd = std::env::current_dir()?; - let mut jwt_file_path = cwd.join("../../tests/data/module-jwt.txt"); - jwt_file_path = jwt_file_path.canonicalize()?; - let toml_str = format!( - r#" - [[modules]] - module_name = "test_module" - jwt_file = "{}" - signing_id = "0101010101010101010101010101010101010101010101010101010101010101" - "#, - jwt_file_path.display() - ); - - // Load the JWT configuration - let jwt_config_file: JwtConfigFile = - toml::from_str(&toml_str).expect("Failed to deserialize JWT config"); - let jwts = load_impl(jwt_config_file)?; - assert!(jwts.len() == 1, "Expected 1 JWT configuration"); - - // Check the module - let module_id = ModuleId("test_module".to_string()); - let module = jwts.get(&module_id).expect("Missing 'test_module' in JWT configs"); - assert_eq!(module.module_name, module_id, "Module name mismatch for 'test_module'"); - assert_eq!(module.jwt_secret, jwt.to_string(), "JWT secret mismatch for 'test_module'"); 
- assert_eq!( - module.signing_id, - b256!("0101010101010101010101010101010101010101010101010101010101010101"), - "Signing ID mismatch for 'test_module'" - ); - Ok(()) - } - - #[tokio::test] - async fn test_duplicate_module_names() -> Result<()> { - let toml_str = r#" - [[modules]] - module_name = "test_module" - jwt_secret = "supersecret" - signing_id = "0101010101010101010101010101010101010101010101010101010101010101" - - [[modules]] - module_name = "test_module" # Duplicate name - jwt_secret = "another-secret" - signing_id = "0202020202020202020202020202020202020202020202020202020202020202" - "#; - let jwt_config_file: JwtConfigFile = - toml::from_str(toml_str).expect("Failed to deserialize JWT config"); - let result = load_impl(jwt_config_file); - assert!(result.is_err(), "Expected error due to duplicate module names"); - if let Err(e) = result { - assert_eq!(&e.to_string(), "Duplicate JWT configuration for module 'test_module'"); - } - Ok(()) - } - - #[tokio::test] - async fn test_duplicate_signing_ids() -> Result<()> { - let toml_str = r#" - [[modules]] - module_name = "test_module" - jwt_secret = "supersecret" - signing_id = "0101010101010101010101010101010101010101010101010101010101010101" - - [[modules]] - module_name = "2nd_test_module" - jwt_secret = "another-secret" - signing_id = "0101010101010101010101010101010101010101010101010101010101010101" # Duplicate signing ID - "#; - let jwt_config_file: JwtConfigFile = - toml::from_str(toml_str).expect("Failed to deserialize JWT config"); - let result = load_impl(jwt_config_file); - assert!(result.is_err(), "Expected error due to duplicate signing IDs"); - if let Err(e) = result { - assert_eq!(&e.to_string(),"Duplicate signing ID '0x0101010101010101010101010101010101010101010101010101010101010101' for module '2nd_test_module'"); - } - Ok(()) - } - - #[tokio::test] - async fn test_missing_jwt_secret() -> Result<()> { - let toml_str = r#" - [[modules]] - module_name = "test_module" - signing_id = "0101010101010101010101010101010101010101010101010101010101010101" - "#; - let jwt_config_file: JwtConfigFile = - toml::from_str(toml_str).expect("Failed to deserialize JWT config"); - let result = load_impl(jwt_config_file); - assert!(result.is_err(), "Expected error due to missing JWT secret"); - if let Err(e) = result { - assert_eq!(&e.to_string(), "No JWT secret provided"); - } - Ok(()) - } - - #[tokio::test] - async fn test_empty_jwt_secret() -> Result<()> { - let cfg = JwtConfig { - module_name: ModuleId("test_module".to_string()), - jwt_secret: "".to_string(), - signing_id: b256!("0101010101010101010101010101010101010101010101010101010101010101"), - }; - - let result = cfg.validate(); - assert!(result.is_err(), "Expected error due to empty JWT secret"); - if let Err(e) = result { - assert_eq!(&e.to_string(), "JWT secret cannot be empty"); - } - - Ok(()) - } - - #[tokio::test] - async fn test_zero_signing_id() -> Result<()> { - let cfg = JwtConfig { - module_name: ModuleId("test_module".to_string()), - jwt_secret: "supersecret".to_string(), - signing_id: b256!("0000000000000000000000000000000000000000000000000000000000000000"), - }; - let result = cfg.validate(); - assert!(result.is_err(), "Expected error due to zero signing ID"); - if let Err(e) = result { - assert_eq!(&e.to_string(), "Signing ID cannot be zero"); - } - Ok(()) - } -} diff --git a/crates/common/src/config/mod.rs b/crates/common/src/config/mod.rs index 5f1144e3..b782999b 100644 --- a/crates/common/src/config/mod.rs +++ b/crates/common/src/config/mod.rs @@ -6,7 +6,6 @@ use 
serde::{Deserialize, Serialize}; use crate::types::{load_chain_from_file, Chain, ChainLoader, ForkVersion}; mod constants; -mod jwt; mod log; mod metrics; mod module; @@ -16,7 +15,6 @@ mod signer; mod utils; pub use constants::*; -pub use jwt::*; pub use log::*; pub use metrics::*; pub use module::*; diff --git a/crates/common/src/config/module.rs b/crates/common/src/config/module.rs index 16b089ca..09ccee89 100644 --- a/crates/common/src/config/module.rs +++ b/crates/common/src/config/module.rs @@ -1,5 +1,6 @@ use std::collections::HashMap; +use alloy::primitives::B256; use eyre::{ContextCompat, Result}; use serde::{de::DeserializeOwned, Deserialize, Serialize}; use toml::Table; @@ -37,6 +38,8 @@ pub struct StaticModuleConfig { /// Type of the module #[serde(rename = "type")] pub kind: ModuleKind, + /// Signing ID for the module to use when requesting signatures + pub signing_id: Option, } /// Runtime config to start a module diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index 886ab36e..c94e07ca 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -4,6 +4,7 @@ use std::{ path::PathBuf, }; +use alloy::primitives::B256; use docker_image::DockerImage; use eyre::{bail, ensure, Context, OptionExt, Result}; use serde::{Deserialize, Serialize}; @@ -18,14 +19,44 @@ use super::{ }; use crate::{ config::{ - jwt, load_file_from_env, JwtConfig, DIRK_CA_CERT_ENV, DIRK_CERT_ENV, DIRK_DIR_SECRETS_ENV, - DIRK_KEY_ENV, SIGNER_JWT_CONFIG_FILE_ENV, + load_jwt_secrets, DIRK_CA_CERT_ENV, DIRK_CERT_ENV, DIRK_DIR_SECRETS_ENV, DIRK_KEY_ENV, }, signer::{ProxyStore, SignerLoader}, types::{Chain, ModuleId}, utils::{default_host, default_u16, default_u32}, }; +/// The signing configuration for a commitment module. +#[derive(Clone, Debug, PartialEq)] +pub struct ModuleSigningConfig { + /// Human-readable name of the module. + pub module_name: ModuleId, + + /// The JWT secret for the module to communicate with the signer module. + pub jwt_secret: String, + + /// A unique identifier for the module, which is used when signing requests + /// to generate signatures for this module. Must be a 32-byte hex string. + /// A leading 0x prefix is optional. 
+ pub signing_id: B256, +} + +impl ModuleSigningConfig { + pub fn validate(&self) -> Result<()> { + // Ensure the JWT secret is not empty + if self.jwt_secret.is_empty() { + bail!("JWT secret cannot be empty"); + } + + // Ensure the signing ID is a valid B256 + if self.signing_id.is_zero() { + bail!("Signing ID cannot be zero"); + } + + Ok(()) + } +} + #[derive(Debug, Serialize, Deserialize, Clone)] #[serde(rename_all = "snake_case")] pub struct SignerConfig { @@ -133,7 +164,7 @@ pub struct StartSignerConfig { pub loader: Option, pub store: Option, pub endpoint: SocketAddr, - pub jwts: HashMap, + pub mod_signing_configs: HashMap, pub jwt_auth_fail_limit: u32, pub jwt_auth_fail_timeout_seconds: u32, pub dirk: Option, @@ -143,16 +174,13 @@ impl StartSignerConfig { pub fn load_from_env() -> Result { let config = CommitBoostConfig::from_env_path()?; - let signer_config = config.signer.ok_or_eyre("Signer config is missing")?; + let jwt_secrets = load_jwt_secrets()?; - // Load the JWT config file - let jwt_config_path = load_env_var(SIGNER_JWT_CONFIG_FILE_ENV) - .wrap_err("Failed to load JWT config file from environment")?; - if jwt_config_path.is_empty() { - bail!("JWT config file path is empty"); - } - let jwts = jwt::load_jwt_config_file(&PathBuf::from(&jwt_config_path)) - .wrap_err_with(|| format!("Failed to load JWT config from '{jwt_config_path:?}'"))?; + // Load the module signing configs + let mod_signing_configs = load_module_signing_configs(&config, &jwt_secrets) + .wrap_err("Failed to load module signing configs")?; + + let signer_config = config.signer.ok_or_eyre("Signer config is missing")?; // Load the server endpoint first from the env var if present, otherwise the // config @@ -184,7 +212,7 @@ impl StartSignerConfig { chain: config.chain, loader: Some(loader), endpoint, - jwts, + mod_signing_configs, jwt_auth_fail_limit, jwt_auth_fail_timeout_seconds, store, @@ -214,7 +242,7 @@ impl StartSignerConfig { Ok(StartSignerConfig { chain: config.chain, endpoint, - jwts, + mod_signing_configs, jwt_auth_fail_limit, jwt_auth_fail_timeout_seconds, loader: None, @@ -242,3 +270,354 @@ impl StartSignerConfig { } } } + +/// Loads the signing configurations for each module defined in the Commit Boost +/// config, coupling them with their JWT secrets and handling any potential +/// duplicates or missing values. 
+pub fn load_module_signing_configs( + config: &CommitBoostConfig, + jwt_secrets: &HashMap, +) -> Result> { + let mut mod_signing_configs = HashMap::new(); + if let Some(modules) = &config.modules { + let mut seen_jwt_secrets = HashMap::new(); + let mut seen_signing_ids = HashMap::new(); + for module in modules { + // Validate the module ID + ensure!(!module.id.is_empty(), "Module ID cannot be empty"); + + // Make sure it hasn't been used yet + ensure!( + !mod_signing_configs.contains_key(&module.id), + "Duplicate module config detected: ID {} is already used", + module.id + ); + + // Make sure the JWT secret is present + let jwt_secret = match jwt_secrets.get(&module.id) { + Some(secret) => secret.clone(), + None => bail!("JWT secret for module {} is missing", module.id), + }; + + // Make sure the signing ID is present + let signing_id = match &module.signing_id { + Some(id) => *id, + None => bail!("Signing ID for module {} is missing", module.id), + }; + + // Create the module signing config and validate it + let module_signing_config = + ModuleSigningConfig { module_name: module.id.clone(), jwt_secret, signing_id }; + module_signing_config + .validate() + .wrap_err(format!("Invalid signing config for module {}", module.id))?; + + // Check for duplicates in JWT secrets and signing IDs + match seen_jwt_secrets.get(&module_signing_config.jwt_secret) { + Some(existing_module) => { + bail!( + "Duplicate JWT secret detected for modules {} and {}", + existing_module, + module.id + ) + } + None => { + seen_jwt_secrets.insert(module_signing_config.jwt_secret.clone(), &module.id); + } + }; + match seen_signing_ids.get(&module_signing_config.signing_id) { + Some(existing_module) => { + bail!( + "Duplicate signing ID detected for modules {} and {}", + existing_module, + module.id + ) + } + None => { + seen_signing_ids.insert(module_signing_config.signing_id.clone(), &module.id); + signing_id + } + }; + + mod_signing_configs.insert(module.id.clone(), module_signing_config); + } + } + + Ok(mod_signing_configs) +} + +#[cfg(test)] +mod tests { + use alloy::primitives::{b256, Uint}; + + use super::*; + use crate::config::{LogsSettings, ModuleKind, PbsConfig, StaticModuleConfig, StaticPbsConfig}; + + async fn get_base_config() -> CommitBoostConfig { + CommitBoostConfig { + chain: Chain::Hoodi, + relays: vec![], + pbs: StaticPbsConfig { + docker_image: String::from(""), + pbs_config: PbsConfig { + host: Ipv4Addr::new(127, 0, 0, 1), + port: 0, + relay_check: false, + wait_all_registrations: false, + timeout_get_header_ms: 0, + timeout_get_payload_ms: 0, + timeout_register_validator_ms: 0, + skip_sigverify: false, + min_bid_wei: Uint::<256, 4>::from(0), + late_in_slot_time_ms: 0, + extra_validation_enabled: false, + rpc_url: None, + }, + with_signer: true, + }, + muxes: None, + modules: Some(vec![]), + signer: None, + metrics: None, + logs: LogsSettings::default(), + } + } + + async fn create_module_config(id: &ModuleId, signing_id: &B256) -> StaticModuleConfig { + StaticModuleConfig { + id: id.clone(), + signing_id: Some(*signing_id), + docker_image: String::from(""), + env: None, + env_file: None, + kind: ModuleKind::Commit, + } + } + + #[tokio::test] + async fn test_good_config() -> Result<()> { + let mut cfg = get_base_config().await; + let first_module_id = ModuleId("test_module".to_string()); + let first_signing_id = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + let second_module_id = ModuleId("2nd_test_module".to_string()); + let second_signing_id = + 
b256!("0202020202020202020202020202020202020202020202020202020202020202"); + + cfg.modules = Some(vec![ + create_module_config(&first_module_id, &first_signing_id).await, + create_module_config(&second_module_id, &second_signing_id).await, + ]); + + let jwts = HashMap::from([ + (first_module_id.clone(), "supersecret".to_string()), + (second_module_id.clone(), "another-secret".to_string()), + ]); + + // Load the mod signing configuration + let mod_signing_configs = load_module_signing_configs(&cfg, &jwts) + .wrap_err("Failed to load module signing configs")?; + assert!(mod_signing_configs.len() == 2, "Expected 2 mod signing configurations"); + + // Check the first module + let module_1 = mod_signing_configs + .get(&first_module_id) + .unwrap_or_else(|| panic!("Missing '{first_module_id}' in mod signing configs")); + assert_eq!(module_1.module_name, first_module_id, "Module name mismatch for 'test_module'"); + assert_eq!( + module_1.jwt_secret, jwts[&first_module_id], + "JWT secret mismatch for '{first_module_id}'" + ); + assert_eq!( + module_1.signing_id, first_signing_id, + "Signing ID mismatch for '{first_module_id}'" + ); + + // Check the second module + let module_2 = mod_signing_configs + .get(&second_module_id) + .unwrap_or_else(|| panic!("Missing '{second_module_id}' in mod signing configs")); + assert_eq!( + module_2.module_name, second_module_id, + "Module name mismatch for '{second_module_id}'" + ); + assert_eq!( + module_2.jwt_secret, jwts[&second_module_id], + "JWT secret mismatch for '{second_module_id}'" + ); + assert_eq!( + module_2.signing_id, second_signing_id, + "Signing ID mismatch for '{second_module_id}'" + ); + + Ok(()) + } + + #[tokio::test] + async fn test_duplicate_module_names() -> Result<()> { + let mut cfg = get_base_config().await; + let first_module_id = ModuleId("test_module".to_string()); + let first_signing_id = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + let second_module_id = ModuleId("2nd_test_module".to_string()); + let second_signing_id = + b256!("0202020202020202020202020202020202020202020202020202020202020202"); + + cfg.modules = Some(vec![ + create_module_config(&first_module_id, &first_signing_id).await, + create_module_config(&first_module_id, &second_signing_id).await, /* Duplicate module name */ + ]); + + let jwts = HashMap::from([ + (first_module_id.clone(), "supersecret".to_string()), + (second_module_id.clone(), "another-secret".to_string()), + ]); + + // Make sure there was an error + let result = load_module_signing_configs(&cfg, &jwts); + assert!(result.is_err(), "Expected error due to duplicate module names"); + if let Err(e) = result { + assert_eq!( + e.to_string(), + format!("Duplicate module config detected: ID {first_module_id} is already used") + ); + } + Ok(()) + } + + #[tokio::test] + async fn test_duplicate_jwt_secrets() -> Result<()> { + let mut cfg = get_base_config().await; + let first_module_id = ModuleId("test_module".to_string()); + let first_signing_id = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + let second_module_id = ModuleId("2nd_test_module".to_string()); + let second_signing_id = + b256!("0202020202020202020202020202020202020202020202020202020202020202"); + + cfg.modules = Some(vec![ + create_module_config(&first_module_id, &first_signing_id).await, + create_module_config(&second_module_id, &second_signing_id).await, + ]); + + let jwts = HashMap::from([ + (first_module_id.clone(), "supersecret".to_string()), + (second_module_id.clone(), 
"supersecret".to_string()), /* Duplicate JWT secret */ + ]); + + // Make sure there was an error + let result = load_module_signing_configs(&cfg, &jwts); + assert!(result.is_err(), "Expected error due to duplicate JWT secrets"); + if let Err(e) = result { + assert_eq!( + e.to_string(), + format!( + "Duplicate JWT secret detected for modules {first_module_id} and {second_module_id}", + ) + ); + } + Ok(()) + } + + #[tokio::test] + async fn test_duplicate_signing_ids() -> Result<()> { + let mut cfg = get_base_config().await; + let first_module_id = ModuleId("test_module".to_string()); + let first_signing_id = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + let second_module_id = ModuleId("2nd_test_module".to_string()); + + cfg.modules = Some(vec![ + create_module_config(&first_module_id, &first_signing_id).await, + create_module_config(&second_module_id, &first_signing_id).await, /* Duplicate signing ID */ + ]); + + let jwts = HashMap::from([ + (first_module_id.clone(), "supersecret".to_string()), + (second_module_id.clone(), "another-secret".to_string()), + ]); + + // Make sure there was an error + let result = load_module_signing_configs(&cfg, &jwts); + assert!(result.is_err(), "Expected error due to duplicate signing IDs"); + if let Err(e) = result { + assert_eq!( + e.to_string(), + format!( + "Duplicate signing ID detected for modules {first_module_id} and {second_module_id}", + ) + ); + } + Ok(()) + } + + #[tokio::test] + async fn test_missing_jwt_secret() -> Result<()> { + let mut cfg = get_base_config().await; + let first_module_id = ModuleId("test_module".to_string()); + let first_signing_id = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + let second_module_id = ModuleId("2nd_test_module".to_string()); + let second_signing_id = + b256!("0202020202020202020202020202020202020202020202020202020202020202"); + + cfg.modules = Some(vec![ + create_module_config(&first_module_id, &first_signing_id).await, + create_module_config(&second_module_id, &second_signing_id).await, + ]); + + let jwts = HashMap::from([(second_module_id.clone(), "another-secret".to_string())]); + + // Make sure there was an error + let result = load_module_signing_configs(&cfg, &jwts); + assert!(result.is_err(), "Expected error due to missing JWT secret"); + if let Err(e) = result { + assert_eq!( + e.to_string(), + format!("JWT secret for module {first_module_id} is missing") + ); + } + Ok(()) + } + + #[tokio::test] + async fn test_empty_jwt_secret() -> Result<()> { + let mut cfg = get_base_config().await; + let first_module_id = ModuleId("test_module".to_string()); + let first_signing_id = + b256!("0101010101010101010101010101010101010101010101010101010101010101"); + + cfg.modules = Some(vec![create_module_config(&first_module_id, &first_signing_id).await]); + + let jwts = HashMap::from([(first_module_id.clone(), "".to_string())]); + + // Make sure there was an error + let result = load_module_signing_configs(&cfg, &jwts); + assert!(result.is_err(), "Expected error due to empty JWT secret"); + if let Err(e) = result { + assert!(format!("{:?}", e).contains("JWT secret cannot be empty")); + } + + Ok(()) + } + + #[tokio::test] + async fn test_zero_signing_id() -> Result<()> { + let mut cfg = get_base_config().await; + let first_module_id = ModuleId("test_module".to_string()); + let first_signing_id = + b256!("0000000000000000000000000000000000000000000000000000000000000000"); + + cfg.modules = Some(vec![create_module_config(&first_module_id, 
&first_signing_id).await]); + + let jwts = HashMap::from([(first_module_id.clone(), "supersecret".to_string())]); + + // Make sure there was an error + let result = load_module_signing_configs(&cfg, &jwts); + assert!(result.is_err(), "Expected error due to zero signing ID"); + if let Err(e) = result { + assert!(format!("{:?}", e).contains("Signing ID cannot be zero")); + } + Ok(()) + } +} diff --git a/crates/common/src/config/utils.rs b/crates/common/src/config/utils.rs index 34c649e3..43d6e71c 100644 --- a/crates/common/src/config/utils.rs +++ b/crates/common/src/config/utils.rs @@ -1,8 +1,10 @@ -use std::path::Path; +use std::{collections::HashMap, path::Path}; -use eyre::{Context, Result}; +use eyre::{bail, Context, Result}; use serde::de::DeserializeOwned; +use crate::{config::JWTS_ENV, types::ModuleId}; + pub fn load_env_var(env: &str) -> Result { std::env::var(env).wrap_err(format!("{env} is not set")) } @@ -21,8 +23,12 @@ pub fn load_file_from_env(env: &str) -> Result { load_from_file(&path) } -// TODO: This was only used by the old JWT loader, can it be removed now? -/* +/// Loads a map of module id -> jwt secret from a json env +pub fn load_jwt_secrets() -> Result> { + let jwt_secrets = std::env::var(JWTS_ENV).wrap_err(format!("{JWTS_ENV} is not set"))?; + decode_string_to_map(&jwt_secrets) +} + fn decode_string_to_map(raw: &str) -> Result> { // trim the string and split for comma raw.trim() @@ -52,4 +58,3 @@ mod tests { assert_eq!(map.get(&ModuleId("KEY2".into())), Some(&"value2".to_string())); } } -*/ diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index 74fac631..2422df31 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -25,7 +25,7 @@ use cb_common::{ SignProxyRequest, SignRequest, }, }, - config::{JwtConfig, StartSignerConfig}, + config::{ModuleSigningConfig, StartSignerConfig}, constants::{COMMIT_BOOST_COMMIT, COMMIT_BOOST_VERSION}, types::{Chain, Jwt, ModuleId}, utils::{decode_jwt, validate_jwt}, @@ -69,7 +69,7 @@ where /// Map of modules ids to JWT configurations. This also acts as registry of /// all modules running - jwts: Arc>, + jwts: Arc>, /// Map of JWT failures per peer jwt_auth_failures: Arc>>, @@ -81,17 +81,18 @@ where impl SigningService { pub async fn run(config: StartSignerConfig) -> eyre::Result<()> { - if config.jwts.is_empty() { + if config.mod_signing_configs.is_empty() { warn!("Signing service was started but no module is registered. 
Exiting"); return Ok(()); } - let module_ids: Vec = config.jwts.keys().cloned().map(Into::into).collect(); + let module_ids: Vec = + config.mod_signing_configs.keys().cloned().map(Into::into).collect(); let state = SigningState { manager: Arc::new(RwLock::new(start_manager(config.clone()).await?)), hasher: KeccakHasher::new(), - jwts: config.jwts.into(), + jwts: config.mod_signing_configs.into(), jwt_auth_failures: Arc::new(RwLock::new(HashMap::new())), jwt_auth_fail_limit: config.jwt_auth_fail_limit, jwt_auth_fail_timeout: Duration::from_secs(config.jwt_auth_fail_timeout_seconds as u64), diff --git a/tests/data/configs/jwt.happy.toml b/tests/data/configs/jwt.happy.toml deleted file mode 100644 index 8df699ed..00000000 --- a/tests/data/configs/jwt.happy.toml +++ /dev/null @@ -1,9 +0,0 @@ -[[modules]] -module_name = "test-module" -jwt_secret = "supersecret" -signing_id = "0x6a33a23ef26a4836979edff86c493a69b26ccf0b4a16491a815a13787657431b" - -[[modules]] -module_name = "another-module" -jwt_secret = "secondsecret" -signing_id = "0x61fe00135d7b4912a8c63ada215ac2e62326e6e7b30f49a29fcf9779d7ad800d" \ No newline at end of file diff --git a/tests/data/configs/signer.happy.toml b/tests/data/configs/signer.happy.toml new file mode 100644 index 00000000..6fb76445 --- /dev/null +++ b/tests/data/configs/signer.happy.toml @@ -0,0 +1,52 @@ +chain = "Hoodi" + +[pbs] +docker_image = "ghcr.io/commit-boost/pbs:latest" +with_signer = true +host = "127.0.0.1" +port = 18550 +relay_check = true +wait_all_registrations = true +timeout_get_header_ms = 950 +timeout_get_payload_ms = 4000 +timeout_register_validator_ms = 3000 +skip_sigverify = false +min_bid_eth = 0.5 +late_in_slot_time_ms = 2000 +extra_validation_enabled = false +rpc_url = "https://ethereum-holesky-rpc.publicnode.com" + +[[relays]] +id = "example-relay" +url = "http://0xa1cec75a3f0661e99299274182938151e8433c61a19222347ea1313d839229cb4ce4e3e5aa2bdeb71c8fcf1b084963c2@abc.xyz" +headers = { X-MyCustomHeader = "MyCustomHeader" } +enable_timing_games = false +target_first_request_ms = 200 +frequency_get_header_ms = 300 + +[signer] +docker_image = "ghcr.io/commit-boost/signer:latest" +host = "127.0.0.1" +port = 20000 +jwt_auth_fail_limit = 3 +jwt_auth_fail_timeout_seconds = 300 + +[signer.local.loader] +key_path = "./tests/data/keys.example.json" + +[signer.local.store] +proxy_dir = "./proxies" + +[[modules]] +id = "test-module" +signing_id = "0x6a33a23ef26a4836979edff86c493a69b26ccf0b4a16491a815a13787657431b" +type = "commit" +docker_image = "test_da_commit" +env_file = ".cb.env" + +[[modules]] +id = "another-module" +signing_id = "0x61fe00135d7b4912a8c63ada215ac2e62326e6e7b30f49a29fcf9779d7ad800d" +type = "commit" +docker_image = "test_da_commit" +env_file = ".cb.env" diff --git a/tests/data/module-jwt.txt b/tests/data/module-jwt.txt deleted file mode 100644 index f837695a..00000000 --- a/tests/data/module-jwt.txt +++ /dev/null @@ -1 +0,0 @@ -supersecret-file \ No newline at end of file diff --git a/tests/src/signer_service.rs b/tests/src/signer_service.rs index e87ef314..c31e5a1c 100644 --- a/tests/src/signer_service.rs +++ b/tests/src/signer_service.rs @@ -3,7 +3,7 @@ use std::{collections::HashMap, time::Duration}; use alloy::{hex, primitives::FixedBytes}; use cb_common::{ commit::request::GetPubkeysResponse, - config::{JwtConfig, StartSignerConfig}, + config::{ModuleSigningConfig, StartSignerConfig}, signer::{SignerLoader, ValidatorKeysFormat}, types::{Chain, ModuleId}, }; @@ -18,7 +18,7 @@ use crate::utils::{get_signer_config, 
get_start_signer_config}; // configuration pub async fn start_server( port: u16, - jwts: &HashMap, + mod_signing_configs: &HashMap, ) -> Result { let chain = Chain::Hoodi; @@ -32,7 +32,7 @@ pub async fn start_server( config.port = port; config.jwt_auth_fail_limit = 3; // Set a low fail limit for testing config.jwt_auth_fail_timeout_seconds = 3; // Set a short timeout for testing - let start_config = get_start_signer_config(config, chain, jwts); + let start_config = get_start_signer_config(config, chain, mod_signing_configs); // Run the Signer let server_handle = tokio::spawn(SigningService::run(start_config.clone())); diff --git a/tests/src/utils.rs b/tests/src/utils.rs index b3bff878..c2312a74 100644 --- a/tests/src/utils.rs +++ b/tests/src/utils.rs @@ -4,12 +4,17 @@ use std::{ sync::{Arc, Once}, }; -use alloy::{primitives::U256, rpc::types::beacon::BlsPublicKey}; +use alloy::{ + primitives::{B256, U256}, + rpc::types::beacon::BlsPublicKey, +}; use cb_common::{ config::{ - load_jwt_config_file, JwtConfig, PbsConfig, PbsModuleConfig, RelayConfig, SignerConfig, - SignerType, StartSignerConfig, SIGNER_IMAGE_DEFAULT, SIGNER_JWT_AUTH_FAIL_LIMIT_DEFAULT, - SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT, SIGNER_PORT_DEFAULT, + CommitBoostConfig, LogsSettings, ModuleKind, ModuleSigningConfig, PbsConfig, + PbsModuleConfig, RelayConfig, SignerConfig, SignerType, StartSignerConfig, + StaticModuleConfig, StaticPbsConfig, SIGNER_IMAGE_DEFAULT, + SIGNER_JWT_AUTH_FAIL_LIMIT_DEFAULT, SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT, + SIGNER_PORT_DEFAULT, }, pbs::{RelayClient, RelayEntry}, signer::SignerLoader, @@ -65,7 +70,7 @@ pub fn generate_mock_relay_with_batch_size( RelayClient::new(config) } -pub fn get_pbs_static_config(port: u16) -> PbsConfig { +pub fn get_pbs_config(port: u16) -> PbsConfig { PbsConfig { host: Ipv4Addr::UNSPECIFIED, port, @@ -82,6 +87,23 @@ pub fn get_pbs_static_config(port: u16) -> PbsConfig { } } +pub fn get_pbs_static_config(pbs_config: PbsConfig) -> StaticPbsConfig { + StaticPbsConfig { docker_image: String::from(""), pbs_config, with_signer: true } +} + +pub fn get_commit_boost_config(pbs_static_config: StaticPbsConfig) -> CommitBoostConfig { + CommitBoostConfig { + chain: Chain::Hoodi, + relays: vec![], + pbs: pbs_static_config, + muxes: None, + modules: Some(vec![]), + signer: None, + metrics: None, + logs: LogsSettings::default(), + } +} + pub fn to_pbs_config( chain: Chain, pbs_config: PbsConfig, @@ -113,7 +135,7 @@ pub fn get_signer_config(loader: SignerLoader) -> SignerConfig { pub fn get_start_signer_config( signer_config: SignerConfig, chain: Chain, - jwts: &HashMap, + mod_signing_configs: &HashMap, ) -> StartSignerConfig { match signer_config.inner { SignerType::Local { loader, .. 
} => StartSignerConfig { @@ -121,7 +143,7 @@ pub fn get_start_signer_config( loader: Some(loader), store: None, endpoint: SocketAddr::new(signer_config.host.into(), signer_config.port), - jwts: jwts.clone(), + mod_signing_configs: mod_signing_configs.clone(), jwt_auth_fail_limit: signer_config.jwt_auth_fail_limit, jwt_auth_fail_timeout_seconds: signer_config.jwt_auth_fail_timeout_seconds, dirk: None, @@ -130,10 +152,13 @@ pub fn get_start_signer_config( } } -/// Loads the JWT config from the test file -pub fn get_jwt_config() -> HashMap { - let cwd = std::env::current_dir().unwrap(); - let mut jwt_file_path = cwd.join("data/configs/jwt.happy.toml"); - jwt_file_path = jwt_file_path.canonicalize().unwrap(); - load_jwt_config_file(&jwt_file_path).expect("Failed to load JWT config") +pub fn create_module_config(id: &ModuleId, signing_id: &B256) -> StaticModuleConfig { + StaticModuleConfig { + id: id.clone(), + signing_id: Some(*signing_id), + docker_image: String::from(""), + env: None, + env_file: None, + kind: ModuleKind::Commit, + } } diff --git a/tests/tests/pbs_get_header.rs b/tests/tests/pbs_get_header.rs index 747d460c..5a13b094 100644 --- a/tests/tests/pbs_get_header.rs +++ b/tests/tests/pbs_get_header.rs @@ -12,7 +12,7 @@ use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; use cb_tests::{ mock_relay::{start_mock_relay_service, MockRelayState}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_static_config, setup_test_env, to_pbs_config}, + utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, }; use eyre::Result; use reqwest::StatusCode; @@ -35,7 +35,7 @@ async fn test_get_header() -> Result<()> { tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); // Run the PBS service - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), vec![mock_relay.clone()]); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), vec![mock_relay.clone()]); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); @@ -87,7 +87,7 @@ async fn test_get_header_returns_204_if_relay_down() -> Result<()> { // tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); // Run the PBS service - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), vec![mock_relay.clone()]); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), vec![mock_relay.clone()]); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); @@ -119,7 +119,7 @@ async fn test_get_header_returns_400_if_request_is_invalid() -> Result<()> { tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); // Run the PBS service - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), vec![mock_relay.clone()]); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), vec![mock_relay.clone()]); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); diff --git a/tests/tests/pbs_get_status.rs b/tests/tests/pbs_get_status.rs index 0694b97a..7112a46b 100644 --- a/tests/tests/pbs_get_status.rs +++ b/tests/tests/pbs_get_status.rs @@ -9,7 +9,7 @@ use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; use cb_tests::{ mock_relay::{start_mock_relay_service, MockRelayState}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_static_config, setup_test_env, to_pbs_config}, + utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, }; use eyre::Result; use 
reqwest::StatusCode; @@ -34,7 +34,7 @@ async fn test_get_status() -> Result<()> { tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_0_port)); tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_1_port)); - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), relays.clone()); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays.clone()); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); @@ -67,7 +67,7 @@ async fn test_get_status_returns_502_if_relay_down() -> Result<()> { // Don't start the relay // tokio::spawn(start_mock_relay_service(mock_state.clone(), relay_port)); - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), relays.clone()); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays.clone()); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); diff --git a/tests/tests/pbs_mux.rs b/tests/tests/pbs_mux.rs index a8f3ed1c..624217d3 100644 --- a/tests/tests/pbs_mux.rs +++ b/tests/tests/pbs_mux.rs @@ -10,7 +10,7 @@ use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; use cb_tests::{ mock_relay::{start_mock_relay_service, MockRelayState}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_static_config, setup_test_env, to_pbs_config}, + utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, }; use eyre::Result; use reqwest::StatusCode; @@ -37,7 +37,7 @@ async fn test_mux() -> Result<()> { // Register all relays in PBS config let relays = vec![default_relay.clone()]; - let mut config = to_pbs_config(chain, get_pbs_static_config(pbs_port), relays); + let mut config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); config.all_relays = vec![mux_relay_1.clone(), mux_relay_2.clone(), default_relay.clone()]; // Configure mux for two relays diff --git a/tests/tests/pbs_post_blinded_blocks.rs b/tests/tests/pbs_post_blinded_blocks.rs index 3ab378a4..03c268ba 100644 --- a/tests/tests/pbs_post_blinded_blocks.rs +++ b/tests/tests/pbs_post_blinded_blocks.rs @@ -10,7 +10,7 @@ use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; use cb_tests::{ mock_relay::{start_mock_relay_service, MockRelayState}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_static_config, setup_test_env, to_pbs_config}, + utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, }; use eyre::Result; use reqwest::StatusCode; @@ -31,7 +31,7 @@ async fn test_submit_block() -> Result<()> { tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); // Run the PBS service - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), relays); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); @@ -63,7 +63,7 @@ async fn test_submit_block_too_large() -> Result<()> { let mock_state = Arc::new(MockRelayState::new(chain, signer).with_large_body()); tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), relays); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); diff --git a/tests/tests/pbs_post_validators.rs b/tests/tests/pbs_post_validators.rs index c0a27c93..3f493305 100644 --- a/tests/tests/pbs_post_validators.rs +++ 
b/tests/tests/pbs_post_validators.rs @@ -10,7 +10,7 @@ use cb_pbs::{DefaultBuilderApi, PbsService, PbsState}; use cb_tests::{ mock_relay::{start_mock_relay_service, MockRelayState}, mock_validator::MockValidator, - utils::{generate_mock_relay, get_pbs_static_config, setup_test_env, to_pbs_config}, + utils::{generate_mock_relay, get_pbs_config, setup_test_env, to_pbs_config}, }; use eyre::Result; use reqwest::StatusCode; @@ -31,7 +31,7 @@ async fn test_register_validators() -> Result<()> { tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); // Run the PBS service - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), relays); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); @@ -77,7 +77,7 @@ async fn test_register_validators_returns_422_if_request_is_malformed() -> Resul tokio::spawn(start_mock_relay_service(mock_state.clone(), pbs_port + 1)); // Run the PBS service - let config = to_pbs_config(chain, get_pbs_static_config(pbs_port), relays); + let config = to_pbs_config(chain, get_pbs_config(pbs_port), relays); let state = PbsState::new(config); tokio::spawn(PbsService::run::<(), DefaultBuilderApi>(state)); diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs index 06a2ea32..fce8ae72 100644 --- a/tests/tests/signer_jwt_auth.rs +++ b/tests/tests/signer_jwt_auth.rs @@ -1,6 +1,12 @@ -use std::time::Duration; +use std::{collections::HashMap, time::Duration}; -use cb_common::{commit::constants::GET_PUBKEYS_PATH, types::ModuleId, utils::create_jwt}; +use alloy::primitives::b256; +use cb_common::{ + commit::constants::GET_PUBKEYS_PATH, + config::{load_module_signing_configs, ModuleSigningConfig}, + types::ModuleId, + utils::create_jwt, +}; use cb_tests::{ signer_service::{start_server, verify_pubkeys}, utils::{self, setup_test_env}, @@ -11,13 +17,27 @@ use tracing::info; const JWT_MODULE: &str = "test-module"; +async fn create_mod_signing_configs() -> HashMap { + let mut cfg = + utils::get_commit_boost_config(utils::get_pbs_static_config(utils::get_pbs_config(0))); + + let module_id = ModuleId(JWT_MODULE.to_string()); + let signing_id = b256!("0101010101010101010101010101010101010101010101010101010101010101"); + + cfg.modules = Some(vec![utils::create_module_config(&module_id, &signing_id)]); + + let jwts = HashMap::from([(module_id.clone(), "supersecret".to_string())]); + + load_module_signing_configs(&cfg, &jwts).unwrap() +} + #[tokio::test] async fn test_signer_jwt_auth_success() -> Result<()> { setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); - let jwts = utils::get_jwt_config(); - let start_config = start_server(20100, &jwts).await?; - let jwt_config = jwts.get(&module_id).expect("JWT config for test module not found"); + let mod_cfgs = create_mod_signing_configs().await; + let start_config = start_server(20100, &mod_cfgs).await?; + let jwt_config = mod_cfgs.get(&module_id).expect("JWT config for test module not found"); // Run a pubkeys request let jwt = create_jwt(&module_id, &jwt_config.jwt_secret)?; @@ -35,8 +55,8 @@ async fn test_signer_jwt_auth_success() -> Result<()> { async fn test_signer_jwt_auth_fail() -> Result<()> { setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); - let jwts = utils::get_jwt_config(); - let start_config = start_server(20101, &jwts).await?; + let mod_cfgs = create_mod_signing_configs().await; + let start_config = start_server(20101, 
&mod_cfgs).await?; // Run a pubkeys request - this should fail due to invalid JWT let jwt = create_jwt(&module_id, "incorrect secret")?; @@ -56,9 +76,9 @@ async fn test_signer_jwt_auth_fail() -> Result<()> { async fn test_signer_jwt_rate_limit() -> Result<()> { setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); - let jwts = utils::get_jwt_config(); - let start_config = start_server(20102, &jwts).await?; - let jwt_config = jwts.get(&module_id).expect("JWT config for test module not found"); + let mod_cfgs = create_mod_signing_configs().await; + let start_config = start_server(20102, &mod_cfgs).await?; + let mod_cfg = mod_cfgs.get(&module_id).expect("JWT config for test module not found"); // Run as many pubkeys requests as the fail limit let jwt = create_jwt(&module_id, "incorrect secret")?; @@ -70,7 +90,7 @@ async fn test_signer_jwt_rate_limit() -> Result<()> { } // Run another request - this should fail due to rate limiting now - let jwt = create_jwt(&module_id, &jwt_config.jwt_secret)?; + let jwt = create_jwt(&module_id, &mod_cfg.jwt_secret)?; let response = client.get(&url).bearer_auth(&jwt).send().await?; assert!(response.status() == StatusCode::TOO_MANY_REQUESTS); diff --git a/tests/tests/signer_request_sig.rs b/tests/tests/signer_request_sig.rs index f3af9672..a6ca7be3 100644 --- a/tests/tests/signer_request_sig.rs +++ b/tests/tests/signer_request_sig.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use alloy::{ hex, primitives::{b256, FixedBytes}, @@ -7,6 +9,7 @@ use cb_common::{ constants::REQUEST_SIGNATURE_PATH, request::{SignConsensusRequest, SignRequest}, }, + config::{load_module_signing_configs, ModuleSigningConfig}, types::ModuleId, utils::create_jwt, }; @@ -21,8 +24,28 @@ const MODULE_ID_1: &str = "test-module"; const MODULE_ID_2: &str = "another-module"; const PUBKEY_1: [u8; 48] = hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4"); -const PUBKEY_2: [u8; 48] = - hex!("b3a22e4a673ac7a153ab5b3c17a4dbef55f7e47210b20c0cbb0e66df5b36bb49ef808577610b034172e955d2312a61b9"); + +async fn create_mod_signing_configs() -> HashMap { + let mut cfg = + utils::get_commit_boost_config(utils::get_pbs_static_config(utils::get_pbs_config(0))); + + let module_id_1 = ModuleId(MODULE_ID_1.to_string()); + let signing_id_1 = b256!("0x6a33a23ef26a4836979edff86c493a69b26ccf0b4a16491a815a13787657431b"); + let module_id_2 = ModuleId(MODULE_ID_2.to_string()); + let signing_id_2 = b256!("0x61fe00135d7b4912a8c63ada215ac2e62326e6e7b30f49a29fcf9779d7ad800d"); + + cfg.modules = Some(vec![ + utils::create_module_config(&module_id_1, &signing_id_1), + utils::create_module_config(&module_id_2, &signing_id_2), + ]); + + let jwts = HashMap::from([ + (module_id_1.clone(), "supersecret".to_string()), + (module_id_2.clone(), "anothersecret".to_string()), + ]); + + load_module_signing_configs(&cfg, &jwts).unwrap() +} /// Makes sure the signer service signs requests correctly, using the module's /// signing ID @@ -30,9 +53,9 @@ const PUBKEY_2: [u8; 48] = async fn test_signer_sign_request_good() -> Result<()> { setup_test_env(); let module_id = ModuleId(MODULE_ID_1.to_string()); - let jwts = utils::get_jwt_config(); - let start_config = start_server(20200, &jwts).await?; - let jwt_config = jwts.get(&module_id).expect("JWT config for test module not found"); + let mod_cfgs = create_mod_signing_configs().await; + let start_config = start_server(20200, &mod_cfgs).await?; + let jwt_config = mod_cfgs.get(&module_id).expect("JWT config for test module 
not found"); // Send a signing request let object_root = b256!("0x0123456789012345678901234567890123456789012345678901234567890123"); @@ -62,9 +85,9 @@ async fn test_signer_sign_request_good() -> Result<()> { async fn test_signer_sign_request_different_module() -> Result<()> { setup_test_env(); let module_id = ModuleId(MODULE_ID_2.to_string()); - let jwts = utils::get_jwt_config(); - let start_config = start_server(20201, &jwts).await?; - let jwt_config = jwts.get(&module_id).expect("JWT config for 2nd test module not found"); + let mod_cfgs = create_mod_signing_configs().await; + let start_config = start_server(20201, &mod_cfgs).await?; + let jwt_config = mod_cfgs.get(&module_id).expect("JWT config for 2nd test module not found"); // Send a signing request let object_root = b256!("0x0123456789012345678901234567890123456789012345678901234567890123"); From 8d08c114fd25b10de06f2692778ad003b7b8620e Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 26 Jun 2025 13:29:13 -0400 Subject: [PATCH 46/67] Started the signer doc --- docs/docs/developing/preconf-signing.md | 0 docs/docs/get_started/configuration.md | 28 +------------------------ 2 files changed, 1 insertion(+), 27 deletions(-) create mode 100644 docs/docs/developing/preconf-signing.md diff --git a/docs/docs/developing/preconf-signing.md b/docs/docs/developing/preconf-signing.md new file mode 100644 index 00000000..e69de29b diff --git a/docs/docs/get_started/configuration.md b/docs/docs/get_started/configuration.md index 7bc49405..bd438945 100644 --- a/docs/docs/get_started/configuration.md +++ b/docs/docs/get_started/configuration.md @@ -345,6 +345,7 @@ enabled = true id = "DA_COMMIT" type = "commit" docker_image = "test_da_commit" +signing_id = "0x6a33a23ef26a4836979edff86c493a69b26ccf0b4a16491a815a13787657431b" sleep_secs = 5 [[modules]] @@ -360,33 +361,6 @@ A few things to note: To learn more about developing modules, check out [here](/category/developing). -## JWT Config File - -The Signer service's API is not configured to be used publically by arbitrary clients - access to it is whitelisted to applications (modules) that you permit in the Signer's configuration files. Each module that interacts with the Signer service must authenticate via a [JWT](https://en.wikipedia.org/wiki/JSON_Web_Token) included in its HTTP request headers. The secret authentication string for these JWTs is a unique value assigned to each module that you permit. Configuration for these secrets is done in the **JWT configuration file**. Any module that attempts to access the Signer API, but is not able to provide a JWT with an authentication secret, will be denied access. - -The JWT configuration file is a TOML file, similar to the Commit Boost configuration file, but is kept separate for isolation when using Docker containers. It has the following structure: - -```toml -[[modules]] -module_name = "test-module" -jwt_secret = "supersecret" -signing_id = "0x6a33a23ef26a4836979edff86c493a69b26ccf0b4a16491a815a13787657431b" - -[[modules]] -module_name = "another-module" -jwt_secret = "secondsecret" -signing_id = "0x61fe00135d7b4912a8c63ada215ac2e62326e6e7b30f49a29fcf9779d7ad800d" -``` - -Each module that should be allowed to access the Signer API must have an entry in this file, prefixed with the `[[modules]]` line. Each one must have the following fields: - -- `module_name`: The unique name (preferably human-readable) to assign to the module. This is primarily used as an identifier for it in things like logging messages. 
-- `jwt_secret`: The unique secret string that the module must provide in its JWT header for authentication. The module must have this same secret embedded in its own configuration. If using Commit Boost's Docker Compose generator, the generated container files will provide this value to each respective module's Docker container as the `CB_SIGNER_JWT` environment variable. -- `signing_id`: A 32-byte hex string unique to the module that will be used by the Signer service during signing requests from the module to generate signatures that are unique to the requesting module. This should *not* change and will typically come from the documentation provided by the module authors. If using Commit Boost's Docker Compose generator, it will *not* be provided to the module in the environment variable; the module's own code must have this value built into it ahead of time. See the [Module Signing ID](../developing/commit-module.md#module-signing-id) section below for more details. - -This file can be named anything, as long as it ends with a `.toml` extension, and saved anywhere accessible by the Commit Boost CLI and Docker daemon (if using the Docker Compose generator and Docker mode). The location for the file must be specified in the `CB_SIGNER_JWT_CONFIG_FILE` environment variable of the machine (or container) running the Signer service. - - ## Vouch [Vouch](https://github.com/attestantio/vouch) is a multi-node validator client built by [Attestant](https://www.attestant.io/). Vouch is particular in that it also integrates an MEV-Boost client to interact with relays. The Commit-Boost PBS module is compatible with the Vouch `blockrelay` since it implements the same Builder-API as relays. For example, depending on your setup and preference, you may want to fetch headers from a given relay using Commit-Boost vs using the built-in Vouch `blockrelay`. 
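For reference, a minimal sketch of the module-side flow the updated configuration docs above describe: the module reads its secret from the `CB_SIGNER_JWT` environment variable, mints a JWT for its module ID, and calls the signer API with it as a bearer token. This is an illustrative assumption and not part of the patch series; it reuses the `create_jwt` helper and `GET_PUBKEYS_PATH` constant that appear in the tests above and assumes their signatures are unchanged, and the `fetch_pubkeys` function name and `signer_url` parameter are hypothetical.

```rust
use cb_common::{commit::constants::GET_PUBKEYS_PATH, types::ModuleId, utils::create_jwt};

/// Hypothetical module-side helper: ask the signer which pubkeys it manages for this module.
async fn fetch_pubkeys(signer_url: &str) -> eyre::Result<reqwest::Response> {
    // The Docker Compose generator injects the per-module secret as CB_SIGNER_JWT.
    let jwt_secret = std::env::var("CB_SIGNER_JWT")?;
    // "DA_COMMIT" matches the example module id used in configuration.md above.
    let module_id = ModuleId("DA_COMMIT".to_string());

    // Mint a JWT for this module and attach it as a bearer token, mirroring the tests.
    let jwt = create_jwt(&module_id, &jwt_secret)?;
    let url = format!("{signer_url}{GET_PUBKEYS_PATH}");
    let response = reqwest::Client::new().get(&url).bearer_auth(&jwt).send().await?;
    Ok(response)
}
```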
From 64b49f2382c8efe98dee3df880da7f9256b18a18 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 1 Jul 2025 00:23:31 -0400 Subject: [PATCH 47/67] Overhauled the signing_id setup to be directly in the signed struct --- crates/common/src/commit/request.rs | 1 + crates/common/src/config/signer.rs | 2 +- crates/common/src/pbs/types/get_header.rs | 1 + crates/common/src/signature.rs | 49 ++++++++---- crates/common/src/signer/schemes/bls.rs | 20 ++++- crates/common/src/signer/schemes/ecdsa.rs | 52 +++++++++++-- crates/common/src/signer/store.rs | 8 +- crates/common/src/types.rs | 16 ++++ crates/pbs/src/mev_boost/get_header.rs | 1 + crates/signer/src/manager/local.rs | 92 ++++++++++++++++++++--- crates/signer/src/service.rs | 6 +- docs/docs/developing/preconf-signing.md | 59 +++++++++++++++ docs/docs/get_started/configuration.md | 2 +- 13 files changed, 264 insertions(+), 45 deletions(-) diff --git a/crates/common/src/commit/request.rs b/crates/common/src/commit/request.rs index b8843234..d9286868 100644 --- a/crates/common/src/commit/request.rs +++ b/crates/common/src/commit/request.rs @@ -57,6 +57,7 @@ impl SignedProxyDelegation { &self.message.delegator, &self.message, &self.signature, + None, COMMIT_BOOST_DOMAIN, ) } diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index c94e07ca..f2623acf 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -334,7 +334,7 @@ pub fn load_module_signing_configs( ) } None => { - seen_signing_ids.insert(module_signing_config.signing_id.clone(), &module.id); + seen_signing_ids.insert(module_signing_config.signing_id, &module.id); signing_id } }; diff --git a/crates/common/src/pbs/types/get_header.rs b/crates/common/src/pbs/types/get_header.rs index 954aca66..7b378674 100644 --- a/crates/common/src/pbs/types/get_header.rs +++ b/crates/common/src/pbs/types/get_header.rs @@ -179,6 +179,7 @@ mod tests { &parsed.message.pubkey.into(), &parsed.message, &parsed.signature, + None, APPLICATION_BUILDER_DOMAIN ) .is_ok()) diff --git a/crates/common/src/signature.rs b/crates/common/src/signature.rs index e51e2291..5ed065ac 100644 --- a/crates/common/src/signature.rs +++ b/crates/common/src/signature.rs @@ -1,4 +1,7 @@ -use alloy::rpc::types::beacon::{constants::BLS_DST_SIG, BlsPublicKey, BlsSignature}; +use alloy::{ + primitives::B256, + rpc::types::beacon::{constants::BLS_DST_SIG, BlsPublicKey, BlsSignature}, +}; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; @@ -6,7 +9,7 @@ use crate::{ constants::{COMMIT_BOOST_DOMAIN, GENESIS_VALIDATORS_ROOT}, error::BlstErrorWrapper, signer::{verify_bls_signature, BlsSecretKey}, - types::Chain, + types::{self, Chain}, }; pub fn sign_message(secret_key: &BlsSecretKey, msg: &[u8]) -> BlsSignature { @@ -14,14 +17,7 @@ pub fn sign_message(secret_key: &BlsSecretKey, msg: &[u8]) -> BlsSignature { BlsSignature::from_slice(&signature) } -pub fn compute_signing_root(object_root: [u8; 32], signing_domain: [u8; 32]) -> [u8; 32] { - #[derive(Default, Debug, TreeHash)] - struct SigningData { - object_root: [u8; 32], - signing_domain: [u8; 32], - } - - let signing_data = SigningData { object_root, signing_domain }; +pub fn compute_signing_root(signing_data: &T) -> [u8; 32] { signing_data.tree_hash_root().0 } @@ -52,14 +48,25 @@ pub fn verify_signed_message( pubkey: &BlsPublicKey, msg: &T, signature: &BlsSignature, + module_signing_id: Option<&B256>, domain_mask: [u8; 4], ) -> Result<(), BlstErrorWrapper> { let domain = compute_domain(chain, domain_mask); - let 
signing_root = compute_signing_root(msg.tree_hash_root().0, domain); - + let signing_root = match module_signing_id { + Some(id) => compute_signing_root(&types::PropCommitSigningData { + object_root: msg.tree_hash_root().0, + module_signing_id: id.0, + signing_domain: domain, + }), + None => compute_signing_root(&types::BeaconSigningData { + object_root: msg.tree_hash_root().0, + signing_domain: domain, + }), + }; verify_bls_signature(pubkey, &signing_root, signature) } +/// Signs a message with the Beacon builder domain. pub fn sign_builder_message( chain: Chain, secret_key: &BlsSecretKey, @@ -74,7 +81,11 @@ pub fn sign_builder_root( object_root: [u8; 32], ) -> BlsSignature { let domain = chain.builder_domain(); - let signing_root = compute_signing_root(object_root, domain); + let signing_data = types::BeaconSigningData { + object_root: object_root.tree_hash_root().0, + signing_domain: domain, + }; + let signing_root = compute_signing_root(&signing_data); sign_message(secret_key, &signing_root) } @@ -82,9 +93,19 @@ pub fn sign_commit_boost_root( chain: Chain, secret_key: &BlsSecretKey, object_root: [u8; 32], + module_signing_id: Option<[u8; 32]>, ) -> BlsSignature { let domain = compute_domain(chain, COMMIT_BOOST_DOMAIN); - let signing_root = compute_signing_root(object_root, domain); + let signing_root = match module_signing_id { + Some(id) => compute_signing_root(&types::PropCommitSigningData { + object_root, + module_signing_id: id, + signing_domain: domain, + }), + None => { + compute_signing_root(&types::BeaconSigningData { object_root, signing_domain: domain }) + } + }; sign_message(secret_key, &signing_root) } diff --git a/crates/common/src/signer/schemes/bls.rs b/crates/common/src/signer/schemes/bls.rs index f133b2bc..f3a511e7 100644 --- a/crates/common/src/signer/schemes/bls.rs +++ b/crates/common/src/signer/schemes/bls.rs @@ -38,14 +38,26 @@ impl BlsSigner { } } - pub async fn sign(&self, chain: Chain, object_root: [u8; 32]) -> BlsSignature { + pub async fn sign( + &self, + chain: Chain, + object_root: [u8; 32], + module_signing_id: Option<[u8; 32]>, + ) -> BlsSignature { match self { - BlsSigner::Local(sk) => sign_commit_boost_root(chain, sk, object_root), + BlsSigner::Local(sk) => { + sign_commit_boost_root(chain, sk, object_root, module_signing_id) + } } } - pub async fn sign_msg(&self, chain: Chain, msg: &impl TreeHash) -> BlsSignature { - self.sign(chain, msg.tree_hash_root().0).await + pub async fn sign_msg( + &self, + chain: Chain, + msg: &impl TreeHash, + module_signing_id: Option<[u8; 32]>, + ) -> BlsSignature { + self.sign(chain, msg.tree_hash_root().0, module_signing_id).await } } diff --git a/crates/common/src/signer/schemes/ecdsa.rs b/crates/common/src/signer/schemes/ecdsa.rs index 612df5e3..2a33001c 100644 --- a/crates/common/src/signer/schemes/ecdsa.rs +++ b/crates/common/src/signer/schemes/ecdsa.rs @@ -10,7 +10,7 @@ use tree_hash::TreeHash; use crate::{ constants::COMMIT_BOOST_DOMAIN, signature::{compute_domain, compute_signing_root}, - types::Chain, + types::{self, Chain}, }; #[derive(Debug, Clone, PartialEq, Eq, Hash)] @@ -87,22 +87,37 @@ impl EcdsaSigner { &self, chain: Chain, object_root: [u8; 32], + module_signing_id: Option<[u8; 32]>, ) -> Result { match self { EcdsaSigner::Local(sk) => { let domain = compute_domain(chain, COMMIT_BOOST_DOMAIN); - let signing_root = compute_signing_root(object_root, domain).into(); + let signing_root = match module_signing_id { + Some(id) => { + let signing_data = types::PropCommitSigningData { + object_root, + 
module_signing_id: id, + signing_domain: domain, + }; + compute_signing_root(&signing_data).into() + } + None => { + let signing_data = + types::BeaconSigningData { object_root, signing_domain: domain }; + compute_signing_root(&signing_data).into() + } + }; sk.sign_hash_sync(&signing_root).map(EcdsaSignature::from) } } } - pub async fn sign_msg( &self, chain: Chain, msg: &impl TreeHash, + module_signing_id: Option<[u8; 32]>, ) -> Result { - self.sign(chain, msg.tree_hash_root().0).await + self.sign(chain, msg.tree_hash_root().0, module_signing_id).await } } @@ -124,15 +139,38 @@ mod test { use super::*; #[tokio::test] - async fn test_ecdsa_signer() { + async fn test_ecdsa_signer_noncommit() { + let pk = bytes!("88bcd6672d95bcba0d52a3146494ed4d37675af4ed2206905eb161aa99a6c0d1"); + let signer = EcdsaSigner::new_from_bytes(&pk).unwrap(); + + let object_root = [1; 32]; + let signature = signer.sign(Chain::Holesky, object_root, None).await.unwrap(); + + let domain = compute_domain(Chain::Holesky, COMMIT_BOOST_DOMAIN); + let signing_data = types::BeaconSigningData { object_root, signing_domain: domain }; + let msg = compute_signing_root(&signing_data); + + assert_eq!(msg, hex!("219ca7a673b2cbbf67bec6c9f60f78bd051336d57b68d1540190f30667e86725")); + + let address = signer.address(); + let verified = verify_ecdsa_signature(&address, &msg, &signature); + assert!(verified.is_ok()); + } + + #[tokio::test] + async fn test_ecdsa_signer_prop_commit() { let pk = bytes!("88bcd6672d95bcba0d52a3146494ed4d37675af4ed2206905eb161aa99a6c0d1"); let signer = EcdsaSigner::new_from_bytes(&pk).unwrap(); let object_root = [1; 32]; - let signature = signer.sign(Chain::Holesky, object_root).await.unwrap(); + let module_signing_id = [2; 32]; + let signature = + signer.sign(Chain::Holesky, object_root, Some(module_signing_id)).await.unwrap(); let domain = compute_domain(Chain::Holesky, COMMIT_BOOST_DOMAIN); - let msg = compute_signing_root(object_root, domain); + let signing_data = + types::PropCommitSigningData { object_root, module_signing_id, signing_domain: domain }; + let msg = compute_signing_root(&signing_data); assert_eq!(msg, hex!("219ca7a673b2cbbf67bec6c9f60f78bd051336d57b68d1540190f30667e86725")); diff --git a/crates/common/src/signer/store.rs b/crates/common/src/signer/store.rs index bd23c120..9e251dd9 100644 --- a/crates/common/src/signer/store.rs +++ b/crates/common/src/signer/store.rs @@ -532,7 +532,8 @@ mod test { delegator: consensus_signer.pubkey(), proxy: proxy_signer.pubkey(), }; - let signature = consensus_signer.sign(Chain::Mainnet, message.tree_hash_root().0).await; + let signature = + consensus_signer.sign(Chain::Mainnet, message.tree_hash_root().0, None).await; let delegation = SignedProxyDelegationBls { signature, message }; let proxy_signer = BlsProxySigner { signer: proxy_signer, delegation }; @@ -645,7 +646,8 @@ mod test { delegator: consensus_signer.pubkey(), proxy: proxy_signer.pubkey(), }; - let signature = consensus_signer.sign(Chain::Mainnet, message.tree_hash_root().0).await; + let signature = + consensus_signer.sign(Chain::Mainnet, message.tree_hash_root().0, None).await; let delegation = SignedProxyDelegationBls { signature, message }; let proxy_signer = BlsProxySigner { signer: proxy_signer, delegation }; @@ -674,7 +676,7 @@ mod test { .join(consensus_signer.pubkey().to_string()) .join("TEST_MODULE") .join("bls") - .join(format!("{}.sig", proxy_signer.pubkey().to_string())) + .join(format!("{}.sig", proxy_signer.pubkey())) ) .unwrap() ) diff --git a/crates/common/src/types.rs 
b/crates/common/src/types.rs index 5293a789..5d73d663 100644 --- a/crates/common/src/types.rs +++ b/crates/common/src/types.rs @@ -4,6 +4,7 @@ use alloy::primitives::{hex, Bytes}; use derive_more::{Deref, Display, From, Into}; use eyre::{bail, Context}; use serde::{Deserialize, Serialize}; +use tree_hash_derive::TreeHash; use crate::{constants::APPLICATION_BUILDER_DOMAIN, signature::compute_domain}; @@ -283,6 +284,21 @@ impl<'de> Deserialize<'de> for Chain { } } +/// Structure for signatures used in Beacon chain operations +#[derive(Default, Debug, TreeHash)] +pub struct BeaconSigningData { + pub object_root: [u8; 32], + pub signing_domain: [u8; 32], +} + +/// Structure for signatures used for proposer commitments in Commit Boost +#[derive(Default, Debug, TreeHash)] +pub struct PropCommitSigningData { + pub object_root: [u8; 32], + pub module_signing_id: [u8; 32], + pub signing_domain: [u8; 32], +} + /// Returns seconds_per_slot and genesis_fork_version from a spec, such as /// returned by /eth/v1/config/spec ref: https://ethereum.github.io/beacon-APIs/#/Config/getSpec /// Try to load two formats: diff --git a/crates/pbs/src/mev_boost/get_header.rs b/crates/pbs/src/mev_boost/get_header.rs index e4922245..b0ede1ab 100644 --- a/crates/pbs/src/mev_boost/get_header.rs +++ b/crates/pbs/src/mev_boost/get_header.rs @@ -498,6 +498,7 @@ fn validate_signature( &received_relay_pubkey, &message, signature, + None, APPLICATION_BUILDER_DOMAIN, ) .map_err(ValidationError::Sigverify)?; diff --git a/crates/signer/src/manager/local.rs b/crates/signer/src/manager/local.rs index 6d9e35fe..9a423d78 100644 --- a/crates/signer/src/manager/local.rs +++ b/crates/signer/src/manager/local.rs @@ -1,6 +1,9 @@ use std::collections::HashMap; -use alloy::{primitives::Address, rpc::types::beacon::BlsSignature}; +use alloy::{ + primitives::{Address, B256}, + rpc::types::beacon::BlsSignature, +}; use cb_common::{ commit::request::{ ConsensusProxyMap, ProxyDelegationBls, ProxyDelegationEcdsa, SignedProxyDelegationBls, @@ -95,7 +98,7 @@ impl LocalSigningManager { let proxy_pubkey = signer.pubkey(); let message = ProxyDelegationBls { delegator, proxy: proxy_pubkey }; - let signature = self.sign_consensus(&delegator, &message.tree_hash_root().0).await?; + let signature = self.sign_consensus(&delegator, &message.tree_hash_root().0, None).await?; let delegation = SignedProxyDelegationBls { signature, message }; let proxy_signer = BlsProxySigner { signer, delegation }; @@ -114,7 +117,7 @@ impl LocalSigningManager { let proxy_address = signer.address(); let message = ProxyDelegationEcdsa { delegator, proxy: proxy_address }; - let signature = self.sign_consensus(&delegator, &message.tree_hash_root().0).await?; + let signature = self.sign_consensus(&delegator, &message.tree_hash_root().0, None).await?; let delegation = SignedProxyDelegationEcdsa { signature, message }; let proxy_signer = EcdsaProxySigner { signer, delegation }; @@ -130,12 +133,16 @@ impl LocalSigningManager { &self, pubkey: &BlsPublicKey, object_root: &[u8; 32], + module_signing_id: Option<&B256>, ) -> Result { let signer = self .consensus_signers .get(pubkey) .ok_or(SignerModuleError::UnknownConsensusSigner(pubkey.to_vec()))?; - let signature = signer.sign(self.chain, *object_root).await; + let signature = match module_signing_id { + Some(id) => signer.sign(self.chain, *object_root, Some(id.0)).await, + None => signer.sign(self.chain, *object_root, None).await, + }; Ok(signature) } @@ -144,13 +151,17 @@ impl LocalSigningManager { &self, pubkey: &BlsPublicKey, 
object_root: &[u8; 32], + module_signing_id: Option<&B256>, ) -> Result { let bls_proxy = self .proxy_signers .bls_signers .get(pubkey) .ok_or(SignerModuleError::UnknownProxySigner(pubkey.to_vec()))?; - let signature = bls_proxy.sign(self.chain, *object_root).await; + let signature = match module_signing_id { + Some(id) => bls_proxy.sign(self.chain, *object_root, Some(id.0)).await, + None => bls_proxy.sign(self.chain, *object_root, None).await, + }; Ok(signature) } @@ -158,13 +169,17 @@ impl LocalSigningManager { &self, address: &Address, object_root: &[u8; 32], + module_signing_id: Option<&B256>, ) -> Result { let ecdsa_proxy = self .proxy_signers .ecdsa_signers .get(address) .ok_or(SignerModuleError::UnknownProxySigner(address.to_vec()))?; - let signature = ecdsa_proxy.sign(self.chain, *object_root).await?; + let signature = match module_signing_id { + Some(id) => ecdsa_proxy.sign(self.chain, *object_root, Some(id.0)).await?, + None => ecdsa_proxy.sign(self.chain, *object_root, None).await?, + }; Ok(signature) } @@ -287,9 +302,48 @@ mod tests { (signing_manager, consensus_pk) } + mod test_bls { + use cb_common::{ + constants::COMMIT_BOOST_DOMAIN, signature::compute_domain, + signer::verify_bls_signature, types, + }; + + use super::*; + + #[tokio::test] + async fn test_key_signs_message() { + let (signing_manager, consensus_pk) = init_signing_manager(); + + let data_root = B256::random(); + let module_signing_id = B256::random(); + + let sig = signing_manager + .sign_consensus( + &consensus_pk.try_into().unwrap(), + &data_root, + Some(&module_signing_id), + ) + .await + .unwrap(); + + // Verify signature + let domain = compute_domain(CHAIN, COMMIT_BOOST_DOMAIN); + let signing_root = compute_signing_root(&types::PropCommitSigningData { + object_root: data_root.tree_hash_root().0, + signing_domain: domain, + module_signing_id: module_signing_id.0, + }); + + let validation_result = verify_bls_signature(&consensus_pk, &signing_root, &sig); + + assert!(validation_result.is_ok(), "Keypair must produce valid signatures of messages.") + } + } + mod test_proxy_bls { use cb_common::{ - constants::COMMIT_BOOST_DOMAIN, signature::compute_domain, signer::verify_bls_signature, + constants::COMMIT_BOOST_DOMAIN, signature::compute_domain, + signer::verify_bls_signature, types, }; use super::*; @@ -345,15 +399,20 @@ mod tests { let proxy_pk = signed_delegation.message.proxy; let data_root = B256::random(); + let module_signing_id = B256::random(); let sig = signing_manager - .sign_proxy_bls(&proxy_pk.try_into().unwrap(), &data_root) + .sign_proxy_bls(&proxy_pk.try_into().unwrap(), &data_root, Some(&module_signing_id)) .await .unwrap(); // Verify signature let domain = compute_domain(CHAIN, COMMIT_BOOST_DOMAIN); - let signing_root = compute_signing_root(data_root.tree_hash_root().0, domain); + let signing_root = compute_signing_root(&types::PropCommitSigningData { + object_root: data_root.tree_hash_root().0, + signing_domain: domain, + module_signing_id: module_signing_id.0, + }); let validation_result = verify_bls_signature(&proxy_pk, &signing_root, &sig); @@ -367,7 +426,7 @@ mod tests { mod test_proxy_ecdsa { use cb_common::{ constants::COMMIT_BOOST_DOMAIN, signature::compute_domain, - signer::verify_ecdsa_signature, + signer::verify_ecdsa_signature, types, }; use super::*; @@ -423,15 +482,24 @@ mod tests { let proxy_pk = signed_delegation.message.proxy; let data_root = B256::random(); + let module_signing_id = B256::random(); let sig = signing_manager - .sign_proxy_ecdsa(&proxy_pk.try_into().unwrap(), 
&data_root) + .sign_proxy_ecdsa( + &proxy_pk.try_into().unwrap(), + &data_root, + Some(&module_signing_id), + ) .await .unwrap(); // Verify signature let domain = compute_domain(CHAIN, COMMIT_BOOST_DOMAIN); - let signing_root = compute_signing_root(data_root.tree_hash_root().0, domain); + let signing_root = compute_signing_root(&types::PropCommitSigningData { + object_root: data_root.tree_hash_root().0, + signing_domain: domain, + module_signing_id: module_signing_id.0, + }); let validation_result = verify_ecdsa_signature(&proxy_pk, &signing_root, &sig); diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index 2422df31..3a169512 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -288,21 +288,21 @@ async fn handle_request_signature( let hash = state.hasher.hash(object_root, signing_id); info!("Signing hash: {hash:?}"); local_manager - .sign_consensus(pubkey, &hash) + .sign_consensus(pubkey, &hash, Some(signing_id)) .await .map(|sig| Json(sig).into_response()) } SignRequest::ProxyBls(SignProxyRequest { ref object_root, proxy: ref bls_key }) => { let hash = state.hasher.hash(object_root, signing_id); local_manager - .sign_proxy_bls(bls_key, &hash) + .sign_proxy_bls(bls_key, &hash, Some(signing_id)) .await .map(|sig| Json(sig).into_response()) } SignRequest::ProxyEcdsa(SignProxyRequest { ref object_root, proxy: ref ecdsa_key }) => { let hash = state.hasher.hash(object_root, signing_id); local_manager - .sign_proxy_ecdsa(ecdsa_key, &hash) + .sign_proxy_ecdsa(ecdsa_key, &hash, Some(signing_id)) .await .map(|sig| Json(sig).into_response()) } diff --git a/docs/docs/developing/preconf-signing.md b/docs/docs/developing/preconf-signing.md index e69de29b..746fd434 100644 --- a/docs/docs/developing/preconf-signing.md +++ b/docs/docs/developing/preconf-signing.md @@ -0,0 +1,59 @@ +# Requesting Preconfirmation Signatures with Commit Boost + +When you create a new validator on the Ethereum network, one of the steps is the generation of a new BLS private key (commonly known as the "validator key" or the "signer key") and its corresponding BLS public key (the "validator pubkey", used as an identifier). Typically this private key will be used by an Ethereum consensus client to sign things such as attestations and blocks for publication on the Beacon chain. These signatures prove that you, as the owner of that private key, approve of the data being signed. However, as general-purpose private keys, they can also be used to sign *other* arbitrary messages not destined for the Beacon chain. + +Commit Boost takes advantage of this by offering a standard known as **preconfirmations**. These are arbitrary messages (albeit with some important rules), similar to the kind used on the Beacon chain, that have been signed by one of the owner's private keys. Modules interested in leveraging Commit Boost's preconfirmations can construct their own data in whatever format they like and request that Commit Boost's **signer service** generate a signature for it with a particular private key. The module can then use that signature to verify the data was signed by that user. + +Commit Boost supports preconfirmation signatures for both BLS private keys (identified by their public key) and ECDSA private keys (identified by their Ethereum address). 
+ + +## Rules of Preconfirmation Signatures + +Preconfirmation signatures produced by Commit Boost's signer service conform to the following rules: + +- Signatures are **unique** to a given EVM chain (identified by its [chain ID](https://chainlist.org/)). Signatures generated for one chain will not work on a different chain. +- Signatures are **unique** to Commit Boost preconfirmations. The signer service **cannot** be used to create signatures that could be used for other applications, such as for attestations on the Beacon chain. While the signer service has access to the same validator private keys used to attest on the Beacon chain, it cannot create signatures that would get you slashed on the Beacon chain. +- Signatures are **unique** to a particular module. One module cannot, for example, request an identical payload as another module and effectively "forge" a signature for the second module; identical payloads from two separate modules will result in two separate signatures. +- The data payload being signed must be a **32-byte array**, typically serializd as a 64-character hex string with an optional `0x` prefix. The value itself is arbitrary, as long as it has meaning to the requester - though it is typically the 256-bit hash of some kind of data. +- If requesting a signature from a BLS key, the resulting signature will be a standard BLS signature (96 bytes in length). +- If requesting a signature from an ECDSA key, the resulting signature will be a standard Ethereum RSV signature (65 bytes in length). + + +## Configuring a Module for Preconfirmations + +Commit Boost's signer service must be configured prior to launching to expect requests from your module. There are two main parts: + +1. An entry for your module into [Commit Boost's configuration file](../get_started/configuration.md#custom-module). This must include a unique ID for your module, the line `type = "commit"`, and include a unique [signing ID](#the-signing-id) for your module. Generally you should provide values for these in your documentation, so your users can reference it when configuring their own Commit Boost node. + +2. A JWT secret used by your module to authenticate with the signer in HTTP requests. *{Placeholder for more details on setting this here}* + +Once the user has configured both Commit Boost and your module with these settings, your module will be able to authenticate with the signer service and request signatures. + + +## The Signing ID + +Your module's signing ID is a 32-byte value that is used as a unique identifier within the signing process. Preconfirmation signatures incorporate this value along with the data being signed as a way to create signatures that are exclusive to your module, so other modules can't maliciously construct signatures that appear to be from your module. Your module must have this ID incorporated into itself ahead of time, and the user must include this same ID within their Commit Boost configuration file section for your module. Commit Boost does not maintain a global registry of signing IDs, so this is a value you should provide to your users in your documentation. + +The Signing ID is decoupled from your module's human-readable name (the `module_id` field in the Commit Boost configuration file) so that any changes to your module name will not invalidate signatures from previous versions. Similarly, if you don't change the module ID but *want* to invalidate previous signatures, you can modify the signing ID and it will do so. 
Just ensure your users are made aware of the change, so they can update it in their Commit Boost configuration files accordingly. + + +## Structure of a Signature + +The form preconfirmation signatures take depends on the type of signature being requested. + + +### BLS Signatures + +Signatures requested from BLS keys take the standard form (96-byte values). Generating them is done by constructing a 32-byte signing root from the hash of an SSZ Merkle tree that , which is typical of BLS signatures used by the Beacon chain: + + + + + + + + + + + +## Requesting a Signature from the Signer diff --git a/docs/docs/get_started/configuration.md b/docs/docs/get_started/configuration.md index bd438945..5f8bfd42 100644 --- a/docs/docs/get_started/configuration.md +++ b/docs/docs/get_started/configuration.md @@ -356,7 +356,7 @@ docker_image = "test_builder_log" A few things to note: - We now added a `signer` section which will be used to create the Signer module. -- There is now a `[[modules]]` section which at a minimum needs to specify the module `id`, `type` and `docker_image`. Additional parameters needed for the business logic of the module will also be here, +- There is now a `[[modules]]` section which at a minimum needs to specify the module `id`, `type` and `docker_image`. For modules with type `commit`, which will be used to access the Signer service and request signatures for preconfs, you will also need to specify the module's unique `signing_id` (see ). Additional parameters needed for the business logic of the module will also be here. To learn more about developing modules, check out [here](/category/developing). From 8b65b1e0492591eda9c44a3322eb83d6661baf6a Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 1 Jul 2025 02:21:49 -0400 Subject: [PATCH 48/67] Made proposer commitments nested Merkle trees to allow Dirk support --- crates/common/src/signature.rs | 28 ++++++------ crates/common/src/signer/schemes/ecdsa.rs | 29 +++++++----- crates/common/src/types.rs | 11 ++--- crates/signer/src/hasher/keccak.rs | 26 ----------- crates/signer/src/hasher/mod.rs | 11 ----- crates/signer/src/lib.rs | 1 - crates/signer/src/manager/dirk.rs | 43 +++++++++++++----- crates/signer/src/manager/local.rs | 24 ++++++---- crates/signer/src/service.rs | 54 +++++++++-------------- tests/tests/signer_request_sig.rs | 4 +- 10 files changed, 108 insertions(+), 123 deletions(-) delete mode 100644 crates/signer/src/hasher/keccak.rs delete mode 100644 crates/signer/src/hasher/mod.rs diff --git a/crates/common/src/signature.rs b/crates/common/src/signature.rs index 5ed065ac..cace9570 100644 --- a/crates/common/src/signature.rs +++ b/crates/common/src/signature.rs @@ -53,12 +53,14 @@ pub fn verify_signed_message( ) -> Result<(), BlstErrorWrapper> { let domain = compute_domain(chain, domain_mask); let signing_root = match module_signing_id { - Some(id) => compute_signing_root(&types::PropCommitSigningData { - object_root: msg.tree_hash_root().0, - module_signing_id: id.0, + Some(id) => compute_signing_root(&types::SigningData { + object_root: compute_signing_root(&types::PropCommitSigningInfo { + data: msg.tree_hash_root().0, + module_signing_id: id.0, + }), signing_domain: domain, }), - None => compute_signing_root(&types::BeaconSigningData { + None => compute_signing_root(&types::SigningData { object_root: msg.tree_hash_root().0, signing_domain: domain, }), @@ -81,10 +83,8 @@ pub fn sign_builder_root( object_root: [u8; 32], ) -> BlsSignature { let domain = chain.builder_domain(); - let signing_data = 
types::BeaconSigningData { - object_root: object_root.tree_hash_root().0, - signing_domain: domain, - }; + let signing_data = + types::SigningData { object_root: object_root.tree_hash_root().0, signing_domain: domain }; let signing_root = compute_signing_root(&signing_data); sign_message(secret_key, &signing_root) } @@ -97,14 +97,14 @@ pub fn sign_commit_boost_root( ) -> BlsSignature { let domain = compute_domain(chain, COMMIT_BOOST_DOMAIN); let signing_root = match module_signing_id { - Some(id) => compute_signing_root(&types::PropCommitSigningData { - object_root, - module_signing_id: id, + Some(id) => compute_signing_root(&types::SigningData { + object_root: compute_signing_root(&types::PropCommitSigningInfo { + data: object_root, + module_signing_id: id, + }), signing_domain: domain, }), - None => { - compute_signing_root(&types::BeaconSigningData { object_root, signing_domain: domain }) - } + None => compute_signing_root(&types::SigningData { object_root, signing_domain: domain }), }; sign_message(secret_key, &signing_root) } diff --git a/crates/common/src/signer/schemes/ecdsa.rs b/crates/common/src/signer/schemes/ecdsa.rs index 2a33001c..73bf7272 100644 --- a/crates/common/src/signer/schemes/ecdsa.rs +++ b/crates/common/src/signer/schemes/ecdsa.rs @@ -94,16 +94,18 @@ impl EcdsaSigner { let domain = compute_domain(chain, COMMIT_BOOST_DOMAIN); let signing_root = match module_signing_id { Some(id) => { - let signing_data = types::PropCommitSigningData { - object_root, - module_signing_id: id, + let signing_data = types::SigningData { + object_root: compute_signing_root(&types::PropCommitSigningInfo { + data: object_root, + module_signing_id: id, + }), signing_domain: domain, }; compute_signing_root(&signing_data).into() } None => { let signing_data = - types::BeaconSigningData { object_root, signing_domain: domain }; + types::SigningData { object_root, signing_domain: domain }; compute_signing_root(&signing_data).into() } }; @@ -147,7 +149,7 @@ mod test { let signature = signer.sign(Chain::Holesky, object_root, None).await.unwrap(); let domain = compute_domain(Chain::Holesky, COMMIT_BOOST_DOMAIN); - let signing_data = types::BeaconSigningData { object_root, signing_domain: domain }; + let signing_data = types::SigningData { object_root, signing_domain: domain }; let msg = compute_signing_root(&signing_data); assert_eq!(msg, hex!("219ca7a673b2cbbf67bec6c9f60f78bd051336d57b68d1540190f30667e86725")); @@ -165,14 +167,19 @@ mod test { let object_root = [1; 32]; let module_signing_id = [2; 32]; let signature = - signer.sign(Chain::Holesky, object_root, Some(module_signing_id)).await.unwrap(); - - let domain = compute_domain(Chain::Holesky, COMMIT_BOOST_DOMAIN); - let signing_data = - types::PropCommitSigningData { object_root, module_signing_id, signing_domain: domain }; + signer.sign(Chain::Hoodi, object_root, Some(module_signing_id)).await.unwrap(); + + let domain = compute_domain(Chain::Hoodi, COMMIT_BOOST_DOMAIN); + let signing_data = types::SigningData { + object_root: compute_signing_root(&types::PropCommitSigningInfo { + data: object_root, + module_signing_id, + }), + signing_domain: domain, + }; let msg = compute_signing_root(&signing_data); - assert_eq!(msg, hex!("219ca7a673b2cbbf67bec6c9f60f78bd051336d57b68d1540190f30667e86725")); + assert_eq!(msg, hex!("8cd49ccf2f9b0297796ff96ce5f7c5d26e20a59d0032ee2ad6249dcd9682b808")); let address = signer.address(); let verified = verify_ecdsa_signature(&address, &msg, &signature); diff --git a/crates/common/src/types.rs 
b/crates/common/src/types.rs index 5d73d663..a9c8ebfd 100644 --- a/crates/common/src/types.rs +++ b/crates/common/src/types.rs @@ -286,17 +286,18 @@ impl<'de> Deserialize<'de> for Chain { /// Structure for signatures used in Beacon chain operations #[derive(Default, Debug, TreeHash)] -pub struct BeaconSigningData { +pub struct SigningData { pub object_root: [u8; 32], pub signing_domain: [u8; 32], } -/// Structure for signatures used for proposer commitments in Commit Boost +/// Structure for signatures used for proposer commitments in Commit Boost. +/// The signing root of this struct must be used as the object_root of a +/// SigningData for signatures. #[derive(Default, Debug, TreeHash)] -pub struct PropCommitSigningData { - pub object_root: [u8; 32], +pub struct PropCommitSigningInfo { + pub data: [u8; 32], pub module_signing_id: [u8; 32], - pub signing_domain: [u8; 32], } /// Returns seconds_per_slot and genesis_fork_version from a spec, such as diff --git a/crates/signer/src/hasher/keccak.rs b/crates/signer/src/hasher/keccak.rs deleted file mode 100644 index 80a2ff54..00000000 --- a/crates/signer/src/hasher/keccak.rs +++ /dev/null @@ -1,26 +0,0 @@ -use alloy::primitives::{Keccak256, B256}; - -use super::SigningHasher; - -/// A hasher that uses Keccak256 for signing request hashes. -#[derive(Clone)] -pub struct KeccakHasher {} - -impl KeccakHasher { - /// Creates a new KeccakHasher instance. - pub fn new() -> Self { - Self {} - } -} - -impl SigningHasher for KeccakHasher { - /// Hashes an object root from a signing request and the unique signing ID - /// for the requesting module into a hash that can be used to sign the - /// request. - fn hash(&self, object_root: &B256, signing_id: &B256) -> B256 { - let mut hasher = Keccak256::new(); - hasher.update(object_root); - hasher.update(signing_id); - hasher.finalize() - } -} diff --git a/crates/signer/src/hasher/mod.rs b/crates/signer/src/hasher/mod.rs deleted file mode 100644 index d124448a..00000000 --- a/crates/signer/src/hasher/mod.rs +++ /dev/null @@ -1,11 +0,0 @@ -use alloy::primitives::B256; - -pub mod keccak; - -/// A trait for hashers that can provide unique signing hashes for incoming -/// signing requests. -pub trait SigningHasher: Clone { - /// Hashes an object root from a signing request and the unique signing ID - /// for the requesting module. 
- fn hash(&self, object_root: &B256, signing_id: &B256) -> B256; -} diff --git a/crates/signer/src/lib.rs b/crates/signer/src/lib.rs index cfe48407..4b5e1451 100644 --- a/crates/signer/src/lib.rs +++ b/crates/signer/src/lib.rs @@ -1,6 +1,5 @@ mod constants; pub mod error; -pub mod hasher; pub mod manager; mod metrics; mod proto; diff --git a/crates/signer/src/manager/dirk.rs b/crates/signer/src/manager/dirk.rs index 760e6640..08c73def 100644 --- a/crates/signer/src/manager/dirk.rs +++ b/crates/signer/src/manager/dirk.rs @@ -1,14 +1,14 @@ use std::{collections::HashMap, io::Write, path::PathBuf}; -use alloy::{hex, rpc::types::beacon::constants::BLS_SIGNATURE_BYTES_LEN}; +use alloy::{hex, primitives::B256, rpc::types::beacon::constants::BLS_SIGNATURE_BYTES_LEN}; use blsful::inner_types::{Field, G2Affine, G2Projective, Group, Scalar}; use cb_common::{ commit::request::{ConsensusProxyMap, ProxyDelegation, SignedProxyDelegation}, config::{DirkConfig, DirkHostConfig}, constants::COMMIT_BOOST_DOMAIN, - signature::compute_domain, + signature::{compute_domain, compute_signing_root}, signer::{BlsPublicKey, BlsSignature, ProxyStore}, - types::{Chain, ModuleId}, + types::{self, Chain, ModuleId}, }; use eyre::{bail, OptionExt}; use futures::{future::join_all, stream::FuturesUnordered, FutureExt, StreamExt}; @@ -193,13 +193,14 @@ impl DirkManager { &self, pubkey: &BlsPublicKey, object_root: &[u8; 32], + module_signing_id: Option<&B256>, ) -> Result { match self.consensus_accounts.get(pubkey) { Some(Account::Simple(account)) => { - self.request_simple_signature(account, object_root).await + self.request_simple_signature(account, object_root, module_signing_id).await } Some(Account::Distributed(account)) => { - self.request_distributed_signature(account, object_root).await + self.request_distributed_signature(account, object_root, module_signing_id).await } None => Err(SignerModuleError::UnknownConsensusSigner(pubkey.to_vec())), } @@ -210,13 +211,14 @@ impl DirkManager { &self, pubkey: &BlsPublicKey, object_root: &[u8; 32], + module_signing_id: Option<&B256>, ) -> Result { match self.proxy_accounts.get(pubkey) { Some(ProxyAccount { inner: Account::Simple(account), .. }) => { - self.request_simple_signature(account, object_root).await + self.request_simple_signature(account, object_root, module_signing_id).await } Some(ProxyAccount { inner: Account::Distributed(account), .. 
}) => { - self.request_distributed_signature(account, object_root).await + self.request_distributed_signature(account, object_root, module_signing_id).await } None => Err(SignerModuleError::UnknownProxySigner(pubkey.to_vec())), } @@ -227,12 +229,22 @@ impl DirkManager { &self, account: &SimpleAccount, object_root: &[u8; 32], + module_signing_id: Option<&B256>, ) -> Result { let domain = compute_domain(self.chain, COMMIT_BOOST_DOMAIN); + let data = match module_signing_id { + Some(id) => compute_signing_root(&types::PropCommitSigningInfo { + data: *object_root, + module_signing_id: id.0, + }) + .to_vec(), + None => object_root.to_vec(), + }; + let response = SignerClient::new(account.connection.clone()) .sign(SignRequest { - data: object_root.to_vec(), + data, domain: domain.to_vec(), id: Some(sign_request::Id::PublicKey(account.public_key.to_vec())), }) @@ -257,15 +269,26 @@ impl DirkManager { &self, account: &DistributedAccount, object_root: &[u8; 32], + module_signing_id: Option<&B256>, ) -> Result { let mut partials = Vec::with_capacity(account.participants.len()); let mut requests = Vec::with_capacity(account.participants.len()); + let data = match module_signing_id { + Some(id) => compute_signing_root(&types::PropCommitSigningInfo { + data: *object_root, + module_signing_id: id.0, + }) + .to_vec(), + None => object_root.to_vec(), + }; + for (id, channel) in account.participants.iter() { + let data_copy = data.clone(); let request = async move { SignerClient::new(channel.clone()) .sign(SignRequest { - data: object_root.to_vec(), + data: data_copy, domain: compute_domain(self.chain, COMMIT_BOOST_DOMAIN).to_vec(), id: Some(sign_request::Id::Account(account.name.clone())), }) @@ -336,7 +359,7 @@ impl DirkManager { let message = ProxyDelegation { delegator: consensus, proxy: proxy_account.inner.public_key() }; let delegation_signature = - self.request_consensus_signature(&consensus, &message.tree_hash_root().0).await?; + self.request_consensus_signature(&consensus, &message.tree_hash_root().0, None).await?; let delegation = SignedProxyDelegation { message, signature: delegation_signature }; diff --git a/crates/signer/src/manager/local.rs b/crates/signer/src/manager/local.rs index 9a423d78..a242a754 100644 --- a/crates/signer/src/manager/local.rs +++ b/crates/signer/src/manager/local.rs @@ -328,10 +328,12 @@ mod tests { // Verify signature let domain = compute_domain(CHAIN, COMMIT_BOOST_DOMAIN); - let signing_root = compute_signing_root(&types::PropCommitSigningData { - object_root: data_root.tree_hash_root().0, + let signing_root = compute_signing_root(&types::SigningData { + object_root: compute_signing_root(&types::PropCommitSigningInfo { + data: data_root.tree_hash_root().0, + module_signing_id: module_signing_id.0, + }), signing_domain: domain, - module_signing_id: module_signing_id.0, }); let validation_result = verify_bls_signature(&consensus_pk, &signing_root, &sig); @@ -408,10 +410,12 @@ mod tests { // Verify signature let domain = compute_domain(CHAIN, COMMIT_BOOST_DOMAIN); - let signing_root = compute_signing_root(&types::PropCommitSigningData { - object_root: data_root.tree_hash_root().0, + let signing_root = compute_signing_root(&types::SigningData { + object_root: compute_signing_root(&types::PropCommitSigningInfo { + data: data_root.tree_hash_root().0, + module_signing_id: module_signing_id.0, + }), signing_domain: domain, - module_signing_id: module_signing_id.0, }); let validation_result = verify_bls_signature(&proxy_pk, &signing_root, &sig); @@ -495,10 +499,12 @@ mod tests 
{ // Verify signature let domain = compute_domain(CHAIN, COMMIT_BOOST_DOMAIN); - let signing_root = compute_signing_root(&types::PropCommitSigningData { - object_root: data_root.tree_hash_root().0, + let signing_root = compute_signing_root(&types::SigningData { + object_root: compute_signing_root(&types::PropCommitSigningInfo { + data: data_root.tree_hash_root().0, + module_signing_id: module_signing_id.0, + }), signing_domain: domain, - module_signing_id: module_signing_id.0, }); let validation_result = verify_ecdsa_signature(&proxy_pk, &signing_root, &sig); diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index 3a169512..ff570264 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -39,7 +39,6 @@ use uuid::Uuid; use crate::{ error::SignerModuleError, - hasher::{keccak::KeccakHasher, SigningHasher}, manager::{dirk::DirkManager, local::LocalSigningManager, SigningManager}, metrics::{uri_to_tag, SIGNER_METRICS_REGISTRY, SIGNER_STATUS}, }; @@ -57,16 +56,10 @@ struct JwtAuthFailureInfo { } #[derive(Clone)] -struct SigningState -where - H: SigningHasher, -{ +struct SigningState { /// Manager handling different signing methods manager: Arc>, - /// Hasher used to create unique hashes for signing requests - hasher: H, - /// Map of modules ids to JWT configurations. This also acts as registry of /// all modules running jwts: Arc>, @@ -91,7 +84,6 @@ impl SigningService { let state = SigningState { manager: Arc::new(RwLock::new(start_manager(config.clone()).await?)), - hasher: KeccakHasher::new(), jwts: config.mod_signing_configs.into(), jwt_auth_failures: Arc::new(RwLock::new(HashMap::new())), jwt_auth_fail_limit: config.jwt_auth_fail_limit, @@ -143,8 +135,8 @@ impl SigningService { } /// Authentication middleware layer -async fn jwt_auth( - State(state): State>, +async fn jwt_auth( + State(state): State, TypedHeader(auth): TypedHeader>, addr: ConnectInfo, mut req: Request, @@ -175,8 +167,8 @@ async fn jwt_auth( /// Checks if the incoming request needs to be rate limited due to previous JWT /// authentication failures -async fn check_jwt_rate_limit( - state: &SigningState, +async fn check_jwt_rate_limit( + state: &SigningState, client_ip: &String, ) -> Result<(), SignerModuleError> { let mut failures = state.jwt_auth_failures.write().await; @@ -212,9 +204,9 @@ async fn check_jwt_rate_limit( } /// Checks if a request can successfully authenticate with the JWT secret -async fn check_jwt_auth( +async fn check_jwt_auth( auth: &Authorization, - state: &SigningState, + state: &SigningState, ) -> Result { let jwt: Jwt = auth.token().to_string().into(); @@ -251,9 +243,9 @@ async fn handle_status() -> Result { } /// Implements get_pubkeys from the Signer API -async fn handle_get_pubkeys( +async fn handle_get_pubkeys( Extension(module_id): Extension, - State(state): State>, + State(state): State, ) -> Result { let req_id = Uuid::new_v4(); @@ -272,9 +264,9 @@ async fn handle_get_pubkeys( } /// Implements request_signature from the Signer API -async fn handle_request_signature( +async fn handle_request_signature( Extension(module_id): Extension, - State(state): State>, + State(state): State, Json(request): Json, ) -> Result { let req_id = Uuid::new_v4(); @@ -285,40 +277,34 @@ async fn handle_request_signature( let res = match &*manager { SigningManager::Local(local_manager) => match request { SignRequest::Consensus(SignConsensusRequest { ref object_root, ref pubkey }) => { - let hash = state.hasher.hash(object_root, signing_id); - info!("Signing hash: 
{hash:?}"); local_manager - .sign_consensus(pubkey, &hash, Some(signing_id)) + .sign_consensus(pubkey, object_root, Some(signing_id)) .await .map(|sig| Json(sig).into_response()) } SignRequest::ProxyBls(SignProxyRequest { ref object_root, proxy: ref bls_key }) => { - let hash = state.hasher.hash(object_root, signing_id); local_manager - .sign_proxy_bls(bls_key, &hash, Some(signing_id)) + .sign_proxy_bls(bls_key, object_root, Some(signing_id)) .await .map(|sig| Json(sig).into_response()) } SignRequest::ProxyEcdsa(SignProxyRequest { ref object_root, proxy: ref ecdsa_key }) => { - let hash = state.hasher.hash(object_root, signing_id); local_manager - .sign_proxy_ecdsa(ecdsa_key, &hash, Some(signing_id)) + .sign_proxy_ecdsa(ecdsa_key, object_root, Some(signing_id)) .await .map(|sig| Json(sig).into_response()) } }, SigningManager::Dirk(dirk_manager) => match request { SignRequest::Consensus(SignConsensusRequest { ref object_root, ref pubkey }) => { - let hash = state.hasher.hash(object_root, signing_id); dirk_manager - .request_consensus_signature(pubkey, &hash) + .request_consensus_signature(pubkey, object_root, Some(signing_id)) .await .map(|sig| Json(sig).into_response()) } SignRequest::ProxyBls(SignProxyRequest { ref object_root, proxy: ref bls_key }) => { - let hash = state.hasher.hash(object_root, signing_id); dirk_manager - .request_proxy_signature(bls_key, &hash) + .request_proxy_signature(bls_key, object_root, Some(signing_id)) .await .map(|sig| Json(sig).into_response()) } @@ -341,9 +327,9 @@ async fn handle_request_signature( res } -async fn handle_generate_proxy( +async fn handle_generate_proxy( Extension(module_id): Extension, - State(state): State>, + State(state): State, Json(request): Json, ) -> Result { let req_id = Uuid::new_v4(); @@ -381,8 +367,8 @@ async fn handle_generate_proxy( res } -async fn handle_reload( - State(mut state): State>, +async fn handle_reload( + State(mut state): State, ) -> Result { let req_id = Uuid::new_v4(); diff --git a/tests/tests/signer_request_sig.rs b/tests/tests/signer_request_sig.rs index a6ca7be3..1990172e 100644 --- a/tests/tests/signer_request_sig.rs +++ b/tests/tests/signer_request_sig.rs @@ -73,7 +73,7 @@ async fn test_signer_sign_request_good() -> Result<()> { let signature = response.text().await?; assert!(!signature.is_empty(), "Signature should not be empty"); - let expected_signature = "\"0x992e6fc29ba219e6afeceb91df3f58ebaa6c82ea8d00b3f4564a4d47cfd886c076ade87c6df765ba3fdcc5ba71513d8f0f12b17c76e4859126ab902a3ae5e8789eb3c9c49e8e9c5cd70ef0a93c76ca16763a940b991192eaba97dcc8c060ff7a\""; + let expected_signature = "\"0xa43e623f009e615faa3987368f64d6286a4103de70e9a81d82562c50c91eae2d5d6fb9db9fe943aa8ee42fd92d8210c1149f25ed6aa72a557d74a0ed5646fdd0e8255ec58e3e2931695fe913863ba0cdf90d29f651bce0a34169a6f6ce5b3115\""; assert_eq!(signature, expected_signature, "Signature does not match expected value"); Ok(()) @@ -105,7 +105,7 @@ async fn test_signer_sign_request_different_module() -> Result<()> { let signature = response.text().await?; assert!(!signature.is_empty(), "Signature should not be empty"); - let incorrect_signature = "\"0x992e6fc29ba219e6afeceb91df3f58ebaa6c82ea8d00b3f4564a4d47cfd886c076ade87c6df765ba3fdcc5ba71513d8f0f12b17c76e4859126ab902a3ae5e8789eb3c9c49e8e9c5cd70ef0a93c76ca16763a940b991192eaba97dcc8c060ff7a\""; + let incorrect_signature = 
"\"0xa43e623f009e615faa3987368f64d6286a4103de70e9a81d82562c50c91eae2d5d6fb9db9fe943aa8ee42fd92d8210c1149f25ed6aa72a557d74a0ed5646fdd0e8255ec58e3e2931695fe913863ba0cdf90d29f651bce0a34169a6f6ce5b3115\""; assert_ne!(signature, incorrect_signature, "Signature does not match expected value"); Ok(()) From 996703369296c78611bb661f18649a8fc23f613c Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 1 Jul 2025 17:07:47 -0400 Subject: [PATCH 49/67] Added the signer request guide --- docs/docs/developing/preconf-signing.md | 59 ------- docs/docs/developing/prop-commit-signing.md | 175 ++++++++++++++++++++ docs/docs/res/img/prop_commit_tree.png | Bin 0 -> 57442 bytes 3 files changed, 175 insertions(+), 59 deletions(-) delete mode 100644 docs/docs/developing/preconf-signing.md create mode 100644 docs/docs/developing/prop-commit-signing.md create mode 100644 docs/docs/res/img/prop_commit_tree.png diff --git a/docs/docs/developing/preconf-signing.md b/docs/docs/developing/preconf-signing.md deleted file mode 100644 index 746fd434..00000000 --- a/docs/docs/developing/preconf-signing.md +++ /dev/null @@ -1,59 +0,0 @@ -# Requesting Preconfirmation Signatures with Commit Boost - -When you create a new validator on the Ethereum network, one of the steps is the generation of a new BLS private key (commonly known as the "validator key" or the "signer key") and its corresponding BLS public key (the "validator pubkey", used as an identifier). Typically this private key will be used by an Ethereum consensus client to sign things such as attestations and blocks for publication on the Beacon chain. These signatures prove that you, as the owner of that private key, approve of the data being signed. However, as general-purpose private keys, they can also be used to sign *other* arbitrary messages not destined for the Beacon chain. - -Commit Boost takes advantage of this by offering a standard known as **preconfirmations**. These are arbitrary messages (albeit with some important rules), similar to the kind used on the Beacon chain, that have been signed by one of the owner's private keys. Modules interested in leveraging Commit Boost's preconfirmations can construct their own data in whatever format they like and request that Commit Boost's **signer service** generate a signature for it with a particular private key. The module can then use that signature to verify the data was signed by that user. - -Commit Boost supports preconfirmation signatures for both BLS private keys (identified by their public key) and ECDSA private keys (identified by their Ethereum address). - - -## Rules of Preconfirmation Signatures - -Preconfirmation signatures produced by Commit Boost's signer service conform to the following rules: - -- Signatures are **unique** to a given EVM chain (identified by its [chain ID](https://chainlist.org/)). Signatures generated for one chain will not work on a different chain. -- Signatures are **unique** to Commit Boost preconfirmations. The signer service **cannot** be used to create signatures that could be used for other applications, such as for attestations on the Beacon chain. While the signer service has access to the same validator private keys used to attest on the Beacon chain, it cannot create signatures that would get you slashed on the Beacon chain. -- Signatures are **unique** to a particular module. 
One module cannot, for example, request an identical payload as another module and effectively "forge" a signature for the second module; identical payloads from two separate modules will result in two separate signatures. -- The data payload being signed must be a **32-byte array**, typically serializd as a 64-character hex string with an optional `0x` prefix. The value itself is arbitrary, as long as it has meaning to the requester - though it is typically the 256-bit hash of some kind of data. -- If requesting a signature from a BLS key, the resulting signature will be a standard BLS signature (96 bytes in length). -- If requesting a signature from an ECDSA key, the resulting signature will be a standard Ethereum RSV signature (65 bytes in length). - - -## Configuring a Module for Preconfirmations - -Commit Boost's signer service must be configured prior to launching to expect requests from your module. There are two main parts: - -1. An entry for your module into [Commit Boost's configuration file](../get_started/configuration.md#custom-module). This must include a unique ID for your module, the line `type = "commit"`, and include a unique [signing ID](#the-signing-id) for your module. Generally you should provide values for these in your documentation, so your users can reference it when configuring their own Commit Boost node. - -2. A JWT secret used by your module to authenticate with the signer in HTTP requests. *{Placeholder for more details on setting this here}* - -Once the user has configured both Commit Boost and your module with these settings, your module will be able to authenticate with the signer service and request signatures. - - -## The Signing ID - -Your module's signing ID is a 32-byte value that is used as a unique identifier within the signing process. Preconfirmation signatures incorporate this value along with the data being signed as a way to create signatures that are exclusive to your module, so other modules can't maliciously construct signatures that appear to be from your module. Your module must have this ID incorporated into itself ahead of time, and the user must include this same ID within their Commit Boost configuration file section for your module. Commit Boost does not maintain a global registry of signing IDs, so this is a value you should provide to your users in your documentation. - -The Signing ID is decoupled from your module's human-readable name (the `module_id` field in the Commit Boost configuration file) so that any changes to your module name will not invalidate signatures from previous versions. Similarly, if you don't change the module ID but *want* to invalidate previous signatures, you can modify the signing ID and it will do so. Just ensure your users are made aware of the change, so they can update it in their Commit Boost configuration files accordingly. - - -## Structure of a Signature - -The form preconfirmation signatures take depends on the type of signature being requested. - - -### BLS Signatures - -Signatures requested from BLS keys take the standard form (96-byte values). 
Generating them is done by constructing a 32-byte signing root from the hash of an SSZ Merkle tree that , which is typical of BLS signatures used by the Beacon chain: - - - - - - - - - - - -## Requesting a Signature from the Signer diff --git a/docs/docs/developing/prop-commit-signing.md b/docs/docs/developing/prop-commit-signing.md new file mode 100644 index 00000000..97263ba7 --- /dev/null +++ b/docs/docs/developing/prop-commit-signing.md @@ -0,0 +1,175 @@ +# Requesting Proposer Commitment Signatures with Commit Boost + +When you create a new validator on the Ethereum network, one of the steps is the generation of a new BLS private key (commonly known as the "validator key" or the "signer key") and its corresponding BLS public key (the "validator pubkey", used as an identifier). Typically this private key will be used by an Ethereum consensus client to sign things such as attestations and blocks for publication on the Beacon chain. These signatures prove that you, as the owner of that private key, approve of the data being signed. However, as general-purpose private keys, they can also be used to sign *other* arbitrary messages not destined for the Beacon chain. + +Commit Boost takes advantage of this by offering a standard known as **proposer commitments**. These are arbitrary messages (albeit with some important rules), similar to the kind used on the Beacon chain, that have been signed by one of the owner's private keys. Modules interested in leveraging Commit Boost's proposer commitments can construct their own data in whatever format they like and request that Commit Boost's **signer service** generate a signature for it with a particular private key. The module can then use that signature to verify the data was signed by that user. + +Commit Boost supports proposer commitment signatures for both BLS private keys (identified by their public key) and ECDSA private keys (identified by their Ethereum address). + + +## Rules of Proposer Commitment Signatures + +Proposer commitment signatures produced by Commit Boost's signer service conform to the following rules: + +- Signatures are **unique** to a given EVM chain (identified by its [chain ID](https://chainlist.org/)). Signatures generated for one chain will not work on a different chain. +- Signatures are **unique** to Commit Boost proposer commitments. The signer service **cannot** be used to create signatures that could be used for other applications, such as for attestations on the Beacon chain. While the signer service has access to the same validator private keys used to attest on the Beacon chain, it cannot create signatures that would get you slashed on the Beacon chain. +- Signatures are **unique** to a particular module. One module cannot, for example, request an identical payload as another module and effectively "forge" a signature for the second module; identical payloads from two separate modules will result in two separate signatures. +- The data payload being signed must be a **32-byte array**, typically serialized as a 64-character hex string with an optional `0x` prefix. The value itself is arbitrary, as long as it has meaning to the requester - though it is typically the 256-bit hash of some kind of data (see the example below). +- If requesting a signature from a BLS key, the resulting signature will be a standard BLS signature (96 bytes in length). +- If requesting a signature from an ECDSA key, the resulting signature will be a standard Ethereum RSV signature (65 bytes in length).
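+
+For example, a module might derive its 32-byte payload by hashing whatever internal message encoding it uses. The snippet below is a minimal sketch of that pattern; the `sha2` crate and the `object_root_for` helper are illustrative choices, not part of Commit Boost itself:
+
+```rust
+use sha2::{Digest, Sha256};
+
+/// Collapse an arbitrary module-defined message into the 32-byte
+/// `object_root` expected by the signer service.
+fn object_root_for(message: &[u8]) -> [u8; 32] {
+    Sha256::digest(message).into()
+}
+```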
+ + +## Configuring a Module for Proposer Commitments + +Commit Boost's signer service must be configured prior to launching to expect requests from your module. There are two main parts: + +1. An entry for your module in [Commit Boost's configuration file](../get_started/configuration.md#custom-module). This must include a unique ID for your module, the line `type = "commit"`, and a unique [signing ID](#the-signing-id) for your module. Generally you should provide values for these in your documentation, so your users can reference them when configuring their own Commit Boost node. + +2. A JWT secret used by your module to authenticate with the signer in HTTP requests. *{Placeholder for more details on setting this here}* + +Once the user has configured both Commit Boost and your module with these settings, your module will be able to authenticate with the signer service and request signatures. + + +## The Signing ID + +Your module's signing ID is a 32-byte value that is used as a unique identifier within the signing process. Proposer commitment signatures incorporate this value along with the data being signed as a way to create signatures that are exclusive to your module, so other modules can't maliciously construct signatures that appear to be from your module. Your module must have this ID incorporated into itself ahead of time, and the user must include this same ID within their Commit Boost configuration file section for your module. Commit Boost does not maintain a global registry of signing IDs, so this is a value you should provide to your users in your documentation. + +The Signing ID is decoupled from your module's human-readable name (the `module_id` field in the Commit Boost configuration file) so that any changes to your module name will not invalidate signatures from previous versions. Similarly, if you don't change the module ID but *want* to invalidate previous signatures, you can modify the signing ID and it will do so. Just ensure your users are made aware of the change, so they can update it in their Commit Boost configuration files accordingly. + + +## Structure of a Signature + +The form proposer commitment signatures take depends on the type of signature being requested. BLS signatures take the [standard form](https://eth2book.info/latest/part2/building_blocks/signatures/) (96-byte values). ECDSA (Ethereum EL) signatures take the [standard Ethereum ECDSA `r,s,v` signature form](https://forum.openzeppelin.com/t/sign-it-like-you-mean-it-creating-and-verifying-ethereum-signatures/697). In both cases, the data being signed is a 32-byte hash - the root hash of an SSZ Merkle tree, described below: + +
+![The proposer commitment signing tree](../res/img/prop_commit_tree.png) + + +
+ +where: + +- `Request Data` is a 32-byte array that serves as the data you want to sign. This is typically itself a hash of some more complex data that your module constructs. + +- `Signing ID` is your module's 32-byte signing ID. The signer service will load this for your module from its configuration file. + +- `Domain` is the 32-byte output of the [compute_domain()](https://eth2book.info/capella/part2/building_blocks/signatures/#domain-separation-and-forks) function in the Beacon specification. The 4-byte domain type in this case is not a standard Beacon domain type, but rather Commit Boost's own domain type: `0x6D6D6F43`. + +The data signed in a proposer commitment is the 32-byte root of this tree (the green `Root` box). Note that calculating this involves computing the Merkle root of two separate trees: first the blue data subtree (with the original request data and the signing ID) to establish the blue `Root` value, and then again with a tree created from that value and the `Domain`. + +Many languages provide libraries for computing the root of an SSZ Merkle tree, such as [fastssz for Go](https://github.com/ferranbt/fastssz) or [tree_hash for Rust](https://docs.rs/tree_hash/latest/tree_hash/). When verifying proposer commitment signatures, use a library that supports Merkle tree root hashing, the `compute_domain()` operation, and validation for signatures generated by your key of choice. A minimal Rust sketch of this computation is included at the end of this page. + + +## Requesting a Proposer Commitment from the Signer + +Prior to requesting a signature from the signer service, first ensure that Commit Boost has been [configured](#configuring-a-module-for-proposer-commitments) with your module's signing ID and JWT secret. + +The signer service can be accessed via an HTTP API. In Docker mode, this will be within the `cb_signer` container at the `/signer/v1/request_signature` route (for example, using the default port of `20000`, the endpoint will be `http://cb_signer:20000/signer/v1/request_signature`). Submitting a request must be done via the `POST` method. + + +### Headers + +- Set `Content-Type` to `application/json`. +- Set `Accept` to `application/json`, as responses are quoted strings. Other formats are not currently supported. +- Set `Authorization` to a standard JWT string representing your module's JWT authentication information. For the claims, you can add a `module` claim indicating the human-readable name of your module. + + +### BLS Proposer Keys + +If requesting a signature directly from a proposer pubkey, use the following body specification: + +```json +{ + "type": "consensus", + "pubkey": "0x1234abcd...", + "object_root": "0x01020304..." +} +``` + +where: + +- `pubkey` is the 48-byte BLS public key, with optional `0x` prefix, of the proposer key that you want to request a signature from. +- `object_root` is the 32-byte data you want to sign, with optional `0x` prefix. + + +### BLS Proxy Keys + +If requesting a signature indirectly from a proposer key via a [proxy key](./commit-module.md#with-a-proxy-key), use the following body specification: + +```json +{ + "type": "proxy_bls", + "proxy": "0x1234abcd...", + "object_root": "0x01020304..." +} +``` + +where: + +- `proxy` is the 48-byte BLS public key, with optional `0x` prefix, of the proxy key that you want to request a signature from. +- `object_root` is the 32-byte data you want to sign, with optional `0x` prefix. + + +### ECDSA Proxy Keys + +**NOTE:** ECDSA proxy key support is not available when using Dirk.
+ +If requesting a signature indirectly from an Ethereum private key via a [proxy key](./commit-module.md#with-a-proxy-key), use the following body specification: + +```json +{ + "type": "proxy_ecdsa", + "proxy": "0x1234abcd...", + "object_root": "0x01020304..." +} +``` + +where: + +- `proxy` is the 20-byte Ethereum address, with optional `0x` prefix, of the ECDSA proxy key that you want to request a signature from. +- `object_root` is the 32-byte data you want to sign, with optional `0x` prefix. + + +### Response + +The response for any of the above will be one of the following, provided in plaintext format (not JSON). + + +#### `200 OK` + +A successful signing request, with the signature provided as a plaintext quoted hex-encoded string, with a `0x` prefix. For example, the response body would look like: +``` +"0xa43e623f009e615faa3987368f64d6286a4103de70e9a81d82562c50c91eae2d5d6fb9db9fe943aa8ee42fd92d8210c1149f25ed6aa72a557d74a0ed5646fdd0e8255ec58e3e2931695fe913863ba0cdf90d29f651bce0a34169a6f6ce5b3115" +``` + +#### `401 Unauthorized` + +Your module did not provide a JWT string in the request's authorization header, or the JWT string was not configured in the signer service's configuration file as belonging to your module. + + +#### `400 Bad Request` + +This can occur in several scenarios: + +- You requested an operation while using the Dirk signer mode instead of locally-managed signer mode, but Dirk doesn't support that operation. +- Something went wrong while preparing your request; the error text will provide more information. + + +#### `502 Bad Gateway` + +The signer service is running in Dirk signer mode, but Dirk could not be reached. + + +#### `404 Not Found` + +You either requested a route that doesn't exist, or you requested a signature from a key that does not exist. + + +#### `429 Too Many Requests` + +Your module attempted and failed JWT authentication too many times recently, and is currently timed out. It cannot make any more requests until the timeout ends. + + +#### `500 Internal Server Error` + +Your request was valid, but something went wrong internally that prevented it from being fulfilled.
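+
+
+## Example: Reconstructing the Signing Root
+
+The snippet below is a minimal sketch (not part of the signer service itself) of how a verifier could recompute the signing root described in [Structure of a Signature](#structure-of-a-signature). It assumes the [`tree_hash`](https://docs.rs/tree_hash/latest/tree_hash/) and `tree_hash_derive` crates, that the two structs mirror the `PropCommitSigningInfo` and `SigningData` types used internally by the signer, and that the 32-byte `signing_domain` has already been derived with `compute_domain()` using Commit Boost's `0x6D6D6F43` domain type:
+
+```rust
+use tree_hash::TreeHash;
+use tree_hash_derive::TreeHash;
+
+/// Inner tree: the module's request data paired with its signing ID.
+#[derive(TreeHash)]
+struct PropCommitSigningInfo {
+    data: [u8; 32],
+    module_signing_id: [u8; 32],
+}
+
+/// Outer tree: the inner root paired with the Commit Boost signing domain.
+#[derive(TreeHash)]
+struct SigningData {
+    object_root: [u8; 32],
+    signing_domain: [u8; 32],
+}
+
+/// Recompute the 32-byte message that a proposer commitment signature covers.
+fn prop_commit_signing_root(
+    request_data: [u8; 32],
+    module_signing_id: [u8; 32],
+    signing_domain: [u8; 32],
+) -> [u8; 32] {
+    // Root of the inner (data) tree: request data + module signing ID.
+    let info_root = PropCommitSigningInfo { data: request_data, module_signing_id }
+        .tree_hash_root()
+        .0;
+    // Root of the outer tree: inner root + domain. This is the value that the
+    // BLS or ECDSA key actually signs.
+    SigningData { object_root: info_root, signing_domain }.tree_hash_root().0
+}
+```
+
+Note that a module requesting a signature only ever sends its raw 32-byte `object_root`; the signer service performs the wrapping above itself, using the `signing_id` from its configuration file.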
\ No newline at end of file diff --git a/docs/docs/res/img/prop_commit_tree.png b/docs/docs/res/img/prop_commit_tree.png new file mode 100644 index 0000000000000000000000000000000000000000..1e36f4b4d3fcac21e74255fa6d617a206ad2a323 GIT binary patch literal 57442
[base85-encoded PNG image data omitted]
literal 0 HcmV?d00001 From adb1cb85c063e699316a3d8872475060e42d26f5 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 1 Jul 2025 17:38:07 -0400 Subject: [PATCH 50/67] Added quotes to some HTML --- docs/docs/developing/prop-commit-signing.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/developing/prop-commit-signing.md b/docs/docs/developing/prop-commit-signing.md index 97263ba7..175b1e02 100644 --- a/docs/docs/developing/prop-commit-signing.md +++ b/docs/docs/developing/prop-commit-signing.md @@ -43,7 +43,7 @@ The form proposer commitment signatures take depends on the type of signature be
- +
From 1c3a07d6bea3725fea638059657ff50d90845037 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 2 Jul 2025 01:22:15 -0400 Subject: [PATCH 51/67] Added some simple JWT secret info --- docs/docs/developing/prop-commit-signing.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/developing/prop-commit-signing.md b/docs/docs/developing/prop-commit-signing.md index 175b1e02..fa6660de 100644 --- a/docs/docs/developing/prop-commit-signing.md +++ b/docs/docs/developing/prop-commit-signing.md @@ -25,7 +25,7 @@ Commit Boost's signer service must be configured prior to launching to expect re 1. An entry for your module into [Commit Boost's configuration file](../get_started/configuration.md#custom-module). This must include a unique ID for your module, the line `type = "commit"`, and include a unique [signing ID](#the-signing-id) for your module. Generally you should provide values for these in your documentation, so your users can reference it when configuring their own Commit Boost node. -2. A JWT secret used by your module to authenticate with the signer in HTTP requests. *{Placeholder for more details on setting this here}* +2. A JWT secret used by your module to authenticate with the signer in HTTP requests. This must be a string that both the Commit Boost signer can read and your module can read, but no other modules should be allowed to access it. The user should be responsible for determining an appropriate secret and providing it to the Commit Boost signer service securely; your module will need some way to accept this, typically via a command line argument that accepts a path to a file with the secret or as an environment variable. Once the user has configured both Commit Boost and your module with these settings, your module will be able to authenticate with the signer service and request signatures. From daf31478bee662190ef0bd76e19ce09792991755 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 2 Jul 2025 01:29:37 -0400 Subject: [PATCH 52/67] Adding a closing tag --- docs/docs/developing/prop-commit-signing.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/docs/developing/prop-commit-signing.md b/docs/docs/developing/prop-commit-signing.md index fa6660de..c838dcbf 100644 --- a/docs/docs/developing/prop-commit-signing.md +++ b/docs/docs/developing/prop-commit-signing.md @@ -43,7 +43,7 @@ The form proposer commitment signatures take depends on the type of signature be
- +
From d5641df97f389c25c7ca21cbf72ab07edd8c4283 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 9 Jul 2025 03:42:30 -0400 Subject: [PATCH 53/67] Added prop commit signature verification helpers for modules to use --- bin/src/lib.rs | 3 ++ crates/common/src/signature.rs | 54 ++++++++++++++++++++++++++++++++-- examples/da_commit/src/main.rs | 44 +++++++++++++++++++++++++-- 3 files changed, 96 insertions(+), 5 deletions(-) diff --git a/bin/src/lib.rs b/bin/src/lib.rs index 126847b6..122a35fc 100644 --- a/bin/src/lib.rs +++ b/bin/src/lib.rs @@ -10,6 +10,9 @@ pub mod prelude { load_pbs_custom_config, LogsSettings, StartCommitModuleConfig, PBS_MODULE_NAME, }, pbs::{BuilderEvent, BuilderEventClient, OnBuilderApiEvent}, + signature::{ + verify_proposer_commitment_signature_bls, verify_proposer_commitment_signature_ecdsa, + }, signer::{BlsPublicKey, BlsSignature, EcdsaSignature}, types::Chain, utils::{initialize_tracing_log, utcnow_ms, utcnow_ns, utcnow_sec, utcnow_us}, diff --git a/crates/common/src/signature.rs b/crates/common/src/signature.rs index cace9570..18d24b19 100644 --- a/crates/common/src/signature.rs +++ b/crates/common/src/signature.rs @@ -1,5 +1,5 @@ use alloy::{ - primitives::B256, + primitives::{Address, B256}, rpc::types::beacon::{constants::BLS_DST_SIG, BlsPublicKey, BlsSignature}, }; use tree_hash::TreeHash; @@ -8,7 +8,7 @@ use tree_hash_derive::TreeHash; use crate::{ constants::{COMMIT_BOOST_DOMAIN, GENESIS_VALIDATORS_ROOT}, error::BlstErrorWrapper, - signer::{verify_bls_signature, BlsSecretKey}, + signer::{verify_bls_signature, verify_ecdsa_signature, BlsSecretKey, EcdsaSignature}, types::{self, Chain}, }; @@ -109,6 +109,56 @@ pub fn sign_commit_boost_root( sign_message(secret_key, &signing_root) } +// ============================== +// === Signature Verification === +// ============================== + +/// Verifies that a proposer commitment signature was generated by the given BLS +/// key for the provided message, chain ID, and module signing ID. +pub fn verify_proposer_commitment_signature_bls( + chain: Chain, + pubkey: &BlsPublicKey, + msg: &impl TreeHash, + signature: &BlsSignature, + module_signing_id: B256, +) -> Result<(), BlstErrorWrapper> { + let object_root = msg.tree_hash_root().0; + let domain = compute_domain(chain, COMMIT_BOOST_DOMAIN); + let signing_root = compute_signing_root(&types::SigningData { + object_root: compute_signing_root(&types::PropCommitSigningInfo { + data: object_root, + module_signing_id: *module_signing_id, + }), + signing_domain: domain, + }); + verify_bls_signature(pubkey, &signing_root, signature) +} + +/// Verifies that a proposer commitment signature was generated by the given +/// ECDSA key for the provided message, chain ID, and module signing ID. 
+pub fn verify_proposer_commitment_signature_ecdsa( + chain: Chain, + address: &Address, + msg: &impl TreeHash, + signature: &EcdsaSignature, + module_signing_id: B256, +) -> Result<(), eyre::Report> { + let object_root = msg.tree_hash_root().0; + let domain = compute_domain(chain, COMMIT_BOOST_DOMAIN); + let signing_root = compute_signing_root(&types::SigningData { + object_root: compute_signing_root(&types::PropCommitSigningInfo { + data: object_root, + module_signing_id: *module_signing_id, + }), + signing_domain: domain, + }); + verify_ecdsa_signature(address, &signing_root, signature) +} + +// =============== +// === Testing === +// =============== + #[cfg(test)] mod tests { diff --git a/examples/da_commit/src/main.rs b/examples/da_commit/src/main.rs index 71b61c53..802cfa4f 100644 --- a/examples/da_commit/src/main.rs +++ b/examples/da_commit/src/main.rs @@ -1,6 +1,6 @@ use std::time::Duration; -use alloy::primitives::Address; +use alloy::primitives::{b256, Address, B256}; use commit_boost::prelude::*; use eyre::{OptionExt, Result}; use lazy_static::lazy_static; @@ -9,6 +9,13 @@ use serde::Deserialize; use tokio::time::sleep; use tracing::{error, info}; +// This is the signing ID used for the DA Commit module. +// Signatures produced by the signer service will incorporate this ID as part of +// the signature, preventing other modules from using the same signature for +// different purposes. +pub const DA_COMMIT_SIGNING_ID: B256 = + b256!("0xf30382906f594b88a33f4427c94062c2a66cf9bc4886475897ac0713f7f84ed7"); + // You can define custom metrics and a custom registry for the business logic of // your module. These will be automatically scaped by the Prometheus server lazy_static! { @@ -83,17 +90,38 @@ impl DaCommitService { ) -> Result<()> { let datagram = Datagram { data }; + // Request a signature directly from a BLS key let request = SignConsensusRequest::builder(pubkey).with_msg(&datagram); let signature = self.config.signer_client.request_consensus_signature(request).await?; - info!("Proposer commitment (consensus): {}", signature); + match verify_proposer_commitment_signature_bls( + self.config.chain, + &pubkey, + &datagram, + &signature, + DA_COMMIT_SIGNING_ID, + ) { + Ok(_) => info!("Signature verified successfully"), + Err(err) => error!(%err, "Signature verification failed"), + }; + // Request a signature from a proxy BLS key let proxy_request_bls = SignProxyRequest::builder(proxy_bls).with_msg(&datagram); let proxy_signature_bls = self.config.signer_client.request_proxy_signature_bls(proxy_request_bls).await?; - info!("Proposer commitment (proxy BLS): {}", proxy_signature_bls); + match verify_proposer_commitment_signature_bls( + self.config.chain, + &proxy_bls, + &datagram, + &signature, + DA_COMMIT_SIGNING_ID, + ) { + Ok(_) => info!("Signature verified successfully"), + Err(err) => error!(%err, "Signature verification failed"), + }; + // If ECDSA keys are enabled, request a signature from a proxy ECDSA key if let Some(proxy_ecdsa) = proxy_ecdsa { let proxy_request_ecdsa = SignProxyRequest::builder(proxy_ecdsa).with_msg(&datagram); let proxy_signature_ecdsa = self @@ -102,6 +130,16 @@ impl DaCommitService { .request_proxy_signature_ecdsa(proxy_request_ecdsa) .await?; info!("Proposer commitment (proxy ECDSA): {}", proxy_signature_ecdsa); + match verify_proposer_commitment_signature_ecdsa( + self.config.chain, + &proxy_ecdsa, + &datagram, + &proxy_signature_ecdsa, + DA_COMMIT_SIGNING_ID, + ) { + Ok(_) => info!("Signature verified successfully"), + Err(err) => error!(%err, 
"Signature verification failed"), + }; } SIG_RECEIVED_COUNTER.inc(); From 5da31bf46f98d227fa2014a2817c5d0d5e0db19a Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Thu, 10 Jul 2025 02:49:33 -0400 Subject: [PATCH 54/67] Fixed some params in da_commit --- examples/da_commit/src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/da_commit/src/main.rs b/examples/da_commit/src/main.rs index 802cfa4f..2f8845c8 100644 --- a/examples/da_commit/src/main.rs +++ b/examples/da_commit/src/main.rs @@ -14,7 +14,7 @@ use tracing::{error, info}; // the signature, preventing other modules from using the same signature for // different purposes. pub const DA_COMMIT_SIGNING_ID: B256 = - b256!("0xf30382906f594b88a33f4427c94062c2a66cf9bc4886475897ac0713f7f84ed7"); + b256!("0x6a33a23ef26a4836979edff86c493a69b26ccf0b4a16491a815a13787657431b"); // You can define custom metrics and a custom registry for the business logic of // your module. These will be automatically scaped by the Prometheus server @@ -114,7 +114,7 @@ impl DaCommitService { self.config.chain, &proxy_bls, &datagram, - &signature, + &proxy_signature_bls, DA_COMMIT_SIGNING_ID, ) { Ok(_) => info!("Signature verified successfully"), From 1a0efecb07a979e02e508f5220bd0ff35606b7f7 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 14 Jul 2025 17:01:19 -0400 Subject: [PATCH 55/67] Cleaned load_module_signing_configs a bit --- crates/common/src/config/signer.rs | 116 ++++++++++++++--------------- 1 file changed, 58 insertions(+), 58 deletions(-) diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index db2c9c01..76bfe4b8 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -279,68 +279,68 @@ pub fn load_module_signing_configs( jwt_secrets: &HashMap, ) -> Result> { let mut mod_signing_configs = HashMap::new(); - if let Some(modules) = &config.modules { - let mut seen_jwt_secrets = HashMap::new(); - let mut seen_signing_ids = HashMap::new(); - for module in modules { - // Validate the module ID - ensure!(!module.id.is_empty(), "Module ID cannot be empty"); - - // Make sure it hasn't been used yet - ensure!( - !mod_signing_configs.contains_key(&module.id), - "Duplicate module config detected: ID {} is already used", - module.id - ); + let modules = config.modules.as_ref().ok_or_eyre("No modules defined in the config")?; - // Make sure the JWT secret is present - let jwt_secret = match jwt_secrets.get(&module.id) { - Some(secret) => secret.clone(), - None => bail!("JWT secret for module {} is missing", module.id), - }; + let mut seen_jwt_secrets = HashMap::new(); + let mut seen_signing_ids = HashMap::new(); + for module in modules { + // Validate the module ID + ensure!(!module.id.is_empty(), "Module ID cannot be empty"); - // Make sure the signing ID is present - let signing_id = match &module.signing_id { - Some(id) => *id, - None => bail!("Signing ID for module {} is missing", module.id), - }; + // Make sure it hasn't been used yet + ensure!( + !mod_signing_configs.contains_key(&module.id), + "Duplicate module config detected: ID {} is already used", + module.id + ); - // Create the module signing config and validate it - let module_signing_config = - ModuleSigningConfig { module_name: module.id.clone(), jwt_secret, signing_id }; - module_signing_config - .validate() - .wrap_err(format!("Invalid signing config for module {}", module.id))?; - - // Check for duplicates in JWT secrets and signing IDs - match 
seen_jwt_secrets.get(&module_signing_config.jwt_secret) { - Some(existing_module) => { - bail!( - "Duplicate JWT secret detected for modules {} and {}", - existing_module, - module.id - ) - } - None => { - seen_jwt_secrets.insert(module_signing_config.jwt_secret.clone(), &module.id); - } - }; - match seen_signing_ids.get(&module_signing_config.signing_id) { - Some(existing_module) => { - bail!( - "Duplicate signing ID detected for modules {} and {}", - existing_module, - module.id - ) - } - None => { - seen_signing_ids.insert(module_signing_config.signing_id, &module.id); - signing_id - } - }; + // Make sure the JWT secret is present + let jwt_secret = match jwt_secrets.get(&module.id) { + Some(secret) => secret.clone(), + None => bail!("JWT secret for module {} is missing", module.id), + }; - mod_signing_configs.insert(module.id.clone(), module_signing_config); - } + // Make sure the signing ID is present + let signing_id = match &module.signing_id { + Some(id) => *id, + None => bail!("Signing ID for module {} is missing", module.id), + }; + + // Create the module signing config and validate it + let module_signing_config = + ModuleSigningConfig { module_name: module.id.clone(), jwt_secret, signing_id }; + module_signing_config + .validate() + .wrap_err(format!("Invalid signing config for module {}", module.id))?; + + // Check for duplicates in JWT secrets and signing IDs + match seen_jwt_secrets.get(&module_signing_config.jwt_secret) { + Some(existing_module) => { + bail!( + "Duplicate JWT secret detected for modules {} and {}", + existing_module, + module.id + ) + } + None => { + seen_jwt_secrets.insert(module_signing_config.jwt_secret.clone(), &module.id); + } + }; + match seen_signing_ids.get(&module_signing_config.signing_id) { + Some(existing_module) => { + bail!( + "Duplicate signing ID detected for modules {} and {}", + existing_module, + module.id + ) + } + None => { + seen_signing_ids.insert(module_signing_config.signing_id, &module.id); + signing_id + } + }; + + mod_signing_configs.insert(module.id.clone(), module_signing_config); } Ok(mod_signing_configs) From ee282da7f2756994ce755f050d5a7de35a3b789e Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 14 Jul 2025 17:28:05 -0400 Subject: [PATCH 56/67] Fixed some docs language --- docs/docs/developing/prop-commit-signing.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/docs/developing/prop-commit-signing.md b/docs/docs/developing/prop-commit-signing.md index c838dcbf..64bab425 100644 --- a/docs/docs/developing/prop-commit-signing.md +++ b/docs/docs/developing/prop-commit-signing.md @@ -7,9 +7,9 @@ Commit Boost takes advantage of this by offering a standard known as **proposer Commit Boost supports proposer commitment signatures for both BLS private keys (identified by their public key) and ECDSA private keys (identified by their Ethereum address). -## Rules of Preconfirmation Signatures +## Rules of Proposer Commitment Signatures -Preconfirmation signatures produced by Commit Boost's signer service conform to the following rules: +Proposer commitment signatures produced by Commit Boost's signer service conform to the following rules: - Signatures are **unique** to a given EVM chain (identified by its [chain ID](https://chainlist.org/)). Signatures generated for one chain will not work on a different chain. - Signatures are **unique** to Commit Boost proposer commitments. 
The signer service **cannot** be used to create signatures that could be used for other applications, such as for attestations on the Beacon chain. While the signer service has access to the same validator private keys used to attest on the Beacon chain, it cannot create signatures that would get you slashed on the Beacon chain. @@ -32,7 +32,7 @@ Once the user has configured both Commit Boost and your module with these settin ## The Signing ID -Your module's signing ID is a 32-byte value that is used as a unique identifier within the signing process. Preconfirmation signatures incorporate this value along with the data being signed as a way to create signatures that are exclusive to your module, so other modules can't maliciously construct signatures that appear to be from your module. Your module must have this ID incorporated into itself ahead of time, and the user must include this same ID within their Commit Boost configuration file section for your module. Commit Boost does not maintain a global registry of signing IDs, so this is a value you should provide to your users in your documentation. +Your module's signing ID is a 32-byte value that is used as a unique identifier within the signing process. Proposer commitment signatures incorporate this value along with the data being signed as a way to create signatures that are exclusive to your module, so other modules can't maliciously construct signatures that appear to be from your module. Your module must have this ID incorporated into itself ahead of time, and the user must include this same ID within their Commit Boost configuration file section for your module. Commit Boost does not maintain a global registry of signing IDs, so this is a value you should provide to your users in your documentation. The Signing ID is decoupled from your module's human-readable name (the `module_id` field in the Commit Boost configuration file) so that any changes to your module name will not invalidate signatures from previous versions. Similarly, if you don't change the module ID but *want* to invalidate previous signatures, you can modify the signing ID and it will do so. Just ensure your users are made aware of the change, so they can update it in their Commit Boost configuration files accordingly. 
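As a hedged illustration of the guidance above: a module following this pattern typically hard-codes its signing ID as a constant and publishes that same value for users to copy into the module's section of their Commit Boost configuration. The sketch below mirrors the `DA_COMMIT_SIGNING_ID` constant used by the `da_commit` example earlier in this series; the module name, constant value, and the exact config placement are placeholders chosen for illustration, not values defined by Commit Boost.

```rust
use alloy::primitives::{b256, B256};

// Hypothetical signing ID for an example module (placeholder value).
// Publish this exact value in the module's documentation; users copy it into
// the `signing_id` field of the module's entry (its `[[modules]]` section) in
// their Commit Boost configuration file. Changing it later invalidates every
// signature previously produced for this module.
pub const EXAMPLE_MODULE_SIGNING_ID: B256 =
    b256!("0x1111111111111111111111111111111111111111111111111111111111111111");
```

Keeping the identifier in code rather than deriving it from the human-readable module ID is what allows the module name to change without invalidating previously issued signatures, as noted above.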
From 133447da3c335ff285586cc214b4777d441b697a Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 15 Jul 2025 00:46:34 -0400 Subject: [PATCH 57/67] Refactored into compute_prop_commit_signing_root --- crates/common/src/signature.rs | 62 +++++++++++++++++++--------------- 1 file changed, 35 insertions(+), 27 deletions(-) diff --git a/crates/common/src/signature.rs b/crates/common/src/signature.rs index 18d24b19..19fade8f 100644 --- a/crates/common/src/signature.rs +++ b/crates/common/src/signature.rs @@ -21,6 +21,25 @@ pub fn compute_signing_root(signing_data: &T) -> [u8; 32] { signing_data.tree_hash_root().0 } +pub fn compute_prop_commit_signing_root( + chain: Chain, + object_root: [u8; 32], + module_signing_id: Option<[u8; 32]>, + domain_mask: [u8; 4], +) -> [u8; 32] { + let domain = compute_domain(chain, domain_mask); + match module_signing_id { + Some(id) => compute_signing_root(&types::SigningData { + object_root: compute_signing_root(&types::PropCommitSigningInfo { + data: object_root, + module_signing_id: id, + }), + signing_domain: domain, + }), + None => compute_signing_root(&types::SigningData { object_root, signing_domain: domain }), + } +} + // NOTE: this currently works only for builder domain signatures and // verifications // ref: https://github.com/ralexstokes/ethereum-consensus/blob/cf3c404043230559660810bc0c9d6d5a8498d819/ethereum-consensus/src/builder/mod.rs#L26-L29 @@ -51,20 +70,12 @@ pub fn verify_signed_message( module_signing_id: Option<&B256>, domain_mask: [u8; 4], ) -> Result<(), BlstErrorWrapper> { - let domain = compute_domain(chain, domain_mask); - let signing_root = match module_signing_id { - Some(id) => compute_signing_root(&types::SigningData { - object_root: compute_signing_root(&types::PropCommitSigningInfo { - data: msg.tree_hash_root().0, - module_signing_id: id.0, - }), - signing_domain: domain, - }), - None => compute_signing_root(&types::SigningData { - object_root: msg.tree_hash_root().0, - signing_domain: domain, - }), - }; + let signing_root = compute_prop_commit_signing_root( + chain, + compute_signing_root(msg), + module_signing_id.map(|id| id.0), + domain_mask, + ); verify_bls_signature(pubkey, &signing_root, signature) } @@ -83,8 +94,10 @@ pub fn sign_builder_root( object_root: [u8; 32], ) -> BlsSignature { let domain = chain.builder_domain(); - let signing_data = - types::SigningData { object_root: object_root.tree_hash_root().0, signing_domain: domain }; + let signing_data = types::SigningData { + object_root: compute_signing_root(&object_root), + signing_domain: domain, + }; let signing_root = compute_signing_root(&signing_data); sign_message(secret_key, &signing_root) } @@ -95,17 +108,12 @@ pub fn sign_commit_boost_root( object_root: [u8; 32], module_signing_id: Option<[u8; 32]>, ) -> BlsSignature { - let domain = compute_domain(chain, COMMIT_BOOST_DOMAIN); - let signing_root = match module_signing_id { - Some(id) => compute_signing_root(&types::SigningData { - object_root: compute_signing_root(&types::PropCommitSigningInfo { - data: object_root, - module_signing_id: id, - }), - signing_domain: domain, - }), - None => compute_signing_root(&types::SigningData { object_root, signing_domain: domain }), - }; + let signing_root = compute_prop_commit_signing_root( + chain, + object_root, + module_signing_id, + COMMIT_BOOST_DOMAIN, + ); sign_message(secret_key, &signing_root) } From 5935659e615b4590378ad8343e7f43f37b972b9c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20I=C3=B1aki=20Bilbao?= Date: Sun, 27 Jul 2025 18:13:51 -0300 Subject: [PATCH 
58/67] CBST2-04: Update JWT secrets on reload and revoke module endpoint (#295) --- Cargo.lock | 1 + crates/cli/src/docker_init.rs | 22 +++--- crates/common/src/commit/constants.rs | 1 + crates/common/src/commit/request.rs | 38 ++++++++-- crates/common/src/config/constants.rs | 1 + crates/common/src/config/signer.rs | 5 +- crates/common/src/config/utils.rs | 9 +-- crates/common/src/types.rs | 6 ++ crates/common/src/utils.rs | 20 +++++- crates/signer/src/error.rs | 4 ++ crates/signer/src/service.rs | 72 ++++++++++++++++--- docs/docs/get_started/configuration.md | 9 +++ docs/docs/get_started/running/binary.md | 1 + tests/Cargo.toml | 1 + tests/src/utils.rs | 2 + tests/tests/signer_jwt_auth.rs | 92 ++++++++++++++++++++++++- 16 files changed, 250 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 63de92dd..7eed3c0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1596,6 +1596,7 @@ dependencies = [ "cb-pbs", "cb-signer", "eyre", + "jsonwebtoken", "reqwest", "serde_json", "tempfile", diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index 7f418e97..84473a6d 100644 --- a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -6,16 +6,16 @@ use std::{ use cb_common::{ config::{ - CommitBoostConfig, LogsSettings, ModuleKind, SignerConfig, SignerType, BUILDER_PORT_ENV, - BUILDER_URLS_ENV, CHAIN_SPEC_ENV, CONFIG_DEFAULT, CONFIG_ENV, DIRK_CA_CERT_DEFAULT, - DIRK_CA_CERT_ENV, DIRK_CERT_DEFAULT, DIRK_CERT_ENV, DIRK_DIR_SECRETS_DEFAULT, - DIRK_DIR_SECRETS_ENV, DIRK_KEY_DEFAULT, DIRK_KEY_ENV, JWTS_ENV, LOGS_DIR_DEFAULT, - LOGS_DIR_ENV, METRICS_PORT_ENV, MODULE_ID_ENV, MODULE_JWT_ENV, PBS_ENDPOINT_ENV, - PBS_MODULE_NAME, PROXY_DIR_DEFAULT, PROXY_DIR_ENV, PROXY_DIR_KEYS_DEFAULT, - PROXY_DIR_KEYS_ENV, PROXY_DIR_SECRETS_DEFAULT, PROXY_DIR_SECRETS_ENV, SIGNER_DEFAULT, - SIGNER_DIR_KEYS_DEFAULT, SIGNER_DIR_KEYS_ENV, SIGNER_DIR_SECRETS_DEFAULT, - SIGNER_DIR_SECRETS_ENV, SIGNER_ENDPOINT_ENV, SIGNER_KEYS_ENV, SIGNER_MODULE_NAME, - SIGNER_URL_ENV, + CommitBoostConfig, LogsSettings, ModuleKind, SignerConfig, SignerType, ADMIN_JWT_ENV, + BUILDER_PORT_ENV, BUILDER_URLS_ENV, CHAIN_SPEC_ENV, CONFIG_DEFAULT, CONFIG_ENV, + DIRK_CA_CERT_DEFAULT, DIRK_CA_CERT_ENV, DIRK_CERT_DEFAULT, DIRK_CERT_ENV, + DIRK_DIR_SECRETS_DEFAULT, DIRK_DIR_SECRETS_ENV, DIRK_KEY_DEFAULT, DIRK_KEY_ENV, JWTS_ENV, + LOGS_DIR_DEFAULT, LOGS_DIR_ENV, METRICS_PORT_ENV, MODULE_ID_ENV, MODULE_JWT_ENV, + PBS_ENDPOINT_ENV, PBS_MODULE_NAME, PROXY_DIR_DEFAULT, PROXY_DIR_ENV, + PROXY_DIR_KEYS_DEFAULT, PROXY_DIR_KEYS_ENV, PROXY_DIR_SECRETS_DEFAULT, + PROXY_DIR_SECRETS_ENV, SIGNER_DEFAULT, SIGNER_DIR_KEYS_DEFAULT, SIGNER_DIR_KEYS_ENV, + SIGNER_DIR_SECRETS_DEFAULT, SIGNER_DIR_SECRETS_ENV, SIGNER_ENDPOINT_ENV, SIGNER_KEYS_ENV, + SIGNER_MODULE_NAME, SIGNER_URL_ENV, }, pbs::{BUILDER_API_PATH, GET_STATUS_PATH}, signer::{ProxyStore, SignerLoader, DEFAULT_SIGNER_PORT}, @@ -333,6 +333,7 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re let mut signer_envs = IndexMap::from([ get_env_val(CONFIG_ENV, CONFIG_DEFAULT), get_env_same(JWTS_ENV), + get_env_same(ADMIN_JWT_ENV), ]); // Bind the signer API to 0.0.0.0 @@ -366,6 +367,7 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re // write jwts to env envs.insert(JWTS_ENV.into(), format_comma_separated(&jwts)); + envs.insert(ADMIN_JWT_ENV.into(), random_jwt_secret()); // volumes let mut volumes = vec![config_volume.clone()]; diff --git a/crates/common/src/commit/constants.rs 
b/crates/common/src/commit/constants.rs index 7c9f948c..ea9cd9bb 100644 --- a/crates/common/src/commit/constants.rs +++ b/crates/common/src/commit/constants.rs @@ -3,3 +3,4 @@ pub const REQUEST_SIGNATURE_PATH: &str = "/signer/v1/request_signature"; pub const GENERATE_PROXY_KEY_PATH: &str = "/signer/v1/generate_proxy_key"; pub const STATUS_PATH: &str = "/status"; pub const RELOAD_PATH: &str = "/reload"; +pub const REVOKE_MODULE_PATH: &str = "/revoke_jwt"; diff --git a/crates/common/src/commit/request.rs b/crates/common/src/commit/request.rs index b8843234..9a67dcc2 100644 --- a/crates/common/src/commit/request.rs +++ b/crates/common/src/commit/request.rs @@ -1,4 +1,5 @@ use std::{ + collections::HashMap, fmt::{self, Debug, Display}, str::FromStr, }; @@ -9,13 +10,17 @@ use alloy::{ rpc::types::beacon::BlsSignature, }; use derive_more::derive::From; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize}; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use crate::{ - constants::COMMIT_BOOST_DOMAIN, error::BlstErrorWrapper, signature::verify_signed_message, - signer::BlsPublicKey, types::Chain, + config::decode_string_to_map, + constants::COMMIT_BOOST_DOMAIN, + error::BlstErrorWrapper, + signature::verify_signed_message, + signer::BlsPublicKey, + types::{Chain, ModuleId}, }; pub trait ProxyId: AsRef<[u8]> + Debug + Clone + Copy + TreeHash + Display {} @@ -198,6 +203,31 @@ pub struct GetPubkeysResponse { pub keys: Vec, } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ReloadRequest { + #[serde(default, deserialize_with = "deserialize_jwt_secrets")] + pub jwt_secrets: Option>, + pub admin_secret: Option, +} + +pub fn deserialize_jwt_secrets<'de, D>( + deserializer: D, +) -> Result>, D::Error> +where + D: Deserializer<'de>, +{ + let raw: String = Deserialize::deserialize(deserializer)?; + + decode_string_to_map(&raw) + .map(Some) + .map_err(|_| serde::de::Error::custom("Invalid format".to_string())) +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RevokeModuleRequest { + pub module_id: ModuleId, +} + /// Map of consensus pubkeys to proxies #[derive(Debug, Clone, Deserialize, Serialize)] pub struct ConsensusProxyMap { @@ -288,7 +318,7 @@ mod tests { let _: SignedProxyDelegationBls = serde_json::from_str(data).unwrap(); - let data = r#"{ + let data = r#"{ "message": { "delegator": "0xa3366b54f28e4bf1461926a3c70cdb0ec432b5c92554ecaae3742d33fb33873990cbed1761c68020e6d3c14d30a22050", "proxy": "0x4ca9939a8311a7cab3dde201b70157285fa81a9d" diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs index 8b07f732..743cdbe9 100644 --- a/crates/common/src/config/constants.rs +++ b/crates/common/src/config/constants.rs @@ -42,6 +42,7 @@ pub const SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV: &str = /// Comma separated list module_id=jwt_secret pub const JWTS_ENV: &str = "CB_JWTS"; +pub const ADMIN_JWT_ENV: &str = "CB_SIGNER_ADMIN_JWT"; /// Path to json file with plaintext keys (testing only) pub const SIGNER_KEYS_ENV: &str = "CB_SIGNER_LOADER_FILE"; diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index 7e5fbd58..d0adcdf4 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -133,6 +133,7 @@ pub struct StartSignerConfig { pub store: Option, pub endpoint: SocketAddr, pub jwts: HashMap, + pub admin_secret: String, pub jwt_auth_fail_limit: u32, pub jwt_auth_fail_timeout_seconds: u32, pub dirk: Option, @@ -142,7 +143,7 @@ impl 
StartSignerConfig { pub fn load_from_env() -> Result { let config = CommitBoostConfig::from_env_path()?; - let jwts = load_jwt_secrets()?; + let (admin_secret, jwts) = load_jwt_secrets()?; let signer_config = config.signer.ok_or_eyre("Signer config is missing")?; @@ -177,6 +178,7 @@ impl StartSignerConfig { loader: Some(loader), endpoint, jwts, + admin_secret, jwt_auth_fail_limit, jwt_auth_fail_timeout_seconds, store, @@ -207,6 +209,7 @@ impl StartSignerConfig { chain: config.chain, endpoint, jwts, + admin_secret, jwt_auth_fail_limit, jwt_auth_fail_timeout_seconds, loader: None, diff --git a/crates/common/src/config/utils.rs b/crates/common/src/config/utils.rs index 13784316..7ab346f1 100644 --- a/crates/common/src/config/utils.rs +++ b/crates/common/src/config/utils.rs @@ -4,7 +4,7 @@ use alloy::rpc::types::beacon::BlsPublicKey; use eyre::{bail, Context, Result}; use serde::de::DeserializeOwned; -use super::JWTS_ENV; +use super::{ADMIN_JWT_ENV, JWTS_ENV}; use crate::{config::MUXER_HTTP_MAX_LENGTH, types::ModuleId, utils::read_chunked_body_with_max}; pub fn load_env_var(env: &str) -> Result { @@ -26,9 +26,10 @@ pub fn load_file_from_env(env: &str) -> Result { } /// Loads a map of module id -> jwt secret from a json env -pub fn load_jwt_secrets() -> Result> { +pub fn load_jwt_secrets() -> Result<(String, HashMap)> { + let admin_jwt = std::env::var(ADMIN_JWT_ENV).wrap_err(format!("{ADMIN_JWT_ENV} is not set"))?; let jwt_secrets = std::env::var(JWTS_ENV).wrap_err(format!("{JWTS_ENV} is not set"))?; - decode_string_to_map(&jwt_secrets) + decode_string_to_map(&jwt_secrets).map(|secrets| (admin_jwt, secrets)) } /// Reads an HTTP response safely, erroring out if it failed or if the body is @@ -71,7 +72,7 @@ pub fn remove_duplicate_keys(keys: Vec) -> Vec { unique_keys } -fn decode_string_to_map(raw: &str) -> Result> { +pub fn decode_string_to_map(raw: &str) -> Result> { // trim the string and split for comma raw.trim() .split(',') diff --git a/crates/common/src/types.rs b/crates/common/src/types.rs index 5293a789..3d07e89c 100644 --- a/crates/common/src/types.rs +++ b/crates/common/src/types.rs @@ -23,6 +23,12 @@ pub struct JwtClaims { pub module: String, } +#[derive(Debug, Serialize, Deserialize)] +pub struct JwtAdmin { + pub exp: u64, + pub admin: bool, +} + #[derive(Clone, Copy, PartialEq, Eq)] pub enum Chain { Mainnet, diff --git a/crates/common/src/utils.rs b/crates/common/src/utils.rs index ccaf8888..7f2fbbca 100644 --- a/crates/common/src/utils.rs +++ b/crates/common/src/utils.rs @@ -30,7 +30,7 @@ use crate::{ config::LogsSettings, constants::SIGNER_JWT_EXPIRATION, pbs::HEADER_VERSION_VALUE, - types::{Chain, Jwt, JwtClaims, ModuleId}, + types::{Chain, Jwt, JwtAdmin, JwtClaims, ModuleId}, }; const MILLIS_PER_SECOND: u64 = 1_000; @@ -405,6 +405,24 @@ pub fn validate_jwt(jwt: Jwt, secret: &str) -> eyre::Result<()> { .map_err(From::from) } +/// Validate an admin JWT with the given secret +pub fn validate_admin_jwt(jwt: Jwt, secret: &str) -> eyre::Result<()> { + let mut validation = jsonwebtoken::Validation::default(); + validation.leeway = 10; + + let token = jsonwebtoken::decode::( + jwt.as_str(), + &jsonwebtoken::DecodingKey::from_secret(secret.as_ref()), + &validation, + )?; + + if token.claims.admin { + Ok(()) + } else { + eyre::bail!("Token is not admin") + } +} + /// Generates a random string pub fn random_jwt_secret() -> String { rand::rng().sample_iter(&Alphanumeric).take(32).map(char::from).collect() diff --git a/crates/signer/src/error.rs b/crates/signer/src/error.rs index 
a2a113f3..b0fc88de 100644 --- a/crates/signer/src/error.rs +++ b/crates/signer/src/error.rs @@ -25,6 +25,9 @@ pub enum SignerModuleError { #[error("Dirk signer does not support this operation")] DirkNotSupported, + #[error("module id not found")] + ModuleIdNotFound, + #[error("internal error: {0}")] Internal(String), @@ -48,6 +51,7 @@ impl IntoResponse for SignerModuleError { (StatusCode::INTERNAL_SERVER_ERROR, "internal error".to_string()) } SignerModuleError::SignerError(err) => (StatusCode::BAD_REQUEST, err.to_string()), + SignerModuleError::ModuleIdNotFound => (StatusCode::NOT_FOUND, self.to_string()), SignerModuleError::RateLimited(duration) => { (StatusCode::TOO_MANY_REQUESTS, format!("rate limited for {duration:?}")) } diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index 1a41a008..59da3c3d 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -18,17 +18,17 @@ use cb_common::{ commit::{ constants::{ GENERATE_PROXY_KEY_PATH, GET_PUBKEYS_PATH, RELOAD_PATH, REQUEST_SIGNATURE_PATH, - STATUS_PATH, + REVOKE_MODULE_PATH, STATUS_PATH, }, request::{ - EncryptionScheme, GenerateProxyRequest, GetPubkeysResponse, SignConsensusRequest, - SignProxyRequest, SignRequest, + EncryptionScheme, GenerateProxyRequest, GetPubkeysResponse, ReloadRequest, + RevokeModuleRequest, SignConsensusRequest, SignProxyRequest, SignRequest, }, }, config::StartSignerConfig, constants::{COMMIT_BOOST_COMMIT, COMMIT_BOOST_VERSION}, types::{Chain, Jwt, ModuleId}, - utils::{decode_jwt, validate_jwt}, + utils::{decode_jwt, validate_admin_jwt, validate_jwt}, }; use cb_metrics::provider::MetricsProvider; use eyre::Context; @@ -63,7 +63,9 @@ struct SigningState { /// Map of modules ids to JWT secrets. This also acts as registry of all /// modules running - jwts: Arc>, + jwts: Arc>>, + /// Secret for the admin JWT + admin_secret: Arc>, /// Map of JWT failures per peer jwt_auth_failures: Arc>>, @@ -84,7 +86,8 @@ impl SigningService { let state = SigningState { manager: Arc::new(RwLock::new(start_manager(config.clone()).await?)), - jwts: config.jwts.into(), + jwts: Arc::new(ParkingRwLock::new(config.jwts)), + admin_secret: Arc::new(ParkingRwLock::new(config.admin_secret)), jwt_auth_failures: Arc::new(ParkingRwLock::new(HashMap::new())), jwt_auth_fail_limit: config.jwt_auth_fail_limit, jwt_auth_fail_timeout: Duration::from_secs(config.jwt_auth_fail_timeout_seconds as u64), @@ -113,20 +116,30 @@ impl SigningService { SigningService::init_metrics(config.chain)?; - let app = axum::Router::new() + let signer_app = axum::Router::new() .route(REQUEST_SIGNATURE_PATH, post(handle_request_signature)) .route(GET_PUBKEYS_PATH, get(handle_get_pubkeys)) .route(GENERATE_PROXY_KEY_PATH, post(handle_generate_proxy)) .route_layer(middleware::from_fn_with_state(state.clone(), jwt_auth)) + .with_state(state.clone()) + .route_layer(middleware::from_fn(log_request)); + + let admin_app = axum::Router::new() .route(RELOAD_PATH, post(handle_reload)) + .route(REVOKE_MODULE_PATH, post(handle_revoke_module)) + .route_layer(middleware::from_fn_with_state(state.clone(), admin_auth)) .with_state(state.clone()) .route_layer(middleware::from_fn(log_request)) - .route(STATUS_PATH, get(handle_status)) - .into_make_service_with_connect_info::(); + .route(STATUS_PATH, get(handle_status)); let listener = TcpListener::bind(config.endpoint).await?; - axum::serve(listener, app).await.wrap_err("signer server exited") + axum::serve( + listener, + signer_app.merge(admin_app).into_make_service_with_connect_info::(), + ) + 
.await + .wrap_err("signer server exited") } fn init_metrics(network: Chain) -> eyre::Result<()> { @@ -214,7 +227,8 @@ fn check_jwt_auth( SignerModuleError::Unauthorized })?; - let jwt_secret = state.jwts.get(&module_id).ok_or_else(|| { + let guard = state.jwts.read(); + let jwt_secret = guard.get(&module_id).ok_or_else(|| { error!("Unauthorized request. Was the module started correctly?"); SignerModuleError::Unauthorized })?; @@ -226,6 +240,22 @@ fn check_jwt_auth( Ok(module_id) } +async fn admin_auth( + State(state): State, + TypedHeader(auth): TypedHeader>, + req: Request, + next: Next, +) -> Result { + let jwt: Jwt = auth.token().to_string().into(); + + validate_admin_jwt(jwt, &state.admin_secret.read()).map_err(|e| { + error!("Unauthorized request. Invalid JWT: {e}"); + SignerModuleError::Unauthorized + })?; + + Ok(next.run(req).await) +} + /// Requests logging middleware layer async fn log_request(req: Request, next: Next) -> Result { let url = &req.uri().clone(); @@ -360,6 +390,7 @@ async fn handle_generate_proxy( async fn handle_reload( State(mut state): State, + Json(request): Json, ) -> Result { let req_id = Uuid::new_v4(); @@ -373,6 +404,14 @@ async fn handle_reload( } }; + if let Some(jwt_secrets) = request.jwt_secrets { + *state.jwts.write() = jwt_secrets; + } + + if let Some(admin_secret) = request.admin_secret { + *state.admin_secret.write() = admin_secret; + } + let new_manager = match start_manager(config).await { Ok(manager) => manager, Err(err) => { @@ -386,6 +425,17 @@ async fn handle_reload( Ok(StatusCode::OK) } +async fn handle_revoke_module( + State(state): State, + Json(request): Json, +) -> Result { + let mut guard = state.jwts.write(); + guard + .remove(&request.module_id) + .ok_or(SignerModuleError::ModuleIdNotFound) + .map(|_| StatusCode::OK) +} + async fn start_manager(config: StartSignerConfig) -> eyre::Result { let proxy_store = if let Some(store) = config.store.clone() { Some(store.init_from_env()?) diff --git a/docs/docs/get_started/configuration.md b/docs/docs/get_started/configuration.md index 5dd46329..b65e73ad 100644 --- a/docs/docs/get_started/configuration.md +++ b/docs/docs/get_started/configuration.md @@ -398,6 +398,15 @@ Commit-Boost supports hot-reloading the configuration file. This means that you docker compose -f cb.docker-compose.yml exec cb_signer curl -X POST http://localhost:20000/reload ``` +### Signer module reload + +The signer module takes 2 optional parameters in the JSON body: + +- `jwt_secrets`: a string with a comma-separated list of `=` for all modules. +- `admin_secret`: a string with the secret for the signer admin JWT. + +Parameters that are not provided will not be updated; they will be regenerated using their original on-disk data as though the signer service was being restarted. Note that any changes you made with calls to `/revoke_jwt` or `/reload` will be reverted, so make sure you provide any modifications again as part of this call. + ### Notes - The hot reload feature is available for PBS modules (both default and custom) and signer module. diff --git a/docs/docs/get_started/running/binary.md b/docs/docs/get_started/running/binary.md index 385e7a0c..97991ee5 100644 --- a/docs/docs/get_started/running/binary.md +++ b/docs/docs/get_started/running/binary.md @@ -26,6 +26,7 @@ Modules need some environment variables to work correctly. - `CB_MUX_PATH_{ID}`: optional, override where to load mux validator keys for mux with `id=\{ID\}`. ### Signer Module +- `CB_SIGNER_ADMIN_JWT`: secret to use for admin JWT. 
- `CB_SIGNER_ENDPOINT`: optional, override to specify the `IP:port` endpoint to bind the signer server to. - For loading keys we currently support: - `CB_SIGNER_LOADER_FILE`: path to a `.json` with plaintext keys (for testing purposes only). diff --git a/tests/Cargo.toml b/tests/Cargo.toml index f1b5c9d9..573cfa20 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -11,6 +11,7 @@ cb-common.workspace = true cb-pbs.workspace = true cb-signer.workspace = true eyre.workspace = true +jsonwebtoken.workspace = true reqwest.workspace = true serde_json.workspace = true tempfile.workspace = true diff --git a/tests/src/utils.rs b/tests/src/utils.rs index b677d800..04aa371a 100644 --- a/tests/src/utils.rs +++ b/tests/src/utils.rs @@ -118,6 +118,7 @@ pub fn get_start_signer_config( signer_config: SignerConfig, chain: Chain, jwts: HashMap, + admin_secret: String, ) -> StartSignerConfig { match signer_config.inner { SignerType::Local { loader, .. } => StartSignerConfig { @@ -126,6 +127,7 @@ pub fn get_start_signer_config( store: None, endpoint: SocketAddr::new(signer_config.host.into(), signer_config.port), jwts, + admin_secret, jwt_auth_fail_limit: signer_config.jwt_auth_fail_limit, jwt_auth_fail_timeout_seconds: signer_config.jwt_auth_fail_timeout_seconds, dirk: None, diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs index 90a0365f..820afbcc 100644 --- a/tests/tests/signer_jwt_auth.rs +++ b/tests/tests/signer_jwt_auth.rs @@ -2,10 +2,14 @@ use std::{collections::HashMap, time::Duration}; use alloy::{hex, primitives::FixedBytes}; use cb_common::{ - commit::{constants::GET_PUBKEYS_PATH, request::GetPubkeysResponse}, + commit::{ + constants::{GET_PUBKEYS_PATH, REVOKE_MODULE_PATH}, + request::GetPubkeysResponse, + }, config::StartSignerConfig, + constants::SIGNER_JWT_EXPIRATION, signer::{SignerLoader, ValidatorKeysFormat}, - types::{Chain, ModuleId}, + types::{Chain, Jwt, JwtAdmin, ModuleId}, utils::create_jwt, }; use cb_signer::service::SigningService; @@ -16,6 +20,7 @@ use tracing::info; const JWT_MODULE: &str = "test-module"; const JWT_SECRET: &str = "test-jwt-secret"; +const ADMIN_SECRET: &str = "test-admin-secret"; #[tokio::test] async fn test_signer_jwt_auth_success() -> Result<()> { @@ -86,6 +91,74 @@ async fn test_signer_jwt_rate_limit() -> Result<()> { Ok(()) } +#[tokio::test] +async fn test_signer_revoked_jwt_fail() -> Result<()> { + setup_test_env(); + let module_id = ModuleId(JWT_MODULE.to_string()); + let start_config = start_server(20400).await?; + + // Run as many pubkeys requests as the fail limit + let jwt = create_jwt(&module_id, JWT_SECRET)?; + let admin_jwt = create_admin_jwt()?; + let client = reqwest::Client::new(); + + // At first, test module should be allowed to request pubkeys + let url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH); + let response = client.get(&url).bearer_auth(&jwt).send().await?; + assert!(response.status() == StatusCode::OK); + + let revoke_url = format!("http://{}{}", start_config.endpoint, REVOKE_MODULE_PATH); + let response = client + .post(&revoke_url) + .header("content-type", "application/json") + .body(reqwest::Body::wrap(format!("{{\"module_id\": \"{JWT_MODULE}\"}}"))) + .bearer_auth(&admin_jwt) + .send() + .await?; + assert!(response.status() == StatusCode::OK); + + // After revoke, test module shouldn't be allowed anymore + let response = client.get(&url).bearer_auth(&jwt).send().await?; + assert!(response.status() == StatusCode::UNAUTHORIZED); + + Ok(()) +} + +#[tokio::test] +async fn 
test_signer_only_admin_can_revoke() -> Result<()> { + setup_test_env(); + let module_id = ModuleId(JWT_MODULE.to_string()); + let start_config = start_server(20500).await?; + + // Run as many pubkeys requests as the fail limit + let jwt = create_jwt(&module_id, JWT_SECRET)?; + let admin_jwt = create_admin_jwt()?; + let client = reqwest::Client::new(); + let url = format!("http://{}{}", start_config.endpoint, REVOKE_MODULE_PATH); + + // Module JWT shouldn't be able to revoke modules + let response = client + .post(&url) + .header("content-type", "application/json") + .body(reqwest::Body::wrap(format!("{{\"module_id\": \"{JWT_MODULE}\"}}"))) + .bearer_auth(&jwt) + .send() + .await?; + assert!(response.status() == StatusCode::UNAUTHORIZED); + + // Admin should be able to revoke modules + let response = client + .post(&url) + .header("content-type", "application/json") + .body(reqwest::Body::wrap(format!("{{\"module_id\": \"{JWT_MODULE}\"}}"))) + .bearer_auth(&admin_jwt) + .send() + .await?; + assert!(response.status() == StatusCode::OK); + + Ok(()) +} + // Starts the signer moduler server on a separate task and returns its // configuration async fn start_server(port: u16) -> Result { @@ -107,7 +180,7 @@ async fn start_server(port: u16) -> Result { config.port = port; config.jwt_auth_fail_limit = 3; // Set a low fail limit for testing config.jwt_auth_fail_timeout_seconds = 3; // Set a short timeout for testing - let start_config = get_start_signer_config(config, chain, jwts); + let start_config = get_start_signer_config(config, chain, jwts, ADMIN_SECRET.to_string()); // Run the Signer let server_handle = tokio::spawn(SigningService::run(start_config.clone())); @@ -144,3 +217,16 @@ async fn verify_pubkeys(response: Response) -> Result<()> { } Ok(()) } + +fn create_admin_jwt() -> Result { + jsonwebtoken::encode( + &jsonwebtoken::Header::default(), + &JwtAdmin { + admin: true, + exp: jsonwebtoken::get_current_timestamp() + SIGNER_JWT_EXPIRATION, + }, + &jsonwebtoken::EncodingKey::from_secret(ADMIN_SECRET.as_ref()), + ) + .map_err(Into::into) + .map(Jwt::from) +} From 509dba8ec12bc2babcf748c9df1931fa1f00ee77 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 28 Jul 2025 12:53:48 -0400 Subject: [PATCH 59/67] Signing IDs are no longer optional in the config --- crates/common/src/config/module.rs | 2 +- crates/common/src/config/signer.rs | 48 +++++++++++++++--------------- tests/src/utils.rs | 6 ++-- tests/tests/signer_jwt_auth.rs | 2 +- tests/tests/signer_request_sig.rs | 4 +-- 5 files changed, 31 insertions(+), 31 deletions(-) diff --git a/crates/common/src/config/module.rs b/crates/common/src/config/module.rs index 09ccee89..71c4891b 100644 --- a/crates/common/src/config/module.rs +++ b/crates/common/src/config/module.rs @@ -39,7 +39,7 @@ pub struct StaticModuleConfig { #[serde(rename = "type")] pub kind: ModuleKind, /// Signing ID for the module to use when requesting signatures - pub signing_id: Option, + pub signing_id: B256, } /// Runtime config to start a module diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index 76bfe4b8..381b37b4 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -299,16 +299,12 @@ pub fn load_module_signing_configs( Some(secret) => secret.clone(), None => bail!("JWT secret for module {} is missing", module.id), }; - - // Make sure the signing ID is present - let signing_id = match &module.signing_id { - Some(id) => *id, - None => bail!("Signing ID for module {} is missing", module.id), - }; - 
// Create the module signing config and validate it - let module_signing_config = - ModuleSigningConfig { module_name: module.id.clone(), jwt_secret, signing_id }; + let module_signing_config = ModuleSigningConfig { + module_name: module.id.clone(), + jwt_secret, + signing_id: module.signing_id, + }; module_signing_config .validate() .wrap_err(format!("Invalid signing config for module {}", module.id))?; @@ -336,7 +332,7 @@ pub fn load_module_signing_configs( } None => { seen_signing_ids.insert(module_signing_config.signing_id, &module.id); - signing_id + module.signing_id } }; @@ -385,10 +381,10 @@ mod tests { } } - async fn create_module_config(id: &ModuleId, signing_id: &B256) -> StaticModuleConfig { + async fn create_module_config(id: ModuleId, signing_id: B256) -> StaticModuleConfig { StaticModuleConfig { id: id.clone(), - signing_id: Some(*signing_id), + signing_id, docker_image: String::from(""), env: None, env_file: None, @@ -407,8 +403,8 @@ mod tests { b256!("0202020202020202020202020202020202020202020202020202020202020202"); cfg.modules = Some(vec![ - create_module_config(&first_module_id, &first_signing_id).await, - create_module_config(&second_module_id, &second_signing_id).await, + create_module_config(first_module_id.clone(), first_signing_id).await, + create_module_config(second_module_id.clone(), second_signing_id).await, ]); let jwts = HashMap::from([ @@ -466,8 +462,10 @@ mod tests { b256!("0202020202020202020202020202020202020202020202020202020202020202"); cfg.modules = Some(vec![ - create_module_config(&first_module_id, &first_signing_id).await, - create_module_config(&first_module_id, &second_signing_id).await, /* Duplicate module name */ + create_module_config(first_module_id.clone(), first_signing_id).await, + create_module_config(first_module_id.clone(), second_signing_id).await, /* Duplicate + * module + * name */ ]); let jwts = HashMap::from([ @@ -498,8 +496,8 @@ mod tests { b256!("0202020202020202020202020202020202020202020202020202020202020202"); cfg.modules = Some(vec![ - create_module_config(&first_module_id, &first_signing_id).await, - create_module_config(&second_module_id, &second_signing_id).await, + create_module_config(first_module_id.clone(), first_signing_id).await, + create_module_config(second_module_id.clone(), second_signing_id).await, ]); let jwts = HashMap::from([ @@ -530,8 +528,8 @@ mod tests { let second_module_id = ModuleId("2nd_test_module".to_string()); cfg.modules = Some(vec![ - create_module_config(&first_module_id, &first_signing_id).await, - create_module_config(&second_module_id, &first_signing_id).await, /* Duplicate signing ID */ + create_module_config(first_module_id.clone(), first_signing_id).await, + create_module_config(second_module_id.clone(), first_signing_id).await, /* Duplicate signing ID */ ]); let jwts = HashMap::from([ @@ -564,8 +562,8 @@ mod tests { b256!("0202020202020202020202020202020202020202020202020202020202020202"); cfg.modules = Some(vec![ - create_module_config(&first_module_id, &first_signing_id).await, - create_module_config(&second_module_id, &second_signing_id).await, + create_module_config(first_module_id.clone(), first_signing_id).await, + create_module_config(second_module_id.clone(), second_signing_id).await, ]); let jwts = HashMap::from([(second_module_id.clone(), "another-secret".to_string())]); @@ -589,7 +587,8 @@ mod tests { let first_signing_id = b256!("0101010101010101010101010101010101010101010101010101010101010101"); - cfg.modules = Some(vec![create_module_config(&first_module_id, 
&first_signing_id).await]); + cfg.modules = + Some(vec![create_module_config(first_module_id.clone(), first_signing_id).await]); let jwts = HashMap::from([(first_module_id.clone(), "".to_string())]); @@ -610,7 +609,8 @@ mod tests { let first_signing_id = b256!("0000000000000000000000000000000000000000000000000000000000000000"); - cfg.modules = Some(vec![create_module_config(&first_module_id, &first_signing_id).await]); + cfg.modules = + Some(vec![create_module_config(first_module_id.clone(), first_signing_id).await]); let jwts = HashMap::from([(first_module_id.clone(), "supersecret".to_string())]); diff --git a/tests/src/utils.rs b/tests/src/utils.rs index b4865be9..b897aa49 100644 --- a/tests/src/utils.rs +++ b/tests/src/utils.rs @@ -154,10 +154,10 @@ pub fn get_start_signer_config( } } -pub fn create_module_config(id: &ModuleId, signing_id: &B256) -> StaticModuleConfig { +pub fn create_module_config(id: ModuleId, signing_id: B256) -> StaticModuleConfig { StaticModuleConfig { - id: id.clone(), - signing_id: Some(*signing_id), + id, + signing_id, docker_image: String::from(""), env: None, env_file: None, diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs index fce8ae72..cb825624 100644 --- a/tests/tests/signer_jwt_auth.rs +++ b/tests/tests/signer_jwt_auth.rs @@ -24,7 +24,7 @@ async fn create_mod_signing_configs() -> HashMap let module_id = ModuleId(JWT_MODULE.to_string()); let signing_id = b256!("0101010101010101010101010101010101010101010101010101010101010101"); - cfg.modules = Some(vec![utils::create_module_config(&module_id, &signing_id)]); + cfg.modules = Some(vec![utils::create_module_config(module_id.clone(), signing_id)]); let jwts = HashMap::from([(module_id.clone(), "supersecret".to_string())]); diff --git a/tests/tests/signer_request_sig.rs b/tests/tests/signer_request_sig.rs index 1990172e..26378f67 100644 --- a/tests/tests/signer_request_sig.rs +++ b/tests/tests/signer_request_sig.rs @@ -35,8 +35,8 @@ async fn create_mod_signing_configs() -> HashMap let signing_id_2 = b256!("0x61fe00135d7b4912a8c63ada215ac2e62326e6e7b30f49a29fcf9779d7ad800d"); cfg.modules = Some(vec![ - utils::create_module_config(&module_id_1, &signing_id_1), - utils::create_module_config(&module_id_2, &signing_id_2), + utils::create_module_config(module_id_1.clone(), signing_id_1), + utils::create_module_config(module_id_2.clone(), signing_id_2), ]); let jwts = HashMap::from([ From 2c507d7c8709a2acf2a2dcb8406d540a76ef3f57 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 28 Jul 2025 13:14:32 -0400 Subject: [PATCH 60/67] Refactored some of the signer consts for consistency --- crates/cli/src/docker_init.rs | 6 +++--- crates/common/src/config/constants.rs | 7 ++++++- crates/common/src/config/signer.rs | 20 +++++++++----------- crates/common/src/signer/constants.rs | 6 ------ crates/common/src/signer/mod.rs | 2 -- tests/src/utils.rs | 14 ++++++-------- 6 files changed, 24 insertions(+), 31 deletions(-) delete mode 100644 crates/common/src/signer/constants.rs diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index cc307f85..551d9245 100644 --- a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -15,10 +15,10 @@ use cb_common::{ PROXY_DIR_KEYS_ENV, PROXY_DIR_SECRETS_DEFAULT, PROXY_DIR_SECRETS_ENV, SIGNER_DEFAULT, SIGNER_DIR_KEYS_DEFAULT, SIGNER_DIR_KEYS_ENV, SIGNER_DIR_SECRETS_DEFAULT, SIGNER_DIR_SECRETS_ENV, SIGNER_ENDPOINT_ENV, SIGNER_KEYS_ENV, SIGNER_MODULE_NAME, - SIGNER_URL_ENV, + SIGNER_PORT_DEFAULT, SIGNER_URL_ENV, }, 
pbs::{BUILDER_API_PATH, GET_STATUS_PATH}, - signer::{ProxyStore, SignerLoader, DEFAULT_SIGNER_PORT}, + signer::{ProxyStore, SignerLoader}, types::ModuleId, utils::random_jwt_secret, }; @@ -73,7 +73,7 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re let mut targets = Vec::new(); // address for signer API communication - let signer_port = cb_config.signer.as_ref().map(|s| s.port).unwrap_or(DEFAULT_SIGNER_PORT); + let signer_port = cb_config.signer.as_ref().map(|s| s.port).unwrap_or(SIGNER_PORT_DEFAULT); let signer_server = if let Some(SignerConfig { inner: SignerType::Remote { url }, .. }) = &cb_config.signer { url.to_string() diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs index 8b07f732..406f1375 100644 --- a/crates/common/src/config/constants.rs +++ b/crates/common/src/config/constants.rs @@ -34,11 +34,16 @@ pub const SIGNER_MODULE_NAME: &str = "signer"; /// Where the signer module should open the server pub const SIGNER_ENDPOINT_ENV: &str = "CB_SIGNER_ENDPOINT"; +pub const SIGNER_PORT_DEFAULT: u16 = 20000; -// JWT authentication settings +/// Number of auth failures before rate limiting the client pub const SIGNER_JWT_AUTH_FAIL_LIMIT_ENV: &str = "CB_SIGNER_JWT_AUTH_FAIL_LIMIT"; +pub const SIGNER_JWT_AUTH_FAIL_LIMIT_DEFAULT: u32 = 3; + +/// How long to rate limit the client after auth failures pub const SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV: &str = "CB_SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS"; +pub const SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT: u32 = 5 * 60; /// Comma separated list module_id=jwt_secret pub const JWTS_ENV: &str = "CB_JWTS"; diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index 7e5fbd58..0674d1f7 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -12,15 +12,13 @@ use url::Url; use super::{ load_jwt_secrets, load_optional_env_var, utils::load_env_var, CommitBoostConfig, - SIGNER_ENDPOINT_ENV, SIGNER_IMAGE_DEFAULT, SIGNER_JWT_AUTH_FAIL_LIMIT_ENV, - SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV, + SIGNER_ENDPOINT_ENV, SIGNER_IMAGE_DEFAULT, SIGNER_JWT_AUTH_FAIL_LIMIT_DEFAULT, + SIGNER_JWT_AUTH_FAIL_LIMIT_ENV, SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT, + SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_ENV, SIGNER_PORT_DEFAULT, }; use crate::{ config::{DIRK_CA_CERT_ENV, DIRK_CERT_ENV, DIRK_DIR_SECRETS_ENV, DIRK_KEY_ENV}, - signer::{ - ProxyStore, SignerLoader, DEFAULT_JWT_AUTH_FAIL_LIMIT, - DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS, DEFAULT_SIGNER_PORT, - }, + signer::{ProxyStore, SignerLoader}, types::{Chain, ModuleId}, utils::{default_host, default_u16, default_u32}, }; @@ -32,20 +30,20 @@ pub struct SignerConfig { #[serde(default = "default_host")] pub host: Ipv4Addr, /// Port to listen for signer API calls on - #[serde(default = "default_u16::")] + #[serde(default = "default_u16::")] pub port: u16, /// Docker image of the module - #[serde(default = "default_signer")] + #[serde(default = "default_signer_image")] pub docker_image: String, /// Number of JWT auth failures before rate limiting an endpoint /// If set to 0, no rate limiting will be applied - #[serde(default = "default_u32::")] + #[serde(default = "default_u32::")] pub jwt_auth_fail_limit: u32, /// Duration in seconds to rate limit an endpoint after the JWT auth failure /// limit has been reached - #[serde(default = "default_u32::")] + #[serde(default = "default_u32::")] pub jwt_auth_fail_timeout_seconds: u32, /// Inner type-specific configuration @@ -70,7 +68,7 @@ impl 
SignerConfig { } } -fn default_signer() -> String { +fn default_signer_image() -> String { SIGNER_IMAGE_DEFAULT.to_string() } diff --git a/crates/common/src/signer/constants.rs b/crates/common/src/signer/constants.rs deleted file mode 100644 index 45e3ce23..00000000 --- a/crates/common/src/signer/constants.rs +++ /dev/null @@ -1,6 +0,0 @@ -pub const DEFAULT_SIGNER_PORT: u16 = 20000; - -// Rate limit signer API requests for 5 minutes after the endpoint has 3 JWT -// auth failures -pub const DEFAULT_JWT_AUTH_FAIL_LIMIT: u32 = 3; -pub const DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS: u32 = 5 * 60; diff --git a/crates/common/src/signer/mod.rs b/crates/common/src/signer/mod.rs index b6dce29d..e0a164a7 100644 --- a/crates/common/src/signer/mod.rs +++ b/crates/common/src/signer/mod.rs @@ -1,10 +1,8 @@ -mod constants; mod loader; mod schemes; mod store; mod types; -pub use constants::*; pub use loader::*; pub use schemes::*; pub use store::*; diff --git a/tests/src/utils.rs b/tests/src/utils.rs index b677d800..5392fd95 100644 --- a/tests/src/utils.rs +++ b/tests/src/utils.rs @@ -8,13 +8,11 @@ use alloy::{primitives::U256, rpc::types::beacon::BlsPublicKey}; use cb_common::{ config::{ PbsConfig, PbsModuleConfig, RelayConfig, SignerConfig, SignerType, StartSignerConfig, - SIGNER_IMAGE_DEFAULT, + SIGNER_IMAGE_DEFAULT, SIGNER_JWT_AUTH_FAIL_LIMIT_DEFAULT, + SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT, SIGNER_PORT_DEFAULT, }, pbs::{RelayClient, RelayEntry}, - signer::{ - SignerLoader, DEFAULT_JWT_AUTH_FAIL_LIMIT, DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS, - DEFAULT_SIGNER_PORT, - }, + signer::SignerLoader, types::{Chain, ModuleId}, utils::default_host, }; @@ -106,10 +104,10 @@ pub fn to_pbs_config( pub fn get_signer_config(loader: SignerLoader) -> SignerConfig { SignerConfig { host: default_host(), - port: DEFAULT_SIGNER_PORT, + port: SIGNER_PORT_DEFAULT, docker_image: SIGNER_IMAGE_DEFAULT.to_string(), - jwt_auth_fail_limit: DEFAULT_JWT_AUTH_FAIL_LIMIT, - jwt_auth_fail_timeout_seconds: DEFAULT_JWT_AUTH_FAIL_TIMEOUT_SECONDS, + jwt_auth_fail_limit: SIGNER_JWT_AUTH_FAIL_LIMIT_DEFAULT, + jwt_auth_fail_timeout_seconds: SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT, inner: SignerType::Local { loader, store: None }, } } From ff716a0ed7b10ffd97bb0cfe1b6233489703e3b3 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Tue, 29 Jul 2025 01:06:31 -0400 Subject: [PATCH 61/67] Updated the Signer API docs --- api/signer-api.yml | 83 ++++++++++++-- docs/docs/developing/prop-commit-signing.md | 115 -------------------- 2 files changed, 76 insertions(+), 122 deletions(-) diff --git a/api/signer-api.yml b/api/signer-api.yml index c876a3a2..69239e38 100644 --- a/api/signer-api.yml +++ b/api/signer-api.yml @@ -60,7 +60,7 @@ paths: /signer/v1/request_signature: post: - summary: Send a signature request + summary: Request a signature for a 32-byte blob of data (typically a hash), signed by the requested BLS or ECDSA key. tags: - Signer security: @@ -81,15 +81,15 @@ paths: type: string enum: [consensus, proxy_bls, proxy_ecdsa] pubkey: - description: Public key of the validator for consensus signatures + description: The 48-byte BLS public key, with optional `0x` prefix, of the proposer key that you want to request a signature from. 
$ref: "#/components/schemas/BlsPubkey" proxy: - description: BLS proxy pubkey or ECDSA address for proxy signatures + description: The 48-byte BLS public key (for `proxy_bls` mode) or the 20-byte Ethereum address (for `proxy_ecdsa` mode), with optional `0x` prefix, of the proxy key that you want to request a signature from. oneOf: - $ref: "#/components/schemas/BlsPubkey" - $ref: "#/components/schemas/EcdsaAddress" object_root: - description: The root of the object to be signed + description: The 32-byte data you want to sign, with optional `0x` prefix. type: string format: hex pattern: "^0x[a-fA-F0-9]{64}$" @@ -112,7 +112,7 @@ paths: object_root: "0x3e9f4a78b5c21d64f0b8e3d9a7f5c02b4d1e67a3c8f29b5d6e4a3b1c8f72e6d9" responses: "200": - description: Success + description: A successful signature response. The returned signature is the Merkle root hash of the provided `object_root` field and the requesting module's Signing ID as specified in the Commit-Boost configuration. For details on this signature, see the [signature structure documentation](https://commit-boost.github.io/commit-boost-client/developing/prop-commit-signing.md#structure-of-a-signature). content: application/json: schema: @@ -126,8 +126,45 @@ paths: value: "0xa3ffa9241f78279f1af04644cb8c79c2d8f02bcf0e28e2f186f6dcccac0a869c2be441fda50f0dea895cfce2e53f0989a3ffa9241f78279f1af04644cb8c79c2d8f02bcf0e28e2f186f6dcccac0a869c2be441fda50f0dea895cfce2e53f0989" ProxyEcdsa: value: "0x985b495f49d1b96db3bba3f6c5dd1810950317c10d4c2042bd316f338cdbe74359072e209b85e56ac492092d7860063dd096ca31b4e164ef27e3f8d508e656801c" + "400": + description: | + This can occur in several scenarios: + + - You requested an operation while using the Dirk signer mode instead of locally-managed signer mode, but Dirk doesn't support that operation. + - Something went wrong while preparing your request; the error text will provide more information. + content: + application/json: + schema: + type: object + required: + - code + - message + properties: + code: + type: number + example: 400 + message: + type: string + example: "Bad request: Invalid pubkey format" + "401": + description: The requesting module did not provide a JWT string in the request's authorization header, or the JWT string was not configured in the signer service's configuration file as belonging to the module. + content: + application/json: + schema: + type: object + required: + - code + - message + properties: + code: + type: number + example: 401 + message: + type: string + example: "Unauthorized" + "404": - description: Unknown value (pubkey, etc.) + description: You either requested a route that doesn't exist, or you requested a signature from a key that does not exist. content: application/json: schema: @@ -142,8 +179,24 @@ paths: message: type: string example: "Unknown pubkey" + "429": + description: Your module attempted and failed JWT authentication too many times recently, and is currently timed out. It cannot make any more requests until the timeout ends. + content: + application/json: + schema: + type: object + required: + - code + - message + properties: + code: + type: number + example: 429 + message: + type: string + example: "Too many requests" "500": - description: Internal error + description: Your request was valid, but something went wrong internally that prevented it from being fulfilled. 
content: application/json: schema: @@ -158,6 +211,22 @@ paths: message: type: string example: "Internal error" + "502": + description: The signer service is running in Dirk signer mode, but Dirk could not be reached. + content: + application/json: + schema: + type: object + required: + - code + - message + properties: + code: + type: number + example: 502 + message: + type: string + example: "Bad gateway: Dirk signer service is unreachable" /signer/v1/generate_proxy_key: post: diff --git a/docs/docs/developing/prop-commit-signing.md b/docs/docs/developing/prop-commit-signing.md index 64bab425..21dd6320 100644 --- a/docs/docs/developing/prop-commit-signing.md +++ b/docs/docs/developing/prop-commit-signing.md @@ -58,118 +58,3 @@ where: The data signed in a proposer commitment is the 32-byte root of this tree (the green `Root` box). Note that calculating this will involve calculating the Merkle Root of two separate trees: first the blue data subtree (with the original request data and the signing ID) to establish the blue `Root` value, and then again with a tree created from that value and the `Domain`. Many languages provide libraries for computing the root of an SSZ Merkle tree, such as [fastssz for Go](https://github.com/ferranbt/fastssz) or [tree_hash for Rust](https://docs.rs/tree_hash/latest/tree_hash/). When verifying proposer commitment signatures, use a library that supports Merkle tree root hashing, the `compute_domain()` operation, and validation for signatures generated by your key of choice. - - -## Requesting a Proposer Commitment from the Signer - -Prior to requesting a signature from the signer service, first ensure that Commit Boost has been [configured](#configuring-a-module-for-proposer-commitments) with your module's signing ID and JWT secret. - -The signer service can be accessed by an HTTP API. In Docker mode, this will be within the `cb_signer` container at the `/signer/v1/request_signature` route (for example, using the default port of `20000`, the endpoint will be `http://cb_signer:20000/signer/v1/request_signature`). Submitting a request must be done via the `POST` method. - - -### Headers - -- Set `Content-Type` set to `application/json`. -- Set `Accept` to `application/json`, as responses are quoted strings. Other formats are not currently supported. -- Set `Authorization` to a standard JWT string representing your module's JWT authentication information. For the claims, you can add a `module` claim indicating the human-readable name of your module. - - -### BLS Proposer Keys - -If requesting a signature directly from a proposer pubkey, use the following body specification: - -```json -{ - "type": "consensus", - "pubkey": "0x1234abcd...", - "object_root": "0x01020304..." -} -``` - -where: - -- `pubkey` is the 48-byte BLS public key, with optional `0x` prefix, of the proposer key that you want to request a signature from. -- `object_root` is the 32-byte data you want to sign, with optional `0x` prefix. - - -### BLS Proxy Keys - -If requesting a signature indirectly from a proposer key via a [proxy key](./commit-module.md#with-a-proxy-key), use the following body specification: - -```json -{ - "type": "proxy_bls", - "proxy": "0x1234abcd...", - "object_root": "0x01020304..." -} -``` - -where: - -- `proxy` is the 48-byte BLS public key, with optional `0x` prefix, of the proxy key that you want to request a signature from. -- `object_root` is the 32-byte data you want to sign, with optional `0x` prefix. 
- - -### ECDSA Proxy Keys - -**NOTE:** ECDSA proxy key support is not available when using Dirk. - -If requesting a signature indirectly from an Ethereum private key via a [proxy key](./commit-module.md#with-a-proxy-key), use the following body specification: - -```json -{ - "type": "proxy_ecdsa", - "proxy": "0x1234abcd...", - "object_root": "0x01020304..." -} -``` - -where: - -- `proxy` is the 20-byte Ethereum address of the proxy key, with optional `0x` prefix, of the ECDSA private key that you want to request a signature from. -- `object_root` is the 32-byte data you want to sign, with optional `0x` prefix. - - -### Response - -The response for any of the above will be one of the following, provided in plaintext format (not JSON). - - -#### `200 OK` - -A successful signing request, with the signature provided as a plaintext quoted hex-encoded string, with a `0x` prefix. For example, the response body would look like: -``` -"0xa43e623f009e615faa3987368f64d6286a4103de70e9a81d82562c50c91eae2d5d6fb9db9fe943aa8ee42fd92d8210c1149f25ed6aa72a557d74a0ed5646fdd0e8255ec58e3e2931695fe913863ba0cdf90d29f651bce0a34169a6f6ce5b3115" -``` - -#### `401 Unauthorized` - -Your module did not provide a JWT string in the request's authorization header, or the JWT string was not configured in the signer service's configuration file as belonging to your module. - - -#### `400 Bad Request` - -This can occur in several scenarios: - -- You requested an operation while using the Dirk signer mode instead of locally-managed signer mode, but Dirk doesn't support that operation. -- Something went wrong while preparing your request; the error text will provide more information. - - -#### `502 Bad Gateway` - -The signer service is running in Dirk signer mode, but Dirk could not be reached. - - -#### `404 Not Found` - -You either requested a route that doesn't exist, or you requested a signature from a key that does not exist. - - -#### `429 Too Many Requests` - -Your module attempted and failed JWT authentication too many times recently, and is currently timed out. It cannot make any more requests until the timeout ends. - - -#### `500 Internal Server Error` - -Your request was valid, but something went wrong internally that prevented it from being fulfilled. 
\ No newline at end of file From de61066bf949c97afb8f6be969d340096c5f1bb7 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 30 Jul 2025 14:42:47 -0400 Subject: [PATCH 62/67] Merge sigp-audit-fixes (#348) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Manuel Iñaki Bilbao Co-authored-by: ltitanb <163874448+ltitanb@users.noreply.github.com> --- Cargo.lock | 1 + crates/cli/src/docker_init.rs | 22 ++--- crates/common/src/commit/constants.rs | 1 + crates/common/src/commit/request.rs | 38 ++++++++- crates/common/src/config/constants.rs | 1 + crates/common/src/config/signer.rs | 5 +- crates/common/src/config/utils.rs | 9 +- crates/common/src/types.rs | 6 ++ crates/common/src/utils.rs | 20 ++++- crates/signer/src/error.rs | 10 +++ crates/signer/src/service.rs | 107 ++++++++++++++++++++---- docs/docs/get_started/configuration.md | 9 ++ docs/docs/get_started/running/binary.md | 1 + tests/Cargo.toml | 1 + tests/src/signer_service.rs | 20 ++++- tests/src/utils.rs | 2 + tests/tests/signer_jwt_auth.rs | 86 +++++++++++++++++-- tests/tests/signer_request_sig.rs | 5 +- 18 files changed, 297 insertions(+), 47 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 63de92dd..7eed3c0f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1596,6 +1596,7 @@ dependencies = [ "cb-pbs", "cb-signer", "eyre", + "jsonwebtoken", "reqwest", "serde_json", "tempfile", diff --git a/crates/cli/src/docker_init.rs b/crates/cli/src/docker_init.rs index 551d9245..16ba3bfe 100644 --- a/crates/cli/src/docker_init.rs +++ b/crates/cli/src/docker_init.rs @@ -6,16 +6,16 @@ use std::{ use cb_common::{ config::{ - CommitBoostConfig, LogsSettings, ModuleKind, SignerConfig, SignerType, BUILDER_PORT_ENV, - BUILDER_URLS_ENV, CHAIN_SPEC_ENV, CONFIG_DEFAULT, CONFIG_ENV, DIRK_CA_CERT_DEFAULT, - DIRK_CA_CERT_ENV, DIRK_CERT_DEFAULT, DIRK_CERT_ENV, DIRK_DIR_SECRETS_DEFAULT, - DIRK_DIR_SECRETS_ENV, DIRK_KEY_DEFAULT, DIRK_KEY_ENV, JWTS_ENV, LOGS_DIR_DEFAULT, - LOGS_DIR_ENV, METRICS_PORT_ENV, MODULE_ID_ENV, MODULE_JWT_ENV, PBS_ENDPOINT_ENV, - PBS_MODULE_NAME, PROXY_DIR_DEFAULT, PROXY_DIR_ENV, PROXY_DIR_KEYS_DEFAULT, - PROXY_DIR_KEYS_ENV, PROXY_DIR_SECRETS_DEFAULT, PROXY_DIR_SECRETS_ENV, SIGNER_DEFAULT, - SIGNER_DIR_KEYS_DEFAULT, SIGNER_DIR_KEYS_ENV, SIGNER_DIR_SECRETS_DEFAULT, - SIGNER_DIR_SECRETS_ENV, SIGNER_ENDPOINT_ENV, SIGNER_KEYS_ENV, SIGNER_MODULE_NAME, - SIGNER_PORT_DEFAULT, SIGNER_URL_ENV, + CommitBoostConfig, LogsSettings, ModuleKind, SignerConfig, SignerType, ADMIN_JWT_ENV, + BUILDER_PORT_ENV, BUILDER_URLS_ENV, CHAIN_SPEC_ENV, CONFIG_DEFAULT, CONFIG_ENV, + DIRK_CA_CERT_DEFAULT, DIRK_CA_CERT_ENV, DIRK_CERT_DEFAULT, DIRK_CERT_ENV, + DIRK_DIR_SECRETS_DEFAULT, DIRK_DIR_SECRETS_ENV, DIRK_KEY_DEFAULT, DIRK_KEY_ENV, JWTS_ENV, + LOGS_DIR_DEFAULT, LOGS_DIR_ENV, METRICS_PORT_ENV, MODULE_ID_ENV, MODULE_JWT_ENV, + PBS_ENDPOINT_ENV, PBS_MODULE_NAME, PROXY_DIR_DEFAULT, PROXY_DIR_ENV, + PROXY_DIR_KEYS_DEFAULT, PROXY_DIR_KEYS_ENV, PROXY_DIR_SECRETS_DEFAULT, + PROXY_DIR_SECRETS_ENV, SIGNER_DEFAULT, SIGNER_DIR_KEYS_DEFAULT, SIGNER_DIR_KEYS_ENV, + SIGNER_DIR_SECRETS_DEFAULT, SIGNER_DIR_SECRETS_ENV, SIGNER_ENDPOINT_ENV, SIGNER_KEYS_ENV, + SIGNER_MODULE_NAME, SIGNER_PORT_DEFAULT, SIGNER_URL_ENV, }, pbs::{BUILDER_API_PATH, GET_STATUS_PATH}, signer::{ProxyStore, SignerLoader}, @@ -333,6 +333,7 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re let mut signer_envs = IndexMap::from([ get_env_val(CONFIG_ENV, CONFIG_DEFAULT), get_env_same(JWTS_ENV), + get_env_same(ADMIN_JWT_ENV), 
]); // Bind the signer API to 0.0.0.0 @@ -366,6 +367,7 @@ pub async fn handle_docker_init(config_path: PathBuf, output_dir: PathBuf) -> Re // write jwts to env envs.insert(JWTS_ENV.into(), format_comma_separated(&jwts)); + envs.insert(ADMIN_JWT_ENV.into(), random_jwt_secret()); // volumes let mut volumes = vec![config_volume.clone()]; diff --git a/crates/common/src/commit/constants.rs b/crates/common/src/commit/constants.rs index 7c9f948c..ea9cd9bb 100644 --- a/crates/common/src/commit/constants.rs +++ b/crates/common/src/commit/constants.rs @@ -3,3 +3,4 @@ pub const REQUEST_SIGNATURE_PATH: &str = "/signer/v1/request_signature"; pub const GENERATE_PROXY_KEY_PATH: &str = "/signer/v1/generate_proxy_key"; pub const STATUS_PATH: &str = "/status"; pub const RELOAD_PATH: &str = "/reload"; +pub const REVOKE_MODULE_PATH: &str = "/revoke_jwt"; diff --git a/crates/common/src/commit/request.rs b/crates/common/src/commit/request.rs index d9286868..5d9f2d72 100644 --- a/crates/common/src/commit/request.rs +++ b/crates/common/src/commit/request.rs @@ -1,4 +1,5 @@ use std::{ + collections::HashMap, fmt::{self, Debug, Display}, str::FromStr, }; @@ -9,13 +10,17 @@ use alloy::{ rpc::types::beacon::BlsSignature, }; use derive_more::derive::From; -use serde::{Deserialize, Serialize}; +use serde::{Deserialize, Deserializer, Serialize}; use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use crate::{ - constants::COMMIT_BOOST_DOMAIN, error::BlstErrorWrapper, signature::verify_signed_message, - signer::BlsPublicKey, types::Chain, + config::decode_string_to_map, + constants::COMMIT_BOOST_DOMAIN, + error::BlstErrorWrapper, + signature::verify_signed_message, + signer::BlsPublicKey, + types::{Chain, ModuleId}, }; pub trait ProxyId: AsRef<[u8]> + Debug + Clone + Copy + TreeHash + Display {} @@ -199,6 +204,31 @@ pub struct GetPubkeysResponse { pub keys: Vec, } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ReloadRequest { + #[serde(default, deserialize_with = "deserialize_jwt_secrets")] + pub jwt_secrets: Option>, + pub admin_secret: Option, +} + +pub fn deserialize_jwt_secrets<'de, D>( + deserializer: D, +) -> Result>, D::Error> +where + D: Deserializer<'de>, +{ + let raw: String = Deserialize::deserialize(deserializer)?; + + decode_string_to_map(&raw) + .map(Some) + .map_err(|_| serde::de::Error::custom("Invalid format".to_string())) +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RevokeModuleRequest { + pub module_id: ModuleId, +} + /// Map of consensus pubkeys to proxies #[derive(Debug, Clone, Deserialize, Serialize)] pub struct ConsensusProxyMap { @@ -289,7 +319,7 @@ mod tests { let _: SignedProxyDelegationBls = serde_json::from_str(data).unwrap(); - let data = r#"{ + let data = r#"{ "message": { "delegator": "0xa3366b54f28e4bf1461926a3c70cdb0ec432b5c92554ecaae3742d33fb33873990cbed1761c68020e6d3c14d30a22050", "proxy": "0x4ca9939a8311a7cab3dde201b70157285fa81a9d" diff --git a/crates/common/src/config/constants.rs b/crates/common/src/config/constants.rs index 406f1375..39f3ed53 100644 --- a/crates/common/src/config/constants.rs +++ b/crates/common/src/config/constants.rs @@ -47,6 +47,7 @@ pub const SIGNER_JWT_AUTH_FAIL_TIMEOUT_SECONDS_DEFAULT: u32 = 5 * 60; /// Comma separated list module_id=jwt_secret pub const JWTS_ENV: &str = "CB_JWTS"; +pub const ADMIN_JWT_ENV: &str = "CB_SIGNER_ADMIN_JWT"; /// Path to json file with plaintext keys (testing only) pub const SIGNER_KEYS_ENV: &str = "CB_SIGNER_LOADER_FILE"; diff --git a/crates/common/src/config/signer.rs 
b/crates/common/src/config/signer.rs index 381b37b4..c82d2f69 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -165,6 +165,7 @@ pub struct StartSignerConfig { pub store: Option, pub endpoint: SocketAddr, pub mod_signing_configs: HashMap, + pub admin_secret: String, pub jwt_auth_fail_limit: u32, pub jwt_auth_fail_timeout_seconds: u32, pub dirk: Option, @@ -174,7 +175,7 @@ impl StartSignerConfig { pub fn load_from_env() -> Result { let config = CommitBoostConfig::from_env_path()?; - let jwt_secrets = load_jwt_secrets()?; + let (admin_secret, jwt_secrets) = load_jwt_secrets()?; // Load the module signing configs let mod_signing_configs = load_module_signing_configs(&config, &jwt_secrets) @@ -213,6 +214,7 @@ impl StartSignerConfig { loader: Some(loader), endpoint, mod_signing_configs, + admin_secret, jwt_auth_fail_limit, jwt_auth_fail_timeout_seconds, store, @@ -243,6 +245,7 @@ impl StartSignerConfig { chain: config.chain, endpoint, mod_signing_configs, + admin_secret, jwt_auth_fail_limit, jwt_auth_fail_timeout_seconds, loader: None, diff --git a/crates/common/src/config/utils.rs b/crates/common/src/config/utils.rs index 8acafa50..5e8e3a65 100644 --- a/crates/common/src/config/utils.rs +++ b/crates/common/src/config/utils.rs @@ -5,7 +5,7 @@ use eyre::{bail, Context, Result}; use serde::de::DeserializeOwned; use crate::{ - config::{JWTS_ENV, MUXER_HTTP_MAX_LENGTH}, + config::{ADMIN_JWT_ENV, JWTS_ENV, MUXER_HTTP_MAX_LENGTH}, types::ModuleId, utils::read_chunked_body_with_max, }; @@ -29,9 +29,10 @@ pub fn load_file_from_env(env: &str) -> Result { } /// Loads a map of module id -> jwt secret from a json env -pub fn load_jwt_secrets() -> Result> { +pub fn load_jwt_secrets() -> Result<(String, HashMap)> { + let admin_jwt = std::env::var(ADMIN_JWT_ENV).wrap_err(format!("{ADMIN_JWT_ENV} is not set"))?; let jwt_secrets = std::env::var(JWTS_ENV).wrap_err(format!("{JWTS_ENV} is not set"))?; - decode_string_to_map(&jwt_secrets) + decode_string_to_map(&jwt_secrets).map(|secrets| (admin_jwt, secrets)) } /// Reads an HTTP response safely, erroring out if it failed or if the body is @@ -74,7 +75,7 @@ pub fn remove_duplicate_keys(keys: Vec) -> Vec { unique_keys } -fn decode_string_to_map(raw: &str) -> Result> { +pub fn decode_string_to_map(raw: &str) -> Result> { // trim the string and split for comma raw.trim() .split(',') diff --git a/crates/common/src/types.rs b/crates/common/src/types.rs index a9c8ebfd..bbffb58a 100644 --- a/crates/common/src/types.rs +++ b/crates/common/src/types.rs @@ -24,6 +24,12 @@ pub struct JwtClaims { pub module: String, } +#[derive(Debug, Serialize, Deserialize)] +pub struct JwtAdmin { + pub exp: u64, + pub admin: bool, +} + #[derive(Clone, Copy, PartialEq, Eq)] pub enum Chain { Mainnet, diff --git a/crates/common/src/utils.rs b/crates/common/src/utils.rs index ccaf8888..7f2fbbca 100644 --- a/crates/common/src/utils.rs +++ b/crates/common/src/utils.rs @@ -30,7 +30,7 @@ use crate::{ config::LogsSettings, constants::SIGNER_JWT_EXPIRATION, pbs::HEADER_VERSION_VALUE, - types::{Chain, Jwt, JwtClaims, ModuleId}, + types::{Chain, Jwt, JwtAdmin, JwtClaims, ModuleId}, }; const MILLIS_PER_SECOND: u64 = 1_000; @@ -405,6 +405,24 @@ pub fn validate_jwt(jwt: Jwt, secret: &str) -> eyre::Result<()> { .map_err(From::from) } +/// Validate an admin JWT with the given secret +pub fn validate_admin_jwt(jwt: Jwt, secret: &str) -> eyre::Result<()> { + let mut validation = jsonwebtoken::Validation::default(); + validation.leeway = 10; + + let token = 
jsonwebtoken::decode::( + jwt.as_str(), + &jsonwebtoken::DecodingKey::from_secret(secret.as_ref()), + &validation, + )?; + + if token.claims.admin { + Ok(()) + } else { + eyre::bail!("Token is not admin") + } +} + /// Generates a random string pub fn random_jwt_secret() -> String { rand::rng().sample_iter(&Alphanumeric).take(32).map(char::from).collect() diff --git a/crates/signer/src/error.rs b/crates/signer/src/error.rs index a2a113f3..64a3e5b8 100644 --- a/crates/signer/src/error.rs +++ b/crates/signer/src/error.rs @@ -25,11 +25,17 @@ pub enum SignerModuleError { #[error("Dirk signer does not support this operation")] DirkNotSupported, + #[error("module id not found")] + ModuleIdNotFound, + #[error("internal error: {0}")] Internal(String), #[error("rate limited for {0} more seconds")] RateLimited(f64), + + #[error("request error: {0}")] + RequestError(String), } impl IntoResponse for SignerModuleError { @@ -48,9 +54,13 @@ impl IntoResponse for SignerModuleError { (StatusCode::INTERNAL_SERVER_ERROR, "internal error".to_string()) } SignerModuleError::SignerError(err) => (StatusCode::BAD_REQUEST, err.to_string()), + SignerModuleError::ModuleIdNotFound => (StatusCode::NOT_FOUND, self.to_string()), SignerModuleError::RateLimited(duration) => { (StatusCode::TOO_MANY_REQUESTS, format!("rate limited for {duration:?}")) } + SignerModuleError::RequestError(err) => { + (StatusCode::BAD_REQUEST, format!("bad request: {err}")) + } } .into_response() } diff --git a/crates/signer/src/service.rs b/crates/signer/src/service.rs index 2f20c8ba..4ecf5e75 100644 --- a/crates/signer/src/service.rs +++ b/crates/signer/src/service.rs @@ -18,17 +18,17 @@ use cb_common::{ commit::{ constants::{ GENERATE_PROXY_KEY_PATH, GET_PUBKEYS_PATH, RELOAD_PATH, REQUEST_SIGNATURE_PATH, - STATUS_PATH, + REVOKE_MODULE_PATH, STATUS_PATH, }, request::{ - EncryptionScheme, GenerateProxyRequest, GetPubkeysResponse, SignConsensusRequest, - SignProxyRequest, SignRequest, + EncryptionScheme, GenerateProxyRequest, GetPubkeysResponse, ReloadRequest, + RevokeModuleRequest, SignConsensusRequest, SignProxyRequest, SignRequest, }, }, config::{ModuleSigningConfig, StartSignerConfig}, constants::{COMMIT_BOOST_COMMIT, COMMIT_BOOST_VERSION}, types::{Chain, Jwt, ModuleId}, - utils::{decode_jwt, validate_jwt}, + utils::{decode_jwt, validate_admin_jwt, validate_jwt}, }; use cb_metrics::provider::MetricsProvider; use eyre::Context; @@ -63,7 +63,10 @@ struct SigningState { /// Map of modules ids to JWT configurations. 
This also acts as registry of /// all modules running - jwts: Arc>, + jwts: Arc>>, + + /// Secret for the admin JWT + admin_secret: Arc>, /// Map of JWT failures per peer jwt_auth_failures: Arc>>, @@ -85,7 +88,8 @@ impl SigningService { let state = SigningState { manager: Arc::new(RwLock::new(start_manager(config.clone()).await?)), - jwts: config.mod_signing_configs.into(), + jwts: Arc::new(ParkingRwLock::new(config.mod_signing_configs)), + admin_secret: Arc::new(ParkingRwLock::new(config.admin_secret)), jwt_auth_failures: Arc::new(ParkingRwLock::new(HashMap::new())), jwt_auth_fail_limit: config.jwt_auth_fail_limit, jwt_auth_fail_timeout: Duration::from_secs(config.jwt_auth_fail_timeout_seconds as u64), @@ -114,20 +118,30 @@ impl SigningService { SigningService::init_metrics(config.chain)?; - let app = axum::Router::new() + let signer_app = axum::Router::new() .route(REQUEST_SIGNATURE_PATH, post(handle_request_signature)) .route(GET_PUBKEYS_PATH, get(handle_get_pubkeys)) .route(GENERATE_PROXY_KEY_PATH, post(handle_generate_proxy)) .route_layer(middleware::from_fn_with_state(state.clone(), jwt_auth)) + .with_state(state.clone()) + .route_layer(middleware::from_fn(log_request)); + + let admin_app = axum::Router::new() .route(RELOAD_PATH, post(handle_reload)) + .route(REVOKE_MODULE_PATH, post(handle_revoke_module)) + .route_layer(middleware::from_fn_with_state(state.clone(), admin_auth)) .with_state(state.clone()) .route_layer(middleware::from_fn(log_request)) - .route(STATUS_PATH, get(handle_status)) - .into_make_service_with_connect_info::(); + .route(STATUS_PATH, get(handle_status)); let listener = TcpListener::bind(config.endpoint).await?; - axum::serve(listener, app).await.wrap_err("signer server exited") + axum::serve( + listener, + signer_app.merge(admin_app).into_make_service_with_connect_info::(), + ) + .await + .wrap_err("signer server exited") } fn init_metrics(network: Chain) -> eyre::Result<()> { @@ -215,7 +229,8 @@ fn check_jwt_auth( SignerModuleError::Unauthorized })?; - let jwt_config = state.jwts.get(&module_id).ok_or_else(|| { + let guard = state.jwts.read(); + let jwt_config = guard.get(&module_id).ok_or_else(|| { error!("Unauthorized request. Was the module started correctly?"); SignerModuleError::Unauthorized })?; @@ -227,6 +242,22 @@ fn check_jwt_auth( Ok(module_id) } +async fn admin_auth( + State(state): State, + TypedHeader(auth): TypedHeader>, + req: Request, + next: Next, +) -> Result { + let jwt: Jwt = auth.token().to_string().into(); + + validate_admin_jwt(jwt, &state.admin_secret.read()).map_err(|e| { + error!("Unauthorized request. 
Invalid JWT: {e}"); + SignerModuleError::Unauthorized + })?; + + Ok(next.run(req).await) +} + /// Requests logging middleware layer async fn log_request(req: Request, next: Next) -> Result { let url = &req.uri().clone(); @@ -268,7 +299,12 @@ async fn handle_request_signature( Json(request): Json, ) -> Result { let req_id = Uuid::new_v4(); - let signing_id = &state.jwts[&module_id].signing_id; + + let Some(signing_id) = state.jwts.read().get(&module_id).map(|m| m.signing_id) else { + error!(event = "request_signature", ?module_id, ?req_id, "Module signing ID not found"); + return Err(SignerModuleError::RequestError("Module signing ID not found".to_string())); + }; + debug!(event = "request_signature", ?module_id, %request, ?req_id, "New request"); let manager = state.manager.read().await; @@ -276,19 +312,19 @@ async fn handle_request_signature( SigningManager::Local(local_manager) => match request { SignRequest::Consensus(SignConsensusRequest { ref object_root, ref pubkey }) => { local_manager - .sign_consensus(pubkey, object_root, Some(signing_id)) + .sign_consensus(pubkey, object_root, Some(&signing_id)) .await .map(|sig| Json(sig).into_response()) } SignRequest::ProxyBls(SignProxyRequest { ref object_root, proxy: ref bls_key }) => { local_manager - .sign_proxy_bls(bls_key, object_root, Some(signing_id)) + .sign_proxy_bls(bls_key, object_root, Some(&signing_id)) .await .map(|sig| Json(sig).into_response()) } SignRequest::ProxyEcdsa(SignProxyRequest { ref object_root, proxy: ref ecdsa_key }) => { local_manager - .sign_proxy_ecdsa(ecdsa_key, object_root, Some(signing_id)) + .sign_proxy_ecdsa(ecdsa_key, object_root, Some(&signing_id)) .await .map(|sig| Json(sig).into_response()) } @@ -296,13 +332,13 @@ async fn handle_request_signature( SigningManager::Dirk(dirk_manager) => match request { SignRequest::Consensus(SignConsensusRequest { ref object_root, ref pubkey }) => { dirk_manager - .request_consensus_signature(pubkey, object_root, Some(signing_id)) + .request_consensus_signature(pubkey, object_root, Some(&signing_id)) .await .map(|sig| Json(sig).into_response()) } SignRequest::ProxyBls(SignProxyRequest { ref object_root, proxy: ref bls_key }) => { dirk_manager - .request_proxy_signature(bls_key, object_root, Some(signing_id)) + .request_proxy_signature(bls_key, object_root, Some(&signing_id)) .await .map(|sig| Json(sig).into_response()) } @@ -367,6 +403,7 @@ async fn handle_generate_proxy( async fn handle_reload( State(mut state): State, + Json(request): Json, ) -> Result { let req_id = Uuid::new_v4(); @@ -380,6 +417,31 @@ async fn handle_reload( } }; + if let Some(jwt_secrets) = request.jwt_secrets { + let mut jwt_configs = state.jwts.write(); + let mut new_configs = HashMap::new(); + for (module_id, jwt_secret) in jwt_secrets { + if let Some(signing_id) = jwt_configs.get(&module_id).map(|cfg| cfg.signing_id) { + new_configs.insert(module_id.clone(), ModuleSigningConfig { + module_name: module_id, + jwt_secret, + signing_id, + }); + } else { + let error_message = format!( + "Module {module_id} signing ID not found in commit-boost config, cannot reload" + ); + error!(event = "reload", ?req_id, module_id = %module_id, error = %error_message); + return Err(SignerModuleError::RequestError(error_message)); + } + } + *jwt_configs = new_configs; + } + + if let Some(admin_secret) = request.admin_secret { + *state.admin_secret.write() = admin_secret; + } + let new_manager = match start_manager(config).await { Ok(manager) => manager, Err(err) => { @@ -393,6 +455,17 @@ async fn handle_reload( 
Ok(StatusCode::OK) } +async fn handle_revoke_module( + State(state): State, + Json(request): Json, +) -> Result { + let mut guard = state.jwts.write(); + guard + .remove(&request.module_id) + .ok_or(SignerModuleError::ModuleIdNotFound) + .map(|_| StatusCode::OK) +} + async fn start_manager(config: StartSignerConfig) -> eyre::Result { let proxy_store = if let Some(store) = config.store.clone() { Some(store.init_from_env()?) diff --git a/docs/docs/get_started/configuration.md b/docs/docs/get_started/configuration.md index 0bfa4dc1..0c25e54b 100644 --- a/docs/docs/get_started/configuration.md +++ b/docs/docs/get_started/configuration.md @@ -432,6 +432,15 @@ Commit-Boost supports hot-reloading the configuration file. This means that you docker compose -f cb.docker-compose.yml exec cb_signer curl -X POST http://localhost:20000/reload ``` +### Signer module reload + +The signer module's reload endpoint takes two optional parameters in the JSON body: + +- `jwt_secrets`: a string with a comma-separated list of `module_id=jwt_secret` entries for all modules. +- `admin_secret`: a string with the secret for the signer admin JWT. + +Parameters that are not provided will not be updated from the request; instead, they will be regenerated from their original on-disk data, as though the signer service had been restarted. Note that any changes you made with calls to `/revoke_jwt` or `/reload` will be reverted, so make sure you provide any modifications again as part of this call. + ### Notes - The hot reload feature is available for PBS modules (both default and custom) and signer module. diff --git a/docs/docs/get_started/running/binary.md b/docs/docs/get_started/running/binary.md index 385e7a0c..97991ee5 100644 --- a/docs/docs/get_started/running/binary.md +++ b/docs/docs/get_started/running/binary.md @@ -26,6 +26,7 @@ Modules need some environment variables to work correctly. - `CB_MUX_PATH_{ID}`: optional, override where to load mux validator keys for mux with `id=\{ID\}`. ### Signer Module +- `CB_SIGNER_ADMIN_JWT`: secret to use for the admin JWT. - `CB_SIGNER_ENDPOINT`: optional, override to specify the `IP:port` endpoint to bind the signer server to. - For loading keys we currently support: - `CB_SIGNER_LOADER_FILE`: path to a `.json` with plaintext keys (for testing purposes only). 
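For illustration, a minimal sketch of how an operator-side tool might call the admin endpoints documented above, assuming a signer reachable at `http://127.0.0.1:20000`, a `CB_SIGNER_ADMIN_JWT` secret in the environment, a placeholder module id `test-module`, and the `tokio`, `reqwest`, `serde_json`, `jsonwebtoken`, and `eyre` crates. The `/reload` and `/revoke_jwt` paths, the bearer admin JWT with `exp`/`admin` claims, and the JSON body shapes follow this patch; the URL, module id, and rotated secrets are assumptions.

```rust
// Sketch only: URL, module id, and secrets are placeholder assumptions; the
// endpoint paths and payload shapes mirror the signer admin API in this patch.
#[tokio::main]
async fn main() -> eyre::Result<()> {
    let signer_url = "http://127.0.0.1:20000"; // assumed signer endpoint
    let admin_secret = std::env::var("CB_SIGNER_ADMIN_JWT")?;

    // Mint a short-lived admin JWT (claims as in JwtAdmin: `exp` + `admin`)
    let claims = serde_json::json!({
        "exp": jsonwebtoken::get_current_timestamp() + 300,
        "admin": true,
    });
    let admin_jwt = jsonwebtoken::encode(
        &jsonwebtoken::Header::default(),
        &claims,
        &jsonwebtoken::EncodingKey::from_secret(admin_secret.as_bytes()),
    )?;

    let client = reqwest::Client::new();

    // Hot-reload the signer, rotating one module JWT and the admin secret
    let reload_body = serde_json::json!({
        "jwt_secrets": "test-module=new-module-secret", // comma-separated module_id=jwt_secret
        "admin_secret": "new-admin-secret",
    });
    let res = client
        .post(format!("{signer_url}/reload"))
        .header("content-type", "application/json")
        .body(reload_body.to_string())
        .bearer_auth(&admin_jwt)
        .send()
        .await?;
    println!("reload: {}", res.status());

    // Revoke a single module's JWT; its requests should then return 401
    let revoke_body = serde_json::json!({ "module_id": "test-module" });
    let res = client
        .post(format!("{signer_url}/revoke_jwt"))
        .header("content-type", "application/json")
        .body(revoke_body.to_string())
        .bearer_auth(&admin_jwt)
        .send()
        .await?;
    println!("revoke: {}", res.status());

    Ok(())
}
```

The same flow is exercised by the integration tests added below (`create_admin_jwt` plus `reqwest` with `bearer_auth`), which is what this sketch is modeled on.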
diff --git a/tests/Cargo.toml b/tests/Cargo.toml index f1b5c9d9..573cfa20 100644 --- a/tests/Cargo.toml +++ b/tests/Cargo.toml @@ -11,6 +11,7 @@ cb-common.workspace = true cb-pbs.workspace = true cb-signer.workspace = true eyre.workspace = true +jsonwebtoken.workspace = true reqwest.workspace = true serde_json.workspace = true tempfile.workspace = true diff --git a/tests/src/signer_service.rs b/tests/src/signer_service.rs index c31e5a1c..5270e2a8 100644 --- a/tests/src/signer_service.rs +++ b/tests/src/signer_service.rs @@ -4,8 +4,9 @@ use alloy::{hex, primitives::FixedBytes}; use cb_common::{ commit::request::GetPubkeysResponse, config::{ModuleSigningConfig, StartSignerConfig}, + constants::SIGNER_JWT_EXPIRATION, signer::{SignerLoader, ValidatorKeysFormat}, - types::{Chain, ModuleId}, + types::{Chain, Jwt, JwtAdmin, ModuleId}, }; use cb_signer::service::SigningService; use eyre::Result; @@ -19,6 +20,7 @@ use crate::utils::{get_signer_config, get_start_signer_config}; pub async fn start_server( port: u16, mod_signing_configs: &HashMap, + admin_secret: String, ) -> Result { let chain = Chain::Hoodi; @@ -32,7 +34,7 @@ pub async fn start_server( config.port = port; config.jwt_auth_fail_limit = 3; // Set a low fail limit for testing config.jwt_auth_fail_timeout_seconds = 3; // Set a short timeout for testing - let start_config = get_start_signer_config(config, chain, mod_signing_configs); + let start_config = get_start_signer_config(config, chain, mod_signing_configs, admin_secret); // Run the Signer let server_handle = tokio::spawn(SigningService::run(start_config.clone())); @@ -69,3 +71,17 @@ pub async fn verify_pubkeys(response: Response) -> Result<()> { } Ok(()) } + +// Creates a JWT for module administration +pub fn create_admin_jwt(admin_secret: String) -> Result { + jsonwebtoken::encode( + &jsonwebtoken::Header::default(), + &JwtAdmin { + admin: true, + exp: jsonwebtoken::get_current_timestamp() + SIGNER_JWT_EXPIRATION, + }, + &jsonwebtoken::EncodingKey::from_secret(admin_secret.as_ref()), + ) + .map_err(Into::into) + .map(Jwt::from) +} diff --git a/tests/src/utils.rs b/tests/src/utils.rs index b897aa49..0493040c 100644 --- a/tests/src/utils.rs +++ b/tests/src/utils.rs @@ -138,6 +138,7 @@ pub fn get_start_signer_config( signer_config: SignerConfig, chain: Chain, mod_signing_configs: &HashMap, + admin_secret: String, ) -> StartSignerConfig { match signer_config.inner { SignerType::Local { loader, .. 
} => StartSignerConfig { @@ -146,6 +147,7 @@ pub fn get_start_signer_config( store: None, endpoint: SocketAddr::new(signer_config.host.into(), signer_config.port), mod_signing_configs: mod_signing_configs.clone(), + admin_secret, jwt_auth_fail_limit: signer_config.jwt_auth_fail_limit, jwt_auth_fail_timeout_seconds: signer_config.jwt_auth_fail_timeout_seconds, dirk: None, diff --git a/tests/tests/signer_jwt_auth.rs b/tests/tests/signer_jwt_auth.rs index cb825624..63f0783f 100644 --- a/tests/tests/signer_jwt_auth.rs +++ b/tests/tests/signer_jwt_auth.rs @@ -2,13 +2,13 @@ use std::{collections::HashMap, time::Duration}; use alloy::primitives::b256; use cb_common::{ - commit::constants::GET_PUBKEYS_PATH, + commit::constants::{GET_PUBKEYS_PATH, REVOKE_MODULE_PATH}, config::{load_module_signing_configs, ModuleSigningConfig}, types::ModuleId, utils::create_jwt, }; use cb_tests::{ - signer_service::{start_server, verify_pubkeys}, + signer_service::{create_admin_jwt, start_server, verify_pubkeys}, utils::{self, setup_test_env}, }; use eyre::Result; @@ -16,6 +16,8 @@ use reqwest::StatusCode; use tracing::info; const JWT_MODULE: &str = "test-module"; +const JWT_SECRET: &str = "test-jwt-secret"; +const ADMIN_SECRET: &str = "test-admin-secret"; async fn create_mod_signing_configs() -> HashMap { let mut cfg = @@ -26,7 +28,7 @@ async fn create_mod_signing_configs() -> HashMap cfg.modules = Some(vec![utils::create_module_config(module_id.clone(), signing_id)]); - let jwts = HashMap::from([(module_id.clone(), "supersecret".to_string())]); + let jwts = HashMap::from([(module_id.clone(), JWT_SECRET.to_string())]); load_module_signing_configs(&cfg, &jwts).unwrap() } @@ -36,7 +38,7 @@ async fn test_signer_jwt_auth_success() -> Result<()> { setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); let mod_cfgs = create_mod_signing_configs().await; - let start_config = start_server(20100, &mod_cfgs).await?; + let start_config = start_server(20100, &mod_cfgs, ADMIN_SECRET.to_string()).await?; let jwt_config = mod_cfgs.get(&module_id).expect("JWT config for test module not found"); // Run a pubkeys request @@ -56,7 +58,7 @@ async fn test_signer_jwt_auth_fail() -> Result<()> { setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); let mod_cfgs = create_mod_signing_configs().await; - let start_config = start_server(20101, &mod_cfgs).await?; + let start_config = start_server(20101, &mod_cfgs, ADMIN_SECRET.to_string()).await?; // Run a pubkeys request - this should fail due to invalid JWT let jwt = create_jwt(&module_id, "incorrect secret")?; @@ -77,7 +79,7 @@ async fn test_signer_jwt_rate_limit() -> Result<()> { setup_test_env(); let module_id = ModuleId(JWT_MODULE.to_string()); let mod_cfgs = create_mod_signing_configs().await; - let start_config = start_server(20102, &mod_cfgs).await?; + let start_config = start_server(20102, &mod_cfgs, ADMIN_SECRET.to_string()).await?; let mod_cfg = mod_cfgs.get(&module_id).expect("JWT config for test module not found"); // Run as many pubkeys requests as the fail limit @@ -104,3 +106,75 @@ async fn test_signer_jwt_rate_limit() -> Result<()> { Ok(()) } + +#[tokio::test] +async fn test_signer_revoked_jwt_fail() -> Result<()> { + setup_test_env(); + let admin_secret = ADMIN_SECRET.to_string(); + let module_id = ModuleId(JWT_MODULE.to_string()); + let mod_cfgs = create_mod_signing_configs().await; + let start_config = start_server(20400, &mod_cfgs, admin_secret.clone()).await?; + + // Run as many pubkeys requests as the fail limit + let jwt = 
create_jwt(&module_id, JWT_SECRET)?; + let admin_jwt = create_admin_jwt(admin_secret)?; + let client = reqwest::Client::new(); + + // At first, test module should be allowed to request pubkeys + let url = format!("http://{}{}", start_config.endpoint, GET_PUBKEYS_PATH); + let response = client.get(&url).bearer_auth(&jwt).send().await?; + assert!(response.status() == StatusCode::OK); + + let revoke_url = format!("http://{}{}", start_config.endpoint, REVOKE_MODULE_PATH); + let response = client + .post(&revoke_url) + .header("content-type", "application/json") + .body(reqwest::Body::wrap(format!("{{\"module_id\": \"{JWT_MODULE}\"}}"))) + .bearer_auth(&admin_jwt) + .send() + .await?; + assert!(response.status() == StatusCode::OK); + + // After revoke, test module shouldn't be allowed anymore + let response = client.get(&url).bearer_auth(&jwt).send().await?; + assert!(response.status() == StatusCode::UNAUTHORIZED); + + Ok(()) +} + +#[tokio::test] +async fn test_signer_only_admin_can_revoke() -> Result<()> { + setup_test_env(); + let admin_secret = ADMIN_SECRET.to_string(); + let module_id = ModuleId(JWT_MODULE.to_string()); + let mod_cfgs = create_mod_signing_configs().await; + let start_config = start_server(20500, &mod_cfgs, admin_secret.clone()).await?; + + // Run as many pubkeys requests as the fail limit + let jwt = create_jwt(&module_id, JWT_SECRET)?; + let admin_jwt = create_admin_jwt(admin_secret)?; + let client = reqwest::Client::new(); + let url = format!("http://{}{}", start_config.endpoint, REVOKE_MODULE_PATH); + + // Module JWT shouldn't be able to revoke modules + let response = client + .post(&url) + .header("content-type", "application/json") + .body(reqwest::Body::wrap(format!("{{\"module_id\": \"{JWT_MODULE}\"}}"))) + .bearer_auth(&jwt) + .send() + .await?; + assert!(response.status() == StatusCode::UNAUTHORIZED); + + // Admin should be able to revoke modules + let response = client + .post(&url) + .header("content-type", "application/json") + .body(reqwest::Body::wrap(format!("{{\"module_id\": \"{JWT_MODULE}\"}}"))) + .bearer_auth(&admin_jwt) + .send() + .await?; + assert!(response.status() == StatusCode::OK); + + Ok(()) +} diff --git a/tests/tests/signer_request_sig.rs b/tests/tests/signer_request_sig.rs index 26378f67..868a1f71 100644 --- a/tests/tests/signer_request_sig.rs +++ b/tests/tests/signer_request_sig.rs @@ -24,6 +24,7 @@ const MODULE_ID_1: &str = "test-module"; const MODULE_ID_2: &str = "another-module"; const PUBKEY_1: [u8; 48] = hex!("883827193f7627cd04e621e1e8d56498362a52b2a30c9a1c72036eb935c4278dee23d38a24d2f7dda62689886f0c39f4"); +const ADMIN_SECRET: &str = "test-admin-secret"; async fn create_mod_signing_configs() -> HashMap { let mut cfg = @@ -54,7 +55,7 @@ async fn test_signer_sign_request_good() -> Result<()> { setup_test_env(); let module_id = ModuleId(MODULE_ID_1.to_string()); let mod_cfgs = create_mod_signing_configs().await; - let start_config = start_server(20200, &mod_cfgs).await?; + let start_config = start_server(20200, &mod_cfgs, ADMIN_SECRET.to_string()).await?; let jwt_config = mod_cfgs.get(&module_id).expect("JWT config for test module not found"); // Send a signing request @@ -86,7 +87,7 @@ async fn test_signer_sign_request_different_module() -> Result<()> { setup_test_env(); let module_id = ModuleId(MODULE_ID_2.to_string()); let mod_cfgs = create_mod_signing_configs().await; - let start_config = start_server(20201, &mod_cfgs).await?; + let start_config = start_server(20201, &mod_cfgs, ADMIN_SECRET.to_string()).await?; let jwt_config = 
mod_cfgs.get(&module_id).expect("JWT config for 2nd test module not found"); // Send a signing request From ce29c3af368ef6af9ff39fdf2327559deec40686 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Wed, 30 Jul 2025 15:13:37 -0400 Subject: [PATCH 63/67] Move from [u8; 32] to B256 everywhere (#347) --- crates/common/src/commit/request.rs | 4 +- crates/common/src/pbs/types/beacon_block.rs | 6 +- .../common/src/pbs/types/execution_payload.rs | 2 +- crates/common/src/pbs/types/get_header.rs | 4 +- crates/common/src/signature.rs | 109 ++++++++--------- crates/common/src/signer/schemes/bls.rs | 14 +-- crates/common/src/signer/schemes/ecdsa.rs | 54 +++++---- crates/common/src/signer/store.rs | 8 +- crates/common/src/types.rs | 36 +++--- crates/common/src/utils.rs | 2 +- crates/pbs/src/mev_boost/get_header.rs | 4 +- crates/signer/src/manager/dirk.rs | 31 +++-- crates/signer/src/manager/local.rs | 112 ++++++++---------- examples/da_commit/src/main.rs | 6 +- tests/src/mock_relay.rs | 4 +- tests/tests/payloads.rs | 18 ++- tests/tests/pbs_get_header.rs | 6 +- tests/tests/pbs_get_status.rs | 4 +- tests/tests/pbs_mux.rs | 2 +- tests/tests/pbs_post_blinded_blocks.rs | 4 +- 20 files changed, 204 insertions(+), 226 deletions(-) diff --git a/crates/common/src/commit/request.rs b/crates/common/src/commit/request.rs index 5d9f2d72..5bc3a14b 100644 --- a/crates/common/src/commit/request.rs +++ b/crates/common/src/commit/request.rs @@ -6,7 +6,7 @@ use std::{ use alloy::{ hex, - primitives::{Address, B256}, + primitives::{aliases::B32, Address, B256}, rpc::types::beacon::BlsSignature, }; use derive_more::derive::From; @@ -63,7 +63,7 @@ impl SignedProxyDelegation { &self.message, &self.signature, None, - COMMIT_BOOST_DOMAIN, + &B32::from(COMMIT_BOOST_DOMAIN), ) } } diff --git a/crates/common/src/pbs/types/beacon_block.rs b/crates/common/src/pbs/types/beacon_block.rs index f377123a..485876e9 100644 --- a/crates/common/src/pbs/types/beacon_block.rs +++ b/crates/common/src/pbs/types/beacon_block.rs @@ -99,7 +99,7 @@ mod tests { // this is from mev-boost test data fn test_signed_blinded_block_fb_electra() { let data = include_str!("testdata/signed-blinded-beacon-block-electra.json"); - let block = test_encode_decode::(&data); + let block = test_encode_decode::(data); assert!(matches!(block.message, BlindedBeaconBlock::Electra(_))); } @@ -166,7 +166,7 @@ mod tests { // this is dummy data generated with https://github.com/attestantio/go-eth2-client fn test_signed_blinded_block_ssz() { let data_json = include_str!("testdata/signed-blinded-beacon-block-electra-2.json"); - let block_json = test_encode_decode::(&data_json); + let block_json = test_encode_decode::(data_json); assert!(matches!(block_json.message, BlindedBeaconBlock::Electra(_))); let data_ssz = include_bytes!("testdata/signed-blinded-beacon-block-electra-2.ssz"); @@ -181,7 +181,7 @@ mod tests { // this is dummy data generated with https://github.com/attestantio/go-builder-client fn test_execution_payload_block_ssz() { let data_json = include_str!("testdata/execution-payload-electra.json"); - let block_json = test_encode_decode::(&data_json); + let block_json = test_encode_decode::(data_json); let data_ssz = include_bytes!("testdata/execution-payload-electra.ssz"); let data_ssz = alloy::primitives::hex::decode(data_ssz).unwrap(); diff --git a/crates/common/src/pbs/types/execution_payload.rs b/crates/common/src/pbs/types/execution_payload.rs index f851e8da..fcf4cdda 100644 --- a/crates/common/src/pbs/types/execution_payload.rs +++ 
b/crates/common/src/pbs/types/execution_payload.rs @@ -129,7 +129,7 @@ mod tests { "excess_blob_gas": "95158272" }"#; - let parsed = test_encode_decode::>(&data); + let parsed = test_encode_decode::>(data); assert_eq!( parsed.parent_hash, diff --git a/crates/common/src/pbs/types/get_header.rs b/crates/common/src/pbs/types/get_header.rs index 69437f45..c5e40a21 100644 --- a/crates/common/src/pbs/types/get_header.rs +++ b/crates/common/src/pbs/types/get_header.rs @@ -94,7 +94,7 @@ pub struct ExecutionPayloadHeaderMessageElectra { #[cfg(test)] mod tests { - use alloy::primitives::U256; + use alloy::primitives::{aliases::B32, U256}; use super::*; use crate::{ @@ -177,7 +177,7 @@ mod tests { &parsed.message, &parsed.signature, None, - APPLICATION_BUILDER_DOMAIN + &B32::from(APPLICATION_BUILDER_DOMAIN) ) .is_ok()) } diff --git a/crates/common/src/signature.rs b/crates/common/src/signature.rs index 19fade8f..fbb6021d 100644 --- a/crates/common/src/signature.rs +++ b/crates/common/src/signature.rs @@ -1,5 +1,5 @@ use alloy::{ - primitives::{Address, B256}, + primitives::{aliases::B32, Address, B256}, rpc::types::beacon::{constants::BLS_DST_SIG, BlsPublicKey, BlsSignature}, }; use tree_hash::TreeHash; @@ -17,33 +17,36 @@ pub fn sign_message(secret_key: &BlsSecretKey, msg: &[u8]) -> BlsSignature { BlsSignature::from_slice(&signature) } -pub fn compute_signing_root(signing_data: &T) -> [u8; 32] { - signing_data.tree_hash_root().0 +pub fn compute_tree_hash_root(signing_data: &T) -> B256 { + signing_data.tree_hash_root() } pub fn compute_prop_commit_signing_root( chain: Chain, - object_root: [u8; 32], - module_signing_id: Option<[u8; 32]>, - domain_mask: [u8; 4], -) -> [u8; 32] { + object_root: &B256, + module_signing_id: Option<&B256>, + domain_mask: &B32, +) -> B256 { let domain = compute_domain(chain, domain_mask); match module_signing_id { - Some(id) => compute_signing_root(&types::SigningData { - object_root: compute_signing_root(&types::PropCommitSigningInfo { - data: object_root, - module_signing_id: id, + Some(id) => compute_tree_hash_root(&types::SigningData { + object_root: compute_tree_hash_root(&types::PropCommitSigningInfo { + data: *object_root, + module_signing_id: *id, }), signing_domain: domain, }), - None => compute_signing_root(&types::SigningData { object_root, signing_domain: domain }), + None => compute_tree_hash_root(&types::SigningData { + object_root: *object_root, + signing_domain: domain, + }), } } // NOTE: this currently works only for builder domain signatures and // verifications // ref: https://github.com/ralexstokes/ethereum-consensus/blob/cf3c404043230559660810bc0c9d6d5a8498d819/ethereum-consensus/src/builder/mod.rs#L26-L29 -pub fn compute_domain(chain: Chain, domain_mask: [u8; 4]) -> [u8; 32] { +pub fn compute_domain(chain: Chain, domain_mask: &B32) -> B256 { #[derive(Debug, TreeHash)] struct ForkData { fork_version: [u8; 4], @@ -51,7 +54,7 @@ pub fn compute_domain(chain: Chain, domain_mask: [u8; 4]) -> [u8; 32] { } let mut domain = [0u8; 32]; - domain[..4].copy_from_slice(&domain_mask); + domain[..4].copy_from_slice(&domain_mask.0); let fork_version = chain.genesis_fork_version(); let fd = ForkData { fork_version, genesis_validators_root: GENESIS_VALIDATORS_ROOT }; @@ -59,7 +62,7 @@ pub fn compute_domain(chain: Chain, domain_mask: [u8; 4]) -> [u8; 32] { domain[4..].copy_from_slice(&fork_data_root[..28]); - domain + B256::from(domain) } pub fn verify_signed_message( @@ -68,15 +71,15 @@ pub fn verify_signed_message( msg: &T, signature: &BlsSignature, module_signing_id: 
Option<&B256>, - domain_mask: [u8; 4], + domain_mask: &B32, ) -> Result<(), BlstErrorWrapper> { let signing_root = compute_prop_commit_signing_root( chain, - compute_signing_root(msg), - module_signing_id.map(|id| id.0), + &compute_tree_hash_root(msg), + module_signing_id, domain_mask, ); - verify_bls_signature(pubkey, &signing_root, signature) + verify_bls_signature(pubkey, signing_root.as_slice(), signature) } /// Signs a message with the Beacon builder domain. @@ -85,36 +88,36 @@ pub fn sign_builder_message( secret_key: &BlsSecretKey, msg: &impl TreeHash, ) -> BlsSignature { - sign_builder_root(chain, secret_key, msg.tree_hash_root().0) + sign_builder_root(chain, secret_key, &msg.tree_hash_root()) } pub fn sign_builder_root( chain: Chain, secret_key: &BlsSecretKey, - object_root: [u8; 32], + object_root: &B256, ) -> BlsSignature { let domain = chain.builder_domain(); let signing_data = types::SigningData { - object_root: compute_signing_root(&object_root), + object_root: compute_tree_hash_root(object_root), signing_domain: domain, }; - let signing_root = compute_signing_root(&signing_data); - sign_message(secret_key, &signing_root) + let signing_root = compute_tree_hash_root(&signing_data); + sign_message(secret_key, signing_root.as_slice()) } pub fn sign_commit_boost_root( chain: Chain, secret_key: &BlsSecretKey, - object_root: [u8; 32], - module_signing_id: Option<[u8; 32]>, + object_root: &B256, + module_signing_id: Option<&B256>, ) -> BlsSignature { let signing_root = compute_prop_commit_signing_root( chain, object_root, module_signing_id, - COMMIT_BOOST_DOMAIN, + &B32::from(COMMIT_BOOST_DOMAIN), ); - sign_message(secret_key, &signing_root) + sign_message(secret_key, signing_root.as_slice()) } // ============================== @@ -128,18 +131,18 @@ pub fn verify_proposer_commitment_signature_bls( pubkey: &BlsPublicKey, msg: &impl TreeHash, signature: &BlsSignature, - module_signing_id: B256, + module_signing_id: &B256, ) -> Result<(), BlstErrorWrapper> { - let object_root = msg.tree_hash_root().0; - let domain = compute_domain(chain, COMMIT_BOOST_DOMAIN); - let signing_root = compute_signing_root(&types::SigningData { - object_root: compute_signing_root(&types::PropCommitSigningInfo { + let object_root = msg.tree_hash_root(); + let domain = compute_domain(chain, &B32::from(COMMIT_BOOST_DOMAIN)); + let signing_root = compute_tree_hash_root(&types::SigningData { + object_root: compute_tree_hash_root(&types::PropCommitSigningInfo { data: object_root, module_signing_id: *module_signing_id, }), signing_domain: domain, }); - verify_bls_signature(pubkey, &signing_root, signature) + verify_bls_signature(pubkey, signing_root.as_slice(), signature) } /// Verifies that a proposer commitment signature was generated by the given @@ -149,12 +152,12 @@ pub fn verify_proposer_commitment_signature_ecdsa( address: &Address, msg: &impl TreeHash, signature: &EcdsaSignature, - module_signing_id: B256, + module_signing_id: &B256, ) -> Result<(), eyre::Report> { - let object_root = msg.tree_hash_root().0; - let domain = compute_domain(chain, COMMIT_BOOST_DOMAIN); - let signing_root = compute_signing_root(&types::SigningData { - object_root: compute_signing_root(&types::PropCommitSigningInfo { + let object_root = msg.tree_hash_root(); + let domain = compute_domain(chain, &B32::from(COMMIT_BOOST_DOMAIN)); + let signing_root = compute_tree_hash_root(&types::SigningData { + object_root: compute_tree_hash_root(&types::PropCommitSigningInfo { data: object_root, module_signing_id: *module_signing_id, }), @@ 
-170,30 +173,18 @@ pub fn verify_proposer_commitment_signature_ecdsa( #[cfg(test)] mod tests { + use alloy::primitives::aliases::B32; + use super::compute_domain; use crate::{constants::APPLICATION_BUILDER_DOMAIN, types::Chain}; #[test] fn test_builder_domains() { - assert_eq!( - compute_domain(Chain::Mainnet, APPLICATION_BUILDER_DOMAIN), - Chain::Mainnet.builder_domain() - ); - assert_eq!( - compute_domain(Chain::Holesky, APPLICATION_BUILDER_DOMAIN), - Chain::Holesky.builder_domain() - ); - assert_eq!( - compute_domain(Chain::Sepolia, APPLICATION_BUILDER_DOMAIN), - Chain::Sepolia.builder_domain() - ); - assert_eq!( - compute_domain(Chain::Helder, APPLICATION_BUILDER_DOMAIN), - Chain::Helder.builder_domain() - ); - assert_eq!( - compute_domain(Chain::Hoodi, APPLICATION_BUILDER_DOMAIN), - Chain::Hoodi.builder_domain() - ); + let domain = &B32::from(APPLICATION_BUILDER_DOMAIN); + assert_eq!(compute_domain(Chain::Mainnet, domain), Chain::Mainnet.builder_domain()); + assert_eq!(compute_domain(Chain::Holesky, domain), Chain::Holesky.builder_domain()); + assert_eq!(compute_domain(Chain::Sepolia, domain), Chain::Sepolia.builder_domain()); + assert_eq!(compute_domain(Chain::Helder, domain), Chain::Helder.builder_domain()); + assert_eq!(compute_domain(Chain::Hoodi, domain), Chain::Hoodi.builder_domain()); } } diff --git a/crates/common/src/signer/schemes/bls.rs b/crates/common/src/signer/schemes/bls.rs index f3a511e7..15367f36 100644 --- a/crates/common/src/signer/schemes/bls.rs +++ b/crates/common/src/signer/schemes/bls.rs @@ -1,5 +1,5 @@ -use alloy::rpc::types::beacon::constants::BLS_DST_SIG; pub use alloy::rpc::types::beacon::BlsSignature; +use alloy::{primitives::B256, rpc::types::beacon::constants::BLS_DST_SIG}; use blst::BLST_ERROR; use tree_hash::TreeHash; @@ -32,17 +32,17 @@ impl BlsSigner { } } - pub fn secret(&self) -> [u8; 32] { + pub fn secret(&self) -> B256 { match self { - BlsSigner::Local(secret) => secret.clone().to_bytes(), + BlsSigner::Local(secret) => B256::from(secret.clone().to_bytes()), } } pub async fn sign( &self, chain: Chain, - object_root: [u8; 32], - module_signing_id: Option<[u8; 32]>, + object_root: &B256, + module_signing_id: Option<&B256>, ) -> BlsSignature { match self { BlsSigner::Local(sk) => { @@ -55,9 +55,9 @@ impl BlsSigner { &self, chain: Chain, msg: &impl TreeHash, - module_signing_id: Option<[u8; 32]>, + module_signing_id: Option<&B256>, ) -> BlsSignature { - self.sign(chain, msg.tree_hash_root().0, module_signing_id).await + self.sign(chain, &msg.tree_hash_root(), module_signing_id).await } } diff --git a/crates/common/src/signer/schemes/ecdsa.rs b/crates/common/src/signer/schemes/ecdsa.rs index 73bf7272..53911141 100644 --- a/crates/common/src/signer/schemes/ecdsa.rs +++ b/crates/common/src/signer/schemes/ecdsa.rs @@ -1,7 +1,7 @@ use std::{ops::Deref, str::FromStr}; use alloy::{ - primitives::{Address, PrimitiveSignature}, + primitives::{aliases::B32, Address, PrimitiveSignature, B256}, signers::{local::PrivateKeySigner, SignerSync}, }; use eyre::ensure; @@ -9,7 +9,7 @@ use tree_hash::TreeHash; use crate::{ constants::COMMIT_BOOST_DOMAIN, - signature::{compute_domain, compute_signing_root}, + signature::{compute_domain, compute_tree_hash_root}, types::{self, Chain}, }; @@ -86,27 +86,29 @@ impl EcdsaSigner { pub async fn sign( &self, chain: Chain, - object_root: [u8; 32], - module_signing_id: Option<[u8; 32]>, + object_root: &B256, + module_signing_id: Option<&B256>, ) -> Result { match self { EcdsaSigner::Local(sk) => { - let domain = compute_domain(chain, 
COMMIT_BOOST_DOMAIN); + let domain = compute_domain(chain, &B32::from(COMMIT_BOOST_DOMAIN)); let signing_root = match module_signing_id { Some(id) => { let signing_data = types::SigningData { - object_root: compute_signing_root(&types::PropCommitSigningInfo { - data: object_root, - module_signing_id: id, + object_root: compute_tree_hash_root(&types::PropCommitSigningInfo { + data: *object_root, + module_signing_id: *id, }), signing_domain: domain, }; - compute_signing_root(&signing_data).into() + compute_tree_hash_root(&signing_data) } None => { - let signing_data = - types::SigningData { object_root, signing_domain: domain }; - compute_signing_root(&signing_data).into() + let signing_data = types::SigningData { + object_root: *object_root, + signing_domain: domain, + }; + compute_tree_hash_root(&signing_data) } }; sk.sign_hash_sync(&signing_root).map(EcdsaSignature::from) @@ -117,18 +119,18 @@ impl EcdsaSigner { &self, chain: Chain, msg: &impl TreeHash, - module_signing_id: Option<[u8; 32]>, + module_signing_id: Option<&B256>, ) -> Result { - self.sign(chain, msg.tree_hash_root().0, module_signing_id).await + self.sign(chain, &msg.tree_hash_root(), module_signing_id).await } } pub fn verify_ecdsa_signature( address: &Address, - msg: &[u8; 32], + msg: &B256, signature: &EcdsaSignature, ) -> eyre::Result<()> { - let recovered = signature.recover_address_from_prehash(msg.into())?; + let recovered = signature.recover_address_from_prehash(msg)?; ensure!(recovered == *address, "invalid signature"); Ok(()) } @@ -145,12 +147,12 @@ mod test { let pk = bytes!("88bcd6672d95bcba0d52a3146494ed4d37675af4ed2206905eb161aa99a6c0d1"); let signer = EcdsaSigner::new_from_bytes(&pk).unwrap(); - let object_root = [1; 32]; - let signature = signer.sign(Chain::Holesky, object_root, None).await.unwrap(); + let object_root = B256::from([1; 32]); + let signature = signer.sign(Chain::Holesky, &object_root, None).await.unwrap(); - let domain = compute_domain(Chain::Holesky, COMMIT_BOOST_DOMAIN); + let domain = compute_domain(Chain::Holesky, &B32::from(COMMIT_BOOST_DOMAIN)); let signing_data = types::SigningData { object_root, signing_domain: domain }; - let msg = compute_signing_root(&signing_data); + let msg = compute_tree_hash_root(&signing_data); assert_eq!(msg, hex!("219ca7a673b2cbbf67bec6c9f60f78bd051336d57b68d1540190f30667e86725")); @@ -164,20 +166,20 @@ mod test { let pk = bytes!("88bcd6672d95bcba0d52a3146494ed4d37675af4ed2206905eb161aa99a6c0d1"); let signer = EcdsaSigner::new_from_bytes(&pk).unwrap(); - let object_root = [1; 32]; - let module_signing_id = [2; 32]; + let object_root = B256::from([1; 32]); + let module_signing_id = B256::from([2; 32]); let signature = - signer.sign(Chain::Hoodi, object_root, Some(module_signing_id)).await.unwrap(); + signer.sign(Chain::Hoodi, &object_root, Some(&module_signing_id)).await.unwrap(); - let domain = compute_domain(Chain::Hoodi, COMMIT_BOOST_DOMAIN); + let domain = compute_domain(Chain::Hoodi, &B32::from(COMMIT_BOOST_DOMAIN)); let signing_data = types::SigningData { - object_root: compute_signing_root(&types::PropCommitSigningInfo { + object_root: compute_tree_hash_root(&types::PropCommitSigningInfo { data: object_root, module_signing_id, }), signing_domain: domain, }; - let msg = compute_signing_root(&signing_data); + let msg = compute_tree_hash_root(&signing_data); assert_eq!(msg, hex!("8cd49ccf2f9b0297796ff96ce5f7c5d26e20a59d0032ee2ad6249dcd9682b808")); diff --git a/crates/common/src/signer/store.rs b/crates/common/src/signer/store.rs index 9e251dd9..834f4bd8 
100644 --- a/crates/common/src/signer/store.rs +++ b/crates/common/src/signer/store.rs @@ -533,7 +533,7 @@ mod test { proxy: proxy_signer.pubkey(), }; let signature = - consensus_signer.sign(Chain::Mainnet, message.tree_hash_root().0, None).await; + consensus_signer.sign(Chain::Mainnet, &message.tree_hash_root(), None).await; let delegation = SignedProxyDelegationBls { signature, message }; let proxy_signer = BlsProxySigner { signer: proxy_signer, delegation }; @@ -543,12 +543,12 @@ mod test { .join(consensus_signer.pubkey().to_string()) .join("TEST_MODULE") .join("bls") - .join(format!("{}.json", proxy_signer.pubkey().to_string())); + .join(format!("{}.json", proxy_signer.pubkey())); let sig_path = keys_path .join(consensus_signer.pubkey().to_string()) .join("TEST_MODULE") .join("bls") - .join(format!("{}.sig", proxy_signer.pubkey().to_string())); + .join(format!("{}.sig", proxy_signer.pubkey())); let pass_path = secrets_path .join(consensus_signer.pubkey().to_string()) .join("TEST_MODULE") @@ -647,7 +647,7 @@ mod test { proxy: proxy_signer.pubkey(), }; let signature = - consensus_signer.sign(Chain::Mainnet, message.tree_hash_root().0, None).await; + consensus_signer.sign(Chain::Mainnet, &message.tree_hash_root(), None).await; let delegation = SignedProxyDelegationBls { signature, message }; let proxy_signer = BlsProxySigner { signer: proxy_signer, delegation }; diff --git a/crates/common/src/types.rs b/crates/common/src/types.rs index bbffb58a..c747815b 100644 --- a/crates/common/src/types.rs +++ b/crates/common/src/types.rs @@ -1,6 +1,6 @@ use std::path::PathBuf; -use alloy::primitives::{hex, Bytes}; +use alloy::primitives::{aliases::B32, hex, Bytes, B256}; use derive_more::{Deref, Display, From, Into}; use eyre::{bail, Context}; use serde::{Deserialize, Serialize}; @@ -85,14 +85,14 @@ impl Chain { } } - pub fn builder_domain(&self) -> [u8; 32] { + pub fn builder_domain(&self) -> B256 { match self { Chain::Mainnet => KnownChain::Mainnet.builder_domain(), Chain::Holesky => KnownChain::Holesky.builder_domain(), Chain::Sepolia => KnownChain::Sepolia.builder_domain(), Chain::Helder => KnownChain::Helder.builder_domain(), Chain::Hoodi => KnownChain::Hoodi.builder_domain(), - Chain::Custom { .. } => compute_domain(*self, APPLICATION_BUILDER_DOMAIN), + Chain::Custom { .. 
} => compute_domain(*self, &B32::from(APPLICATION_BUILDER_DOMAIN)), } } @@ -156,28 +156,28 @@ impl KnownChain { } } - pub fn builder_domain(&self) -> [u8; 32] { + pub fn builder_domain(&self) -> B256 { match self { - KnownChain::Mainnet => [ + KnownChain::Mainnet => B256::from([ 0, 0, 0, 1, 245, 165, 253, 66, 209, 106, 32, 48, 39, 152, 239, 110, 211, 9, 151, 155, 67, 0, 61, 35, 32, 217, 240, 232, 234, 152, 49, 169, - ], - KnownChain::Holesky => [ + ]), + KnownChain::Holesky => B256::from([ 0, 0, 0, 1, 91, 131, 162, 55, 89, 197, 96, 178, 208, 198, 69, 118, 225, 220, 252, 52, 234, 148, 196, 152, 143, 62, 13, 159, 119, 240, 83, 135, - ], - KnownChain::Sepolia => [ + ]), + KnownChain::Sepolia => B256::from([ 0, 0, 0, 1, 211, 1, 7, 120, 205, 8, 238, 81, 75, 8, 254, 103, 182, 197, 3, 181, 16, 152, 122, 76, 228, 63, 66, 48, 109, 151, 198, 124, - ], - KnownChain::Helder => [ + ]), + KnownChain::Helder => B256::from([ 0, 0, 0, 1, 148, 196, 26, 244, 132, 255, 247, 150, 73, 105, 224, 189, 217, 34, 248, 45, 255, 15, 75, 232, 122, 96, 208, 102, 76, 201, 209, 255, - ], - KnownChain::Hoodi => [ + ]), + KnownChain::Hoodi => B256::from([ 0, 0, 0, 1, 113, 145, 3, 81, 30, 250, 79, 19, 98, 255, 42, 80, 153, 108, 204, 243, 41, 204, 132, 203, 65, 12, 94, 92, 125, 53, 29, 3, - ], + ]), } } @@ -293,8 +293,8 @@ impl<'de> Deserialize<'de> for Chain { /// Structure for signatures used in Beacon chain operations #[derive(Default, Debug, TreeHash)] pub struct SigningData { - pub object_root: [u8; 32], - pub signing_domain: [u8; 32], + pub object_root: B256, + pub signing_domain: B256, } /// Structure for signatures used for proposer commitments in Commit Boost. @@ -302,8 +302,8 @@ pub struct SigningData { /// SigningData for signatures. #[derive(Default, Debug, TreeHash)] pub struct PropCommitSigningInfo { - pub data: [u8; 32], - pub module_signing_id: [u8; 32], + pub data: B256, + pub module_signing_id: B256, } /// Returns seconds_per_slot and genesis_fork_version from a spec, such as diff --git a/crates/common/src/utils.rs b/crates/common/src/utils.rs index 7f2fbbca..3f658c92 100644 --- a/crates/common/src/utils.rs +++ b/crates/common/src/utils.rs @@ -475,7 +475,7 @@ mod test { let jwt = create_jwt(&ModuleId("DA_COMMIT".to_string()), "secret").unwrap(); let module_id = decode_jwt(jwt.clone()).unwrap(); assert_eq!(module_id, ModuleId("DA_COMMIT".to_string())); - let response = validate_jwt(jwt, "secret".as_ref()); + let response = validate_jwt(jwt, "secret"); assert!(response.is_ok()); // Check expired JWT diff --git a/crates/pbs/src/mev_boost/get_header.rs b/crates/pbs/src/mev_boost/get_header.rs index ec2716d7..aa6ed6fd 100644 --- a/crates/pbs/src/mev_boost/get_header.rs +++ b/crates/pbs/src/mev_boost/get_header.rs @@ -4,7 +4,7 @@ use std::{ }; use alloy::{ - primitives::{utils::format_ether, B256, U256}, + primitives::{aliases::B32, utils::format_ether, B256, U256}, providers::Provider, rpc::types::{beacon::BlsPublicKey, Block}, }; @@ -474,7 +474,7 @@ fn validate_signature( &message, signature, None, - APPLICATION_BUILDER_DOMAIN, + &B32::from(APPLICATION_BUILDER_DOMAIN), ) .map_err(ValidationError::Sigverify)?; diff --git a/crates/signer/src/manager/dirk.rs b/crates/signer/src/manager/dirk.rs index 08c73def..e1ebac11 100644 --- a/crates/signer/src/manager/dirk.rs +++ b/crates/signer/src/manager/dirk.rs @@ -1,12 +1,16 @@ use std::{collections::HashMap, io::Write, path::PathBuf}; -use alloy::{hex, primitives::B256, rpc::types::beacon::constants::BLS_SIGNATURE_BYTES_LEN}; +use alloy::{ + hex, + 
primitives::{aliases::B32, B256}, + rpc::types::beacon::constants::BLS_SIGNATURE_BYTES_LEN, +}; use blsful::inner_types::{Field, G2Affine, G2Projective, Group, Scalar}; use cb_common::{ commit::request::{ConsensusProxyMap, ProxyDelegation, SignedProxyDelegation}, config::{DirkConfig, DirkHostConfig}, constants::COMMIT_BOOST_DOMAIN, - signature::{compute_domain, compute_signing_root}, + signature::{compute_domain, compute_tree_hash_root}, signer::{BlsPublicKey, BlsSignature, ProxyStore}, types::{self, Chain, ModuleId}, }; @@ -192,7 +196,7 @@ impl DirkManager { pub async fn request_consensus_signature( &self, pubkey: &BlsPublicKey, - object_root: &[u8; 32], + object_root: &B256, module_signing_id: Option<&B256>, ) -> Result { match self.consensus_accounts.get(pubkey) { @@ -210,7 +214,7 @@ impl DirkManager { pub async fn request_proxy_signature( &self, pubkey: &BlsPublicKey, - object_root: &[u8; 32], + object_root: &B256, module_signing_id: Option<&B256>, ) -> Result { match self.proxy_accounts.get(pubkey) { @@ -228,15 +232,15 @@ impl DirkManager { async fn request_simple_signature( &self, account: &SimpleAccount, - object_root: &[u8; 32], + object_root: &B256, module_signing_id: Option<&B256>, ) -> Result { - let domain = compute_domain(self.chain, COMMIT_BOOST_DOMAIN); + let domain = compute_domain(self.chain, &B32::from(COMMIT_BOOST_DOMAIN)); let data = match module_signing_id { - Some(id) => compute_signing_root(&types::PropCommitSigningInfo { + Some(id) => compute_tree_hash_root(&types::PropCommitSigningInfo { data: *object_root, - module_signing_id: id.0, + module_signing_id: *id, }) .to_vec(), None => object_root.to_vec(), @@ -268,16 +272,16 @@ impl DirkManager { async fn request_distributed_signature( &self, account: &DistributedAccount, - object_root: &[u8; 32], + object_root: &B256, module_signing_id: Option<&B256>, ) -> Result { let mut partials = Vec::with_capacity(account.participants.len()); let mut requests = Vec::with_capacity(account.participants.len()); let data = match module_signing_id { - Some(id) => compute_signing_root(&types::PropCommitSigningInfo { + Some(id) => compute_tree_hash_root(&types::PropCommitSigningInfo { data: *object_root, - module_signing_id: id.0, + module_signing_id: *id, }) .to_vec(), None => object_root.to_vec(), @@ -289,7 +293,8 @@ impl DirkManager { SignerClient::new(channel.clone()) .sign(SignRequest { data: data_copy, - domain: compute_domain(self.chain, COMMIT_BOOST_DOMAIN).to_vec(), + domain: compute_domain(self.chain, &B32::from(COMMIT_BOOST_DOMAIN)) + .to_vec(), id: Some(sign_request::Id::Account(account.name.clone())), }) .map(|res| (res, *id)) @@ -359,7 +364,7 @@ impl DirkManager { let message = ProxyDelegation { delegator: consensus, proxy: proxy_account.inner.public_key() }; let delegation_signature = - self.request_consensus_signature(&consensus, &message.tree_hash_root().0, None).await?; + self.request_consensus_signature(&consensus, &message.tree_hash_root(), None).await?; let delegation = SignedProxyDelegation { message, signature: delegation_signature }; diff --git a/crates/signer/src/manager/local.rs b/crates/signer/src/manager/local.rs index a242a754..48ec757c 100644 --- a/crates/signer/src/manager/local.rs +++ b/crates/signer/src/manager/local.rs @@ -98,7 +98,7 @@ impl LocalSigningManager { let proxy_pubkey = signer.pubkey(); let message = ProxyDelegationBls { delegator, proxy: proxy_pubkey }; - let signature = self.sign_consensus(&delegator, &message.tree_hash_root().0, None).await?; + let signature = 
self.sign_consensus(&delegator, &message.tree_hash_root(), None).await?; let delegation = SignedProxyDelegationBls { signature, message }; let proxy_signer = BlsProxySigner { signer, delegation }; @@ -117,7 +117,7 @@ impl LocalSigningManager { let proxy_address = signer.address(); let message = ProxyDelegationEcdsa { delegator, proxy: proxy_address }; - let signature = self.sign_consensus(&delegator, &message.tree_hash_root().0, None).await?; + let signature = self.sign_consensus(&delegator, &message.tree_hash_root(), None).await?; let delegation = SignedProxyDelegationEcdsa { signature, message }; let proxy_signer = EcdsaProxySigner { signer, delegation }; @@ -132,7 +132,7 @@ impl LocalSigningManager { pub async fn sign_consensus( &self, pubkey: &BlsPublicKey, - object_root: &[u8; 32], + object_root: &B256, module_signing_id: Option<&B256>, ) -> Result { let signer = self @@ -140,8 +140,8 @@ impl LocalSigningManager { .get(pubkey) .ok_or(SignerModuleError::UnknownConsensusSigner(pubkey.to_vec()))?; let signature = match module_signing_id { - Some(id) => signer.sign(self.chain, *object_root, Some(id.0)).await, - None => signer.sign(self.chain, *object_root, None).await, + Some(id) => signer.sign(self.chain, object_root, Some(id)).await, + None => signer.sign(self.chain, object_root, None).await, }; Ok(signature) @@ -150,7 +150,7 @@ impl LocalSigningManager { pub async fn sign_proxy_bls( &self, pubkey: &BlsPublicKey, - object_root: &[u8; 32], + object_root: &B256, module_signing_id: Option<&B256>, ) -> Result { let bls_proxy = self @@ -159,8 +159,8 @@ impl LocalSigningManager { .get(pubkey) .ok_or(SignerModuleError::UnknownProxySigner(pubkey.to_vec()))?; let signature = match module_signing_id { - Some(id) => bls_proxy.sign(self.chain, *object_root, Some(id.0)).await, - None => bls_proxy.sign(self.chain, *object_root, None).await, + Some(id) => bls_proxy.sign(self.chain, object_root, Some(id)).await, + None => bls_proxy.sign(self.chain, object_root, None).await, }; Ok(signature) } @@ -168,7 +168,7 @@ impl LocalSigningManager { pub async fn sign_proxy_ecdsa( &self, address: &Address, - object_root: &[u8; 32], + object_root: &B256, module_signing_id: Option<&B256>, ) -> Result { let ecdsa_proxy = self @@ -177,8 +177,8 @@ impl LocalSigningManager { .get(address) .ok_or(SignerModuleError::UnknownProxySigner(address.to_vec()))?; let signature = match module_signing_id { - Some(id) => ecdsa_proxy.sign(self.chain, *object_root, Some(id.0)).await?, - None => ecdsa_proxy.sign(self.chain, *object_root, None).await?, + Some(id) => ecdsa_proxy.sign(self.chain, object_root, Some(id)).await?, + None => ecdsa_proxy.sign(self.chain, object_root, None).await?, }; Ok(signature) } @@ -280,7 +280,7 @@ impl LocalSigningManager { #[cfg(test)] mod tests { use alloy::primitives::B256; - use cb_common::signature::compute_signing_root; + use cb_common::signature::compute_tree_hash_root; use lazy_static::lazy_static; use super::*; @@ -303,6 +303,7 @@ mod tests { } mod test_bls { + use alloy::primitives::aliases::B32; use cb_common::{ constants::COMMIT_BOOST_DOMAIN, signature::compute_domain, signer::verify_bls_signature, types, @@ -318,31 +319,29 @@ mod tests { let module_signing_id = B256::random(); let sig = signing_manager - .sign_consensus( - &consensus_pk.try_into().unwrap(), - &data_root, - Some(&module_signing_id), - ) + .sign_consensus(&consensus_pk, &data_root, Some(&module_signing_id)) .await .unwrap(); // Verify signature - let domain = compute_domain(CHAIN, COMMIT_BOOST_DOMAIN); - let signing_root = 
compute_signing_root(&types::SigningData { - object_root: compute_signing_root(&types::PropCommitSigningInfo { - data: data_root.tree_hash_root().0, - module_signing_id: module_signing_id.0, + let domain = compute_domain(CHAIN, &B32::from(COMMIT_BOOST_DOMAIN)); + let signing_root = compute_tree_hash_root(&types::SigningData { + object_root: compute_tree_hash_root(&types::PropCommitSigningInfo { + data: data_root.tree_hash_root(), + module_signing_id, }), signing_domain: domain, }); - let validation_result = verify_bls_signature(&consensus_pk, &signing_root, &sig); + let validation_result = + verify_bls_signature(&consensus_pk, signing_root.as_slice(), &sig); assert!(validation_result.is_ok(), "Keypair must produce valid signatures of messages.") } } mod test_proxy_bls { + use alloy::primitives::aliases::B32; use cb_common::{ constants::COMMIT_BOOST_DOMAIN, signature::compute_domain, signer::verify_bls_signature, types, @@ -354,10 +353,8 @@ mod tests { async fn test_proxy_key_is_valid_proxy_for_consensus_key() { let (mut signing_manager, consensus_pk) = init_signing_manager(); - let signed_delegation = signing_manager - .create_proxy_bls(MODULE_ID.clone(), consensus_pk.clone()) - .await - .unwrap(); + let signed_delegation = + signing_manager.create_proxy_bls(MODULE_ID.clone(), consensus_pk).await.unwrap(); let validation_result = signed_delegation.validate(CHAIN); @@ -377,10 +374,8 @@ mod tests { async fn test_tampered_proxy_key_is_invalid() { let (mut signing_manager, consensus_pk) = init_signing_manager(); - let mut signed_delegation = signing_manager - .create_proxy_bls(MODULE_ID.clone(), consensus_pk.clone()) - .await - .unwrap(); + let mut signed_delegation = + signing_manager.create_proxy_bls(MODULE_ID.clone(), consensus_pk).await.unwrap(); let m = &mut signed_delegation.signature.0[0]; (*m, _) = m.overflowing_add(1); @@ -394,31 +389,29 @@ mod tests { async fn test_proxy_key_signs_message() { let (mut signing_manager, consensus_pk) = init_signing_manager(); - let signed_delegation = signing_manager - .create_proxy_bls(MODULE_ID.clone(), consensus_pk.clone()) - .await - .unwrap(); + let signed_delegation = + signing_manager.create_proxy_bls(MODULE_ID.clone(), consensus_pk).await.unwrap(); let proxy_pk = signed_delegation.message.proxy; let data_root = B256::random(); let module_signing_id = B256::random(); let sig = signing_manager - .sign_proxy_bls(&proxy_pk.try_into().unwrap(), &data_root, Some(&module_signing_id)) + .sign_proxy_bls(&proxy_pk, &data_root, Some(&module_signing_id)) .await .unwrap(); // Verify signature - let domain = compute_domain(CHAIN, COMMIT_BOOST_DOMAIN); - let signing_root = compute_signing_root(&types::SigningData { - object_root: compute_signing_root(&types::PropCommitSigningInfo { - data: data_root.tree_hash_root().0, - module_signing_id: module_signing_id.0, + let domain = compute_domain(CHAIN, &B32::from(COMMIT_BOOST_DOMAIN)); + let signing_root = compute_tree_hash_root(&types::SigningData { + object_root: compute_tree_hash_root(&types::PropCommitSigningInfo { + data: data_root.tree_hash_root(), + module_signing_id, }), signing_domain: domain, }); - let validation_result = verify_bls_signature(&proxy_pk, &signing_root, &sig); + let validation_result = verify_bls_signature(&proxy_pk, signing_root.as_slice(), &sig); assert!( validation_result.is_ok(), @@ -428,6 +421,7 @@ mod tests { } mod test_proxy_ecdsa { + use alloy::primitives::aliases::B32; use cb_common::{ constants::COMMIT_BOOST_DOMAIN, signature::compute_domain, signer::verify_ecdsa_signature, types, 
@@ -439,10 +433,8 @@ mod tests { async fn test_proxy_key_is_valid_proxy_for_consensus_key() { let (mut signing_manager, consensus_pk) = init_signing_manager(); - let signed_delegation = signing_manager - .create_proxy_ecdsa(MODULE_ID.clone(), consensus_pk.clone()) - .await - .unwrap(); + let signed_delegation = + signing_manager.create_proxy_ecdsa(MODULE_ID.clone(), consensus_pk).await.unwrap(); let validation_result = signed_delegation.validate(CHAIN); @@ -462,10 +454,8 @@ mod tests { async fn test_tampered_proxy_key_is_invalid() { let (mut signing_manager, consensus_pk) = init_signing_manager(); - let mut signed_delegation = signing_manager - .create_proxy_ecdsa(MODULE_ID.clone(), consensus_pk.clone()) - .await - .unwrap(); + let mut signed_delegation = + signing_manager.create_proxy_ecdsa(MODULE_ID.clone(), consensus_pk).await.unwrap(); let m = &mut signed_delegation.signature.0[0]; (*m, _) = m.overflowing_add(1); @@ -479,30 +469,24 @@ mod tests { async fn test_proxy_key_signs_message() { let (mut signing_manager, consensus_pk) = init_signing_manager(); - let signed_delegation = signing_manager - .create_proxy_ecdsa(MODULE_ID.clone(), consensus_pk.clone()) - .await - .unwrap(); + let signed_delegation = + signing_manager.create_proxy_ecdsa(MODULE_ID.clone(), consensus_pk).await.unwrap(); let proxy_pk = signed_delegation.message.proxy; let data_root = B256::random(); let module_signing_id = B256::random(); let sig = signing_manager - .sign_proxy_ecdsa( - &proxy_pk.try_into().unwrap(), - &data_root, - Some(&module_signing_id), - ) + .sign_proxy_ecdsa(&proxy_pk, &data_root, Some(&module_signing_id)) .await .unwrap(); // Verify signature - let domain = compute_domain(CHAIN, COMMIT_BOOST_DOMAIN); - let signing_root = compute_signing_root(&types::SigningData { - object_root: compute_signing_root(&types::PropCommitSigningInfo { - data: data_root.tree_hash_root().0, - module_signing_id: module_signing_id.0, + let domain = compute_domain(CHAIN, &B32::from(COMMIT_BOOST_DOMAIN)); + let signing_root = compute_tree_hash_root(&types::SigningData { + object_root: compute_tree_hash_root(&types::PropCommitSigningInfo { + data: data_root.tree_hash_root(), + module_signing_id, }), signing_domain: domain, }); diff --git a/examples/da_commit/src/main.rs b/examples/da_commit/src/main.rs index 2f8845c8..c73b2191 100644 --- a/examples/da_commit/src/main.rs +++ b/examples/da_commit/src/main.rs @@ -99,7 +99,7 @@ impl DaCommitService { &pubkey, &datagram, &signature, - DA_COMMIT_SIGNING_ID, + &DA_COMMIT_SIGNING_ID, ) { Ok(_) => info!("Signature verified successfully"), Err(err) => error!(%err, "Signature verification failed"), @@ -115,7 +115,7 @@ impl DaCommitService { &proxy_bls, &datagram, &proxy_signature_bls, - DA_COMMIT_SIGNING_ID, + &DA_COMMIT_SIGNING_ID, ) { Ok(_) => info!("Signature verified successfully"), Err(err) => error!(%err, "Signature verification failed"), @@ -135,7 +135,7 @@ impl DaCommitService { &proxy_ecdsa, &datagram, &proxy_signature_ecdsa, - DA_COMMIT_SIGNING_ID, + &DA_COMMIT_SIGNING_ID, ) { Ok(_) => info!("Signature verified successfully"), Err(err) => error!(%err, "Signature verification failed"), diff --git a/tests/src/mock_relay.rs b/tests/src/mock_relay.rs index a91a70c6..45c095b3 100644 --- a/tests/src/mock_relay.rs +++ b/tests/src/mock_relay.rs @@ -117,8 +117,8 @@ async fn handle_get_header( response.message.pubkey = blst_pubkey_to_alloy(&state.signer.sk_to_pk()); response.message.header.timestamp = timestamp_of_slot_start_sec(0, state.chain); - let object_root = 
response.message.tree_hash_root().0; - response.signature = sign_builder_root(state.chain, &state.signer, object_root); + let object_root = response.message.tree_hash_root(); + response.signature = sign_builder_root(state.chain, &state.signer, &object_root); let response = GetHeaderResponse::Electra(response); (StatusCode::OK, Json(response)).into_response() diff --git a/tests/tests/payloads.rs b/tests/tests/payloads.rs index a1bd5b52..c43df7ef 100644 --- a/tests/tests/payloads.rs +++ b/tests/tests/payloads.rs @@ -9,19 +9,19 @@ use serde_json::Value; #[test] fn test_registrations() { let data = include_str!("../data/registration_holesky.json"); - test_encode_decode::>(&data); + test_encode_decode::>(data); } #[test] fn test_signed_blinded_block() { let data = include_str!("../data/signed_blinded_block_holesky.json"); - test_encode_decode::(&data); + test_encode_decode::(data); } #[test] fn test_submit_block_response() { let data = include_str!("../data/submit_block_response_holesky.json"); - test_encode_decode::(&data); + test_encode_decode::(data); } // Unhappy path tests @@ -32,10 +32,8 @@ fn test_missing_registration_field(field_name: &str) -> String { // Remove specified field from the first validator's message if let Value::Array(arr) = &mut values { if let Some(first_validator) = arr.get_mut(0) { - if let Some(message) = first_validator.get_mut("message") { - if let Value::Object(msg_obj) = message { - msg_obj.remove(field_name); - } + if let Some(Value::Object(msg_obj)) = first_validator.get_mut("message") { + msg_obj.remove(field_name); } } } @@ -66,10 +64,8 @@ fn test_missing_signed_blinded_block_field(field_name: &str) -> String { let mut values: Value = serde_json::from_str(data).unwrap(); // Remove specified field from the message - if let Some(message) = values.get_mut("message") { - if let Value::Object(msg_obj) = message { - msg_obj.remove(field_name); - } + if let Some(Value::Object(msg_obj)) = values.get_mut("message") { + msg_obj.remove(field_name); } // This should fail since the field is required diff --git a/tests/tests/pbs_get_header.rs b/tests/tests/pbs_get_header.rs index 10a68c02..088fedb2 100644 --- a/tests/tests/pbs_get_header.rs +++ b/tests/tests/pbs_get_header.rs @@ -58,7 +58,7 @@ async fn test_get_header() -> Result<()> { assert_eq!(res.message.header.timestamp, timestamp_of_slot_start_sec(0, chain)); assert_eq!( res.signature, - sign_builder_root(chain, &mock_state.signer, res.message.tree_hash_root().0) + sign_builder_root(chain, &mock_state.signer, &res.message.tree_hash_root()) ); Ok(()) } @@ -67,7 +67,7 @@ async fn test_get_header() -> Result<()> { async fn test_get_header_returns_204_if_relay_down() -> Result<()> { setup_test_env(); let signer = random_secret(); - let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()).into(); + let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()); let chain = Chain::Holesky; let pbs_port = 3300; @@ -101,7 +101,7 @@ async fn test_get_header_returns_204_if_relay_down() -> Result<()> { async fn test_get_header_returns_400_if_request_is_invalid() -> Result<()> { setup_test_env(); let signer = random_secret(); - let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()).into(); + let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()); let chain = Chain::Holesky; let pbs_port = 3400; diff --git a/tests/tests/pbs_get_status.rs b/tests/tests/pbs_get_status.rs index 7112a46b..629bea69 100644 --- a/tests/tests/pbs_get_status.rs +++ b/tests/tests/pbs_get_status.rs @@ -19,7 +19,7 @@ 
use tracing::info; async fn test_get_status() -> Result<()> { setup_test_env(); let signer = random_secret(); - let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()).into(); + let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()); let chain = Chain::Holesky; let pbs_port = 3500; @@ -55,7 +55,7 @@ async fn test_get_status() -> Result<()> { async fn test_get_status_returns_502_if_relay_down() -> Result<()> { setup_test_env(); let signer = random_secret(); - let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()).into(); + let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()); let chain = Chain::Holesky; let pbs_port = 3600; diff --git a/tests/tests/pbs_mux.rs b/tests/tests/pbs_mux.rs index 624217d3..f5645e54 100644 --- a/tests/tests/pbs_mux.rs +++ b/tests/tests/pbs_mux.rs @@ -20,7 +20,7 @@ use tracing::info; async fn test_mux() -> Result<()> { setup_test_env(); let signer = random_secret(); - let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()).into(); + let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()); let chain = Chain::Holesky; let pbs_port = 3700; diff --git a/tests/tests/pbs_post_blinded_blocks.rs b/tests/tests/pbs_post_blinded_blocks.rs index 03c268ba..1119a1d6 100644 --- a/tests/tests/pbs_post_blinded_blocks.rs +++ b/tests/tests/pbs_post_blinded_blocks.rs @@ -20,7 +20,7 @@ use tracing::info; async fn test_submit_block() -> Result<()> { setup_test_env(); let signer = random_secret(); - let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()).into(); + let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()); let chain = Chain::Holesky; let pbs_port = 3800; @@ -54,7 +54,7 @@ async fn test_submit_block() -> Result<()> { async fn test_submit_block_too_large() -> Result<()> { setup_test_env(); let signer = random_secret(); - let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()).into(); + let pubkey: BlsPublicKey = blst_pubkey_to_alloy(&signer.sk_to_pk()); let chain = Chain::Holesky; let pbs_port = 3900; From 874e07daa6eabc194aaf321f2299a0e92c86a3d8 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 4 Aug 2025 15:28:59 -0400 Subject: [PATCH 64/67] Cleaned up some hashmap usage --- crates/common/src/config/signer.rs | 31 ++++++++---------------------- 1 file changed, 8 insertions(+), 23 deletions(-) diff --git a/crates/common/src/config/signer.rs b/crates/common/src/config/signer.rs index c82d2f69..a397d696 100644 --- a/crates/common/src/config/signer.rs +++ b/crates/common/src/config/signer.rs @@ -313,30 +313,15 @@ pub fn load_module_signing_configs( .wrap_err(format!("Invalid signing config for module {}", module.id))?; // Check for duplicates in JWT secrets and signing IDs - match seen_jwt_secrets.get(&module_signing_config.jwt_secret) { - Some(existing_module) => { - bail!( - "Duplicate JWT secret detected for modules {} and {}", - existing_module, - module.id - ) - } - None => { - seen_jwt_secrets.insert(module_signing_config.jwt_secret.clone(), &module.id); - } + if let Some(existing_module) = + seen_jwt_secrets.insert(module_signing_config.jwt_secret.clone(), &module.id) + { + bail!("Duplicate JWT secret detected for modules {} and {}", existing_module, module.id) }; - match seen_signing_ids.get(&module_signing_config.signing_id) { - Some(existing_module) => { - bail!( - "Duplicate signing ID detected for modules {} and {}", - existing_module, - module.id - ) - } - None => { - seen_signing_ids.insert(module_signing_config.signing_id, &module.id); - 
module.signing_id - } + if let Some(existing_module) = + seen_signing_ids.insert(module_signing_config.signing_id, &module.id) + { + bail!("Duplicate signing ID detected for modules {} and {}", existing_module, module.id) }; mod_signing_configs.insert(module.id.clone(), module_signing_config); From 3ad11a8873bac38afde67ef92f5a7758263c766b Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 4 Aug 2025 16:01:04 -0400 Subject: [PATCH 65/67] Removed compute_tree_hash_root() --- crates/common/src/signature.rs | 64 +++++++++-------------- crates/common/src/signer/schemes/ecdsa.rs | 42 ++++++--------- crates/signer/src/manager/dirk.rs | 18 +++---- crates/signer/src/manager/local.rs | 46 ++++++++-------- 4 files changed, 68 insertions(+), 102 deletions(-) diff --git a/crates/common/src/signature.rs b/crates/common/src/signature.rs index fbb6021d..cd960031 100644 --- a/crates/common/src/signature.rs +++ b/crates/common/src/signature.rs @@ -17,10 +17,6 @@ pub fn sign_message(secret_key: &BlsSecretKey, msg: &[u8]) -> BlsSignature { BlsSignature::from_slice(&signature) } -pub fn compute_tree_hash_root(signing_data: &T) -> B256 { - signing_data.tree_hash_root() -} - pub fn compute_prop_commit_signing_root( chain: Chain, object_root: &B256, @@ -29,17 +25,14 @@ pub fn compute_prop_commit_signing_root( ) -> B256 { let domain = compute_domain(chain, domain_mask); match module_signing_id { - Some(id) => compute_tree_hash_root(&types::SigningData { - object_root: compute_tree_hash_root(&types::PropCommitSigningInfo { - data: *object_root, - module_signing_id: *id, - }), - signing_domain: domain, - }), - None => compute_tree_hash_root(&types::SigningData { - object_root: *object_root, - signing_domain: domain, - }), + Some(id) => { + let object_root = + types::PropCommitSigningInfo { data: *object_root, module_signing_id: *id } + .tree_hash_root(); + types::SigningData { object_root, signing_domain: domain }.tree_hash_root() + } + None => types::SigningData { object_root: *object_root, signing_domain: domain } + .tree_hash_root(), } } @@ -75,7 +68,7 @@ pub fn verify_signed_message( ) -> Result<(), BlstErrorWrapper> { let signing_root = compute_prop_commit_signing_root( chain, - &compute_tree_hash_root(msg), + &msg.tree_hash_root(), module_signing_id, domain_mask, ); @@ -96,12 +89,10 @@ pub fn sign_builder_root( secret_key: &BlsSecretKey, object_root: &B256, ) -> BlsSignature { - let domain = chain.builder_domain(); - let signing_data = types::SigningData { - object_root: compute_tree_hash_root(object_root), - signing_domain: domain, - }; - let signing_root = compute_tree_hash_root(&signing_data); + let signing_domain = chain.builder_domain(); + let signing_data = + types::SigningData { object_root: object_root.tree_hash_root(), signing_domain }; + let signing_root = signing_data.tree_hash_root(); sign_message(secret_key, signing_root.as_slice()) } @@ -133,15 +124,13 @@ pub fn verify_proposer_commitment_signature_bls( signature: &BlsSignature, module_signing_id: &B256, ) -> Result<(), BlstErrorWrapper> { - let object_root = msg.tree_hash_root(); - let domain = compute_domain(chain, &B32::from(COMMIT_BOOST_DOMAIN)); - let signing_root = compute_tree_hash_root(&types::SigningData { - object_root: compute_tree_hash_root(&types::PropCommitSigningInfo { - data: object_root, - module_signing_id: *module_signing_id, - }), - signing_domain: domain, - }); + let signing_domain = compute_domain(chain, &B32::from(COMMIT_BOOST_DOMAIN)); + let object_root = types::PropCommitSigningInfo { + data: msg.tree_hash_root(), + 
module_signing_id: *module_signing_id, + } + .tree_hash_root(); + let signing_root = types::SigningData { object_root, signing_domain }.tree_hash_root(); verify_bls_signature(pubkey, signing_root.as_slice(), signature) } @@ -155,14 +144,11 @@ pub fn verify_proposer_commitment_signature_ecdsa( module_signing_id: &B256, ) -> Result<(), eyre::Report> { let object_root = msg.tree_hash_root(); - let domain = compute_domain(chain, &B32::from(COMMIT_BOOST_DOMAIN)); - let signing_root = compute_tree_hash_root(&types::SigningData { - object_root: compute_tree_hash_root(&types::PropCommitSigningInfo { - data: object_root, - module_signing_id: *module_signing_id, - }), - signing_domain: domain, - }); + let signing_domain = compute_domain(chain, &B32::from(COMMIT_BOOST_DOMAIN)); + let object_root = + types::PropCommitSigningInfo { data: object_root, module_signing_id: *module_signing_id } + .tree_hash_root(); + let signing_root = types::SigningData { object_root, signing_domain }.tree_hash_root(); verify_ecdsa_signature(address, &signing_root, signature) } diff --git a/crates/common/src/signer/schemes/ecdsa.rs b/crates/common/src/signer/schemes/ecdsa.rs index 53911141..907340f1 100644 --- a/crates/common/src/signer/schemes/ecdsa.rs +++ b/crates/common/src/signer/schemes/ecdsa.rs @@ -9,7 +9,7 @@ use tree_hash::TreeHash; use crate::{ constants::COMMIT_BOOST_DOMAIN, - signature::{compute_domain, compute_tree_hash_root}, + signature::compute_domain, types::{self, Chain}, }; @@ -91,25 +91,18 @@ impl EcdsaSigner { ) -> Result { match self { EcdsaSigner::Local(sk) => { - let domain = compute_domain(chain, &B32::from(COMMIT_BOOST_DOMAIN)); + let signing_domain = compute_domain(chain, &B32::from(COMMIT_BOOST_DOMAIN)); let signing_root = match module_signing_id { Some(id) => { - let signing_data = types::SigningData { - object_root: compute_tree_hash_root(&types::PropCommitSigningInfo { - data: *object_root, - module_signing_id: *id, - }), - signing_domain: domain, - }; - compute_tree_hash_root(&signing_data) - } - None => { - let signing_data = types::SigningData { - object_root: *object_root, - signing_domain: domain, - }; - compute_tree_hash_root(&signing_data) + let object_root = types::PropCommitSigningInfo { + data: *object_root, + module_signing_id: *id, + } + .tree_hash_root(); + types::SigningData { object_root, signing_domain }.tree_hash_root() } + None => types::SigningData { object_root: *object_root, signing_domain } + .tree_hash_root(), }; sk.sign_hash_sync(&signing_root).map(EcdsaSignature::from) } @@ -152,7 +145,7 @@ mod test { let domain = compute_domain(Chain::Holesky, &B32::from(COMMIT_BOOST_DOMAIN)); let signing_data = types::SigningData { object_root, signing_domain: domain }; - let msg = compute_tree_hash_root(&signing_data); + let msg = signing_data.tree_hash_root(); assert_eq!(msg, hex!("219ca7a673b2cbbf67bec6c9f60f78bd051336d57b68d1540190f30667e86725")); @@ -171,15 +164,10 @@ mod test { let signature = signer.sign(Chain::Hoodi, &object_root, Some(&module_signing_id)).await.unwrap(); - let domain = compute_domain(Chain::Hoodi, &B32::from(COMMIT_BOOST_DOMAIN)); - let signing_data = types::SigningData { - object_root: compute_tree_hash_root(&types::PropCommitSigningInfo { - data: object_root, - module_signing_id, - }), - signing_domain: domain, - }; - let msg = compute_tree_hash_root(&signing_data); + let signing_domain = compute_domain(Chain::Hoodi, &B32::from(COMMIT_BOOST_DOMAIN)); + let object_root = + types::PropCommitSigningInfo { data: object_root, module_signing_id 
}.tree_hash_root(); + let msg = types::SigningData { object_root, signing_domain }.tree_hash_root(); assert_eq!(msg, hex!("8cd49ccf2f9b0297796ff96ce5f7c5d26e20a59d0032ee2ad6249dcd9682b808")); diff --git a/crates/signer/src/manager/dirk.rs b/crates/signer/src/manager/dirk.rs index e1ebac11..add9e3a2 100644 --- a/crates/signer/src/manager/dirk.rs +++ b/crates/signer/src/manager/dirk.rs @@ -10,7 +10,7 @@ use cb_common::{ commit::request::{ConsensusProxyMap, ProxyDelegation, SignedProxyDelegation}, config::{DirkConfig, DirkHostConfig}, constants::COMMIT_BOOST_DOMAIN, - signature::{compute_domain, compute_tree_hash_root}, + signature::compute_domain, signer::{BlsPublicKey, BlsSignature, ProxyStore}, types::{self, Chain, ModuleId}, }; @@ -238,11 +238,9 @@ impl DirkManager { let domain = compute_domain(self.chain, &B32::from(COMMIT_BOOST_DOMAIN)); let data = match module_signing_id { - Some(id) => compute_tree_hash_root(&types::PropCommitSigningInfo { - data: *object_root, - module_signing_id: *id, - }) - .to_vec(), + Some(id) => types::PropCommitSigningInfo { data: *object_root, module_signing_id: *id } + .tree_hash_root() + .to_vec(), None => object_root.to_vec(), }; @@ -279,11 +277,9 @@ impl DirkManager { let mut requests = Vec::with_capacity(account.participants.len()); let data = match module_signing_id { - Some(id) => compute_tree_hash_root(&types::PropCommitSigningInfo { - data: *object_root, - module_signing_id: *id, - }) - .to_vec(), + Some(id) => types::PropCommitSigningInfo { data: *object_root, module_signing_id: *id } + .tree_hash_root() + .to_vec(), None => object_root.to_vec(), }; diff --git a/crates/signer/src/manager/local.rs b/crates/signer/src/manager/local.rs index 48ec757c..cc942525 100644 --- a/crates/signer/src/manager/local.rs +++ b/crates/signer/src/manager/local.rs @@ -280,7 +280,6 @@ impl LocalSigningManager { #[cfg(test)] mod tests { use alloy::primitives::B256; - use cb_common::signature::compute_tree_hash_root; use lazy_static::lazy_static; use super::*; @@ -324,14 +323,13 @@ mod tests { .unwrap(); // Verify signature - let domain = compute_domain(CHAIN, &B32::from(COMMIT_BOOST_DOMAIN)); - let signing_root = compute_tree_hash_root(&types::SigningData { - object_root: compute_tree_hash_root(&types::PropCommitSigningInfo { - data: data_root.tree_hash_root(), - module_signing_id, - }), - signing_domain: domain, - }); + let signing_domain = compute_domain(CHAIN, &B32::from(COMMIT_BOOST_DOMAIN)); + let object_root = types::PropCommitSigningInfo { + data: data_root.tree_hash_root(), + module_signing_id, + } + .tree_hash_root(); + let signing_root = types::SigningData { object_root, signing_domain }.tree_hash_root(); let validation_result = verify_bls_signature(&consensus_pk, signing_root.as_slice(), &sig); @@ -402,14 +400,13 @@ mod tests { .unwrap(); // Verify signature - let domain = compute_domain(CHAIN, &B32::from(COMMIT_BOOST_DOMAIN)); - let signing_root = compute_tree_hash_root(&types::SigningData { - object_root: compute_tree_hash_root(&types::PropCommitSigningInfo { - data: data_root.tree_hash_root(), - module_signing_id, - }), - signing_domain: domain, - }); + let signing_domain = compute_domain(CHAIN, &B32::from(COMMIT_BOOST_DOMAIN)); + let object_root = types::PropCommitSigningInfo { + data: data_root.tree_hash_root(), + module_signing_id, + } + .tree_hash_root(); + let signing_root = types::SigningData { object_root, signing_domain }.tree_hash_root(); let validation_result = verify_bls_signature(&proxy_pk, signing_root.as_slice(), &sig); @@ -482,14 +479,13 @@ 
mod tests { .unwrap(); // Verify signature - let domain = compute_domain(CHAIN, &B32::from(COMMIT_BOOST_DOMAIN)); - let signing_root = compute_tree_hash_root(&types::SigningData { - object_root: compute_tree_hash_root(&types::PropCommitSigningInfo { - data: data_root.tree_hash_root(), - module_signing_id, - }), - signing_domain: domain, - }); + let signing_domain = compute_domain(CHAIN, &B32::from(COMMIT_BOOST_DOMAIN)); + let object_root = types::PropCommitSigningInfo { + data: data_root.tree_hash_root(), + module_signing_id, + } + .tree_hash_root(); + let signing_root = types::SigningData { object_root, signing_domain }.tree_hash_root(); let validation_result = verify_ecdsa_signature(&proxy_pk, &signing_root, &sig); From a75605eaf9269ef71eaa8325e8d05f31f1fd7c95 Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 4 Aug 2025 16:07:28 -0400 Subject: [PATCH 66/67] Some minor cleanup --- crates/signer/src/manager/local.rs | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/crates/signer/src/manager/local.rs b/crates/signer/src/manager/local.rs index cc942525..a13695e5 100644 --- a/crates/signer/src/manager/local.rs +++ b/crates/signer/src/manager/local.rs @@ -139,10 +139,7 @@ impl LocalSigningManager { .consensus_signers .get(pubkey) .ok_or(SignerModuleError::UnknownConsensusSigner(pubkey.to_vec()))?; - let signature = match module_signing_id { - Some(id) => signer.sign(self.chain, object_root, Some(id)).await, - None => signer.sign(self.chain, object_root, None).await, - }; + let signature = signer.sign(self.chain, object_root, module_signing_id).await; Ok(signature) } @@ -158,10 +155,7 @@ impl LocalSigningManager { .bls_signers .get(pubkey) .ok_or(SignerModuleError::UnknownProxySigner(pubkey.to_vec()))?; - let signature = match module_signing_id { - Some(id) => bls_proxy.sign(self.chain, object_root, Some(id)).await, - None => bls_proxy.sign(self.chain, object_root, None).await, - }; + let signature = bls_proxy.sign(self.chain, object_root, module_signing_id).await; Ok(signature) } @@ -176,10 +170,7 @@ impl LocalSigningManager { .ecdsa_signers .get(address) .ok_or(SignerModuleError::UnknownProxySigner(address.to_vec()))?; - let signature = match module_signing_id { - Some(id) => ecdsa_proxy.sign(self.chain, object_root, Some(id)).await?, - None => ecdsa_proxy.sign(self.chain, object_root, None).await?, - }; + let signature = ecdsa_proxy.sign(self.chain, object_root, module_signing_id).await?; Ok(signature) } From d091a90827df196f40b2aa82209d9c6268e6be1b Mon Sep 17 00:00:00 2001 From: Joe Clapis Date: Mon, 4 Aug 2025 16:09:35 -0400 Subject: [PATCH 67/67] Fixed some docs --- docs/docs/developing/prop-commit-signing.md | 24 ++++++++++----------- docs/docs/get_started/configuration.md | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/docs/docs/developing/prop-commit-signing.md b/docs/docs/developing/prop-commit-signing.md index 21dd6320..fd19fafc 100644 --- a/docs/docs/developing/prop-commit-signing.md +++ b/docs/docs/developing/prop-commit-signing.md @@ -1,18 +1,18 @@ -# Requesting Proposer Commitment Signatures with Commit Boost +# Requesting Proposer Commitment Signatures with Commit-Boost When you create a new validator on the Ethereum network, one of the steps is the generation of a new BLS private key (commonly known as the "validator key" or the "signer key") and its corresponding BLS public key (the "validator pubkey", used as an identifier). 
Typically this private key will be used by an Ethereum consensus client to sign things such as attestations and blocks for publication on the Beacon chain. These signatures prove that you, as the owner of that private key, approve of the data being signed. However, as general-purpose private keys, they can also be used to sign *other* arbitrary messages not destined for the Beacon chain. -Commit Boost takes advantage of this by offering a standard known as **proposer commitments**. These are arbitrary messages (albeit with some important rules), similar to the kind used on the Beacon chain, that have been signed by one of the owner's private keys. Modules interested in leveraging Commit Boost's proposer commitments can construct their own data in whatever format they like and request that Commit Boost's **signer service** generate a signature for it with a particular private key. The module can then use that signature to verify the data was signed by that user. +Commit-Boost takes advantage of this by offering a standard known as **proposer commitments**. These are arbitrary messages (albeit with some important rules), similar to the kind used on the Beacon chain, that have been signed by one of the owner's private keys. Modules interested in leveraging Commit-Boost's proposer commitments can construct their own data in whatever format they like and request that Commit-Boost's **signer service** generate a signature for it with a particular private key. The module can then use that signature to verify the data was signed by that user. -Commit Boost supports proposer commitment signatures for both BLS private keys (identified by their public key) and ECDSA private keys (identified by their Ethereum address). +Commit-Boost supports proposer commitment signatures for both BLS private keys (identified by their public key) and ECDSA private keys (identified by their Ethereum address). ## Rules of Proposer Commitment Signatures -Proposer commitment signatures produced by Commit Boost's signer service conform to the following rules: +Proposer commitment signatures produced by Commit-Boost's signer service conform to the following rules: - Signatures are **unique** to a given EVM chain (identified by its [chain ID](https://chainlist.org/)). Signatures generated for one chain will not work on a different chain. -- Signatures are **unique** to Commit Boost proposer commitments. The signer service **cannot** be used to create signatures that could be used for other applications, such as for attestations on the Beacon chain. While the signer service has access to the same validator private keys used to attest on the Beacon chain, it cannot create signatures that would get you slashed on the Beacon chain. +- Signatures are **unique** to Commit-Boost proposer commitments. The signer service **cannot** be used to create signatures that could be used for other applications, such as for attestations on the Beacon chain. While the signer service has access to the same validator private keys used to attest on the Beacon chain, it cannot create signatures that would get you slashed on the Beacon chain. - Signatures are **unique** to a particular module. One module cannot, for example, request an identical payload as another module and effectively "forge" a signature for the second module; identical payloads from two separate modules will result in two separate signatures. - The data payload being signed must be a **32-byte array**, typically serialized as a 64-character hex string with an optional `0x` prefix.
The value itself is arbitrary, as long as it has meaning to the requester - though it is typically the 256-bit hash of some kind of data. - If requesting a signature from a BLS key, the resulting signature will be a standard BLS signature (96 bytes in length). @@ -21,20 +21,20 @@ Proposer commitment signatures produced by Commit Boost's signer service conform ## Configuring a Module for Proposer Commitments -Commit Boost's signer service must be configured prior to launching to expect requests from your module. There are two main parts: +Commit-Boost's signer service must be configured prior to launching to expect requests from your module. There are two main parts: -1. An entry for your module into [Commit Boost's configuration file](../get_started/configuration.md#custom-module). This must include a unique ID for your module, the line `type = "commit"`, and include a unique [signing ID](#the-signing-id) for your module. Generally you should provide values for these in your documentation, so your users can reference it when configuring their own Commit Boost node. +1. An entry for your module into [Commit-Boost's configuration file](../get_started/configuration.md#custom-module). This must include a unique ID for your module, the line `type = "commit"`, and include a unique [signing ID](#the-signing-id) for your module. Generally you should provide values for these in your documentation, so your users can reference it when configuring their own Commit-Boost node. -2. A JWT secret used by your module to authenticate with the signer in HTTP requests. This must be a string that both the Commit Boost signer can read and your module can read, but no other modules should be allowed to access it. The user should be responsible for determining an appropriate secret and providing it to the Commit Boost signer service securely; your module will need some way to accept this, typically via a command line argument that accepts a path to a file with the secret or as an environment variable. +2. A JWT secret used by your module to authenticate with the signer in HTTP requests. This must be a string that both the Commit-Boost signer can read and your module can read, but no other modules should be allowed to access it. The user should be responsible for determining an appropriate secret and providing it to the Commit-Boost signer service securely; your module will need some way to accept this, typically via a command line argument that accepts a path to a file with the secret or as an environment variable. -Once the user has configured both Commit Boost and your module with these settings, your module will be able to authenticate with the signer service and request signatures. +Once the user has configured both Commit-Boost and your module with these settings, your module will be able to authenticate with the signer service and request signatures. ## The Signing ID -Your module's signing ID is a 32-byte value that is used as a unique identifier within the signing process. Proposer commitment signatures incorporate this value along with the data being signed as a way to create signatures that are exclusive to your module, so other modules can't maliciously construct signatures that appear to be from your module. Your module must have this ID incorporated into itself ahead of time, and the user must include this same ID within their Commit Boost configuration file section for your module. 
Commit Boost does not maintain a global registry of signing IDs, so this is a value you should provide to your users in your documentation. +Your module's signing ID is a 32-byte value that is used as a unique identifier within the signing process. Proposer commitment signatures incorporate this value along with the data being signed as a way to create signatures that are exclusive to your module, so other modules can't maliciously construct signatures that appear to be from your module. Your module must have this ID incorporated into itself ahead of time, and the user must include this same ID within their Commit-Boost configuration file section for your module. Commit-Boost does not maintain a global registry of signing IDs, so this is a value you should provide to your users in your documentation. -The Signing ID is decoupled from your module's human-readable name (the `module_id` field in the Commit Boost configuration file) so that any changes to your module name will not invalidate signatures from previous versions. Similarly, if you don't change the module ID but *want* to invalidate previous signatures, you can modify the signing ID and it will do so. Just ensure your users are made aware of the change, so they can update it in their Commit Boost configuration files accordingly. +The Signing ID is decoupled from your module's human-readable name (the `module_id` field in the Commit-Boost configuration file) so that any changes to your module name will not invalidate signatures from previous versions. Similarly, if you don't change the module ID but *want* to invalidate previous signatures, you can modify the signing ID and it will do so. Just ensure your users are made aware of the change, so they can update it in their Commit-Boost configuration files accordingly. ## Structure of a Signature @@ -53,7 +53,7 @@ where: - `Signing ID` is your module's 32-byte signing ID. The signer service will load this for your module from its configuration file. -- `Domain` is the 32-byte output of the [compute_domain()](https://eth2book.info/capella/part2/building_blocks/signatures/#domain-separation-and-forks) function in the Beacon specification. The 4-byte domain type in this case is not a standard Beacon domain type, but rather Commit Boost's own domain type: `0x6D6D6F43`. +- `Domain` is the 32-byte output of the [compute_domain()](https://eth2book.info/capella/part2/building_blocks/signatures/#domain-separation-and-forks) function in the Beacon specification. The 4-byte domain type in this case is not a standard Beacon domain type, but rather Commit-Boost's own domain type: `0x6D6D6F43`. The data signed in a proposer commitment is the 32-byte root of this tree (the green `Root` box). Note that calculating this will involve calculating the Merkle Root of two separate trees: first the blue data subtree (with the original request data and the signing ID) to establish the blue `Root` value, and then again with a tree created from that value and the `Domain`. diff --git a/docs/docs/get_started/configuration.md b/docs/docs/get_started/configuration.md index 0c25e54b..ed2ffa6e 100644 --- a/docs/docs/get_started/configuration.md +++ b/docs/docs/get_started/configuration.md @@ -387,7 +387,7 @@ docker_image = "test_builder_log" A few things to note: - We now added a `signer` section which will be used to create the Signer module. -- There is now a `[[modules]]` section which at a minimum needs to specify the module `id`, `type` and `docker_image`. 
For modules with type `commit`, which will be used to access the Signer service and request signatures for preconfs, you will also need to specify the module's unique `signing_id` (see ). Additional parameters needed for the business logic of the module will also be here. +- There is now a `[[modules]]` section which at a minimum needs to specify the module `id`, `type` and `docker_image`. For modules with type `commit`, which will be used to access the Signer service and request signatures for preconfs, you will also need to specify the module's unique `signing_id` (see [the proposer commitment documentation](../developing/prop-commit-signing.md)). Additional parameters needed for the business logic of the module will also be here. To learn more about developing modules, check out [here](/category/developing).
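
The two-level hash-tree-root described in the `prop-commit-signing.md` changes above can be sketched in a few lines of Rust. This is an illustrative example only, not part of the patch series: it assumes the `alloy`, `tree_hash`, and `tree_hash_derive` crates as used elsewhere in these commits, and it redefines local stand-ins for `cb_common::types::{PropCommitSigningInfo, SigningData}` purely for demonstration.

```rust
// Illustrative sketch only (not part of this patch series): compute the root
// that the signer service ultimately signs for a proposer commitment.
// Assumptions: the `alloy`, `tree_hash`, and `tree_hash_derive` crates as used
// elsewhere in this series; the structs below are local stand-ins for
// `cb_common::types::{PropCommitSigningInfo, SigningData}`.
use alloy::primitives::B256;
use tree_hash::TreeHash;
use tree_hash_derive::TreeHash;

/// Inner container: the module's 32-byte request data plus its 32-byte signing ID.
#[derive(TreeHash)]
struct PropCommitSigningInfo {
    data: B256,
    module_signing_id: B256,
}

/// Outer container: the inner root plus the chain's Commit-Boost signing domain.
#[derive(TreeHash)]
struct SigningData {
    object_root: B256,
    signing_domain: B256,
}

/// First merkleize (data, signing ID), then merkleize that root with the domain;
/// the resulting 32-byte value is what the BLS or ECDSA key actually signs.
fn proposer_commitment_root(data: B256, module_signing_id: B256, signing_domain: B256) -> B256 {
    let object_root = PropCommitSigningInfo { data, module_signing_id }.tree_hash_root();
    SigningData { object_root, signing_domain }.tree_hash_root()
}
```

Because the signing ID is merkleized together with the request data before the domain is applied, identical payloads from two different modules produce different roots, which is what makes the resulting signatures module-exclusive as described in the documentation above.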