This repository has been archived by the owner on Sep 13, 2023. It is now read-only.

refactor: from slog+hyper to tracing+axum (#45)
This is a massive refactor that will enable us to collect better logs.

- Moves code out of `main.rs`
  - agent proxy code is now in `proxy/agent.rs`
  - forwarding proxy code is now in `proxy/forward.rs`
  - a temporary `src/http_transport.rs` is added, pending dfinity/agent-rs#373
- Adds a new log format switch with `json` support (see the sketch after this list). The exact format may need some fine-tuning
- Completely gets rid of `reqwest`
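A minimal sketch of how such a log-format switch can be wired up with `clap` and `tracing-subscriber`'s `json` feature. The flag name, enum, and default below are illustrative assumptions, not necessarily what this commit ships:

```rust
use clap::{ArgEnum, Parser};

#[derive(Clone, ArgEnum)]
enum LogFormat {
    Text,
    Json,
}

#[derive(Parser)]
struct Opts {
    /// Output format for log events (hypothetical flag for this sketch).
    #[clap(long, arg_enum, default_value = "text")]
    log_format: LogFormat,
}

fn main() {
    let opts = Opts::parse();
    match opts.log_format {
        // Default human-readable formatter.
        LogFormat::Text => tracing_subscriber::fmt().init(),
        // Newline-delimited JSON, one object per event.
        LogFormat::Json => tracing_subscriber::fmt().json().init(),
    }
    tracing::info!("logging initialized");
}
```

With `--log-format json`, each event becomes a single JSON object on its own line, which is what makes downstream log collection easier.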
Daniel-Bloom-dfinity authored Aug 10, 2022
1 parent c619cc4 commit 7624053
Showing 14 changed files with 1,786 additions and 1,309 deletions.
411 changes: 125 additions & 286 deletions Cargo.lock

Large diffs are not rendered by default.

36 changes: 18 additions & 18 deletions Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "icx-proxy"
version = "0.9.0"
version = "0.10.0"
authors = ["DFINITY Stiftung <sdk@dfinity.org>"]
edition = "2018"
description = "CLI tool to create an HTTP proxy to the Internet Computer."
@@ -18,36 +18,36 @@ path = "src/main.rs"

[dependencies]
anyhow = "1"
axum = "0.5.3"
axum = "0.5"
base64 = "0.13"
candid = { version = "0.7", features = ["mute_warnings"] }
clap = { version = "3", features = ["cargo", "derive"] }
flate2 = "1.0.0"
futures = "0.3.21"
flate2 = "1"
form_urlencoded = "1"
futures = "0.3"
garcon = { version = "0.2", features = ["async"] }
hex = "0.4"
http-body = "0.4.5"
hyper = { version = "0.14", features = ["full"] }
hyper-rustls = { version = "0.23", features = [ "webpki-roots" ] }
hyper-tls = "0.5"
ic-agent = "0.20"
http-body = "0.4"
hyper = { version = "0.14.11", features = ["client", "http2", "http1"] }
hyper-rustls = { version = "0.23", features = [ "webpki-roots", "http2" ] }
itertools = "0.10"
ic-agent = { version = "0.20", default-features = false }
ic-utils = { version = "0.20", features = ["raw"] }
lazy-regex = "2"
opentelemetry = "0.17.0"
opentelemetry-prometheus = "0.10.0"
prometheus = "0.13.0"
reqwest = { version = "0.11", features = ["rustls-tls-webpki-roots"] }
rustls = "0.20"
opentelemetry = "0.17"
opentelemetry-prometheus = "0.10"
prometheus = "0.13"
rustls = { version = "0.20", features = ["dangerous_configuration"] }
rustls-pemfile = "1"
tower = "0.4"
tower-http = { version = "0.3", features = ["trace"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["json"]}
serde = "1"
serde_cbor = "0.11"
serde_json = "1"
sha2 = "0.10"
slog = { version = "2", features = ["max_level_trace"] }
slog-async = "2"
slog-term = "2"
tokio = { version = "1", features = ["full"] }
url = "2"
webpki-roots = "0.22"

[features]
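With `reqwest` removed, outbound HTTPS goes through `hyper` and `hyper-rustls` directly. A minimal client sketch, assuming the `webpki-roots` and `http2` features enabled above (the commit's actual wiring lives in the temporary `src/http_transport.rs`):

```rust
use hyper::{client::HttpConnector, Body, Client};
use hyper_rustls::{HttpsConnector, HttpsConnectorBuilder};

fn build_client() -> Client<HttpsConnector<HttpConnector>, Body> {
    // TLS trust anchors come from webpki-roots, so no OS cert store is needed.
    let https = HttpsConnectorBuilder::new()
        .with_webpki_roots()
        .https_or_http()
        .enable_http1()
        .enable_http2()
        .build();
    Client::builder().build(https)
}
```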
72 changes: 55 additions & 17 deletions src/canister_id.rs
@@ -1,8 +1,29 @@
use hyper::{header::HOST, Request, Uri};
use crate::http_transport::hyper::{header::HOST, http::request::Parts, Uri};
use anyhow::Context;
use clap::Args;
use ic_agent::export::Principal;
use tracing::error;

use crate::config::dns_canister_config::DnsCanisterConfig;

/// The options for the canister resolver
#[derive(Args)]
pub struct Opts {
/// A map of domain names to canister IDs.
/// Format: domain.name:canister-id
#[clap(long)]
dns_alias: Vec<String>,

/// A list of domain name suffixes. If found, the next (to the left) subdomain
/// is used as the Principal, if it parses as a Principal.
#[clap(long, default_value = "localhost")]
dns_suffix: Vec<String>,

/// Whether or not to ignore `canisterId=` when locating the canister.
#[clap(long)]
ignore_url_canister_param: bool,
}

/// A resolver for `Principal`s from a `Uri`.
trait UriResolver: Sync + Send {
fn resolve(&self, uri: &Uri) -> Option<Principal>;
@@ -13,12 +34,11 @@ impl<T: UriResolver> UriResolver for &T {
T::resolve(self, uri)
}
}

struct UriParameterResolver;

impl UriResolver for UriParameterResolver {
fn resolve(&self, uri: &Uri) -> Option<Principal> {
url::form_urlencoded::parse(uri.query()?.as_bytes())
form_urlencoded::parse(uri.query()?.as_bytes())
.find(|(name, _)| name == "canisterId")
.and_then(|(_, canister_id)| Principal::from_text(canister_id.as_ref()).ok())
}
@@ -31,31 +51,31 @@ impl UriResolver for DnsCanisterConfig {
}

/// A resolver for `Principal`s from a `Request`.
pub trait Resolver<B>: Sync + Send {
fn resolve(&self, request: &Request<B>) -> Option<Principal>;
pub trait Resolver: Sync + Send {
fn resolve(&self, request: &Parts) -> Option<Principal>;
}

impl<B, T: Resolver<B>> Resolver<B> for &T {
fn resolve(&self, request: &Request<B>) -> Option<Principal> {
impl<T: Resolver> Resolver for &T {
fn resolve(&self, request: &Parts) -> Option<Principal> {
T::resolve(self, request)
}
}

struct RequestUriResolver<T>(pub T);

impl<B, T: UriResolver> Resolver<B> for RequestUriResolver<T> {
fn resolve(&self, request: &Request<B>) -> Option<Principal> {
self.0.resolve(request.uri())
impl<T: UriResolver> Resolver for RequestUriResolver<T> {
fn resolve(&self, request: &Parts) -> Option<Principal> {
self.0.resolve(&request.uri)
}
}

struct RequestHostResolver<T>(pub T);

impl<B, T: UriResolver> Resolver<B> for RequestHostResolver<T> {
fn resolve(&self, request: &Request<B>) -> Option<Principal> {
impl<T: UriResolver> Resolver for RequestHostResolver<T> {
fn resolve(&self, request: &Parts) -> Option<Principal> {
self.0.resolve(
&Uri::builder()
.authority(request.headers().get(HOST)?.as_bytes())
.authority(request.headers.get(HOST)?.as_bytes())
.build()
.ok()?,
)
@@ -68,8 +88,8 @@ pub struct DefaultResolver {
pub check_params: bool,
}

impl<B> Resolver<B> for DefaultResolver {
fn resolve(&self, request: &Request<B>) -> Option<Principal> {
impl Resolver for DefaultResolver {
fn resolve(&self, request: &Parts) -> Option<Principal> {
if let Some(v) = RequestHostResolver(&self.dns).resolve(request) {
return Some(v);
}
@@ -85,13 +105,29 @@ impl<B> Resolver<B> for DefaultResolver {
}
}

pub fn setup(opts: Opts) -> Result<DefaultResolver, anyhow::Error> {
let dns = DnsCanisterConfig::new(&opts.dns_alias, &opts.dns_suffix)
.context("Failed to configure canister resolver DNS");
let dns = match dns {
Err(e) => {
error!("{e}");
Err(e)
}
Ok(v) => Ok(v),
}?;
Ok(DefaultResolver {
dns,
check_params: !opts.ignore_url_canister_param,
})
}

#[cfg(test)]
mod tests {
use hyper::{header::HOST, Request};
use ic_agent::export::Principal;

use super::{DefaultResolver, Resolver};
use crate::config::dns_canister_config::DnsCanisterConfig;
use crate::http_transport::hyper::{header::HOST, http::request::Parts, Request};

#[test]
fn simple_resolve() {
@@ -229,7 +265,7 @@ mod tests {
DnsCanisterConfig::new(&aliases, &suffixes).unwrap()
}

fn build_req(host: Option<&str>, uri: &str) -> Request<()> {
fn build_req(host: Option<&str>, uri: &str) -> Parts {
let req = Request::builder().uri(uri);
if let Some(host) = host {
req.header(HOST, host)
@@ -238,6 +274,8 @@
}
.body(())
.unwrap()
.into_parts()
.0
}

fn principal(v: &str) -> Principal {
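For context, a sketch of driving the new `Parts`-based resolver, mirroring the tests above. The crate-internal paths, the `DnsCanisterConfig::new` signature, and the sample canister ID are assumptions for illustration:

```rust
use crate::canister_id::{DefaultResolver, Resolver};
use crate::config::dns_canister_config::DnsCanisterConfig;
use crate::http_transport::hyper::{header::HOST, Request};

fn resolve_example() {
    // Treat `ic0.app` as a suffix, so `<canister-id>.ic0.app` resolves.
    let aliases: Vec<String> = vec![];
    let suffixes = vec!["ic0.app".to_string()];
    let dns = DnsCanisterConfig::new(&aliases, &suffixes).unwrap();
    let resolver = DefaultResolver { dns, check_params: true };

    // Resolvers now consume `http::request::Parts` rather than `Request<B>`.
    let (parts, _body) = Request::builder()
        .uri("https://rrkah-fqaaa-aaaaa-aaaaq-cai.ic0.app/index.html")
        .header(HOST, "rrkah-fqaaa-aaaaa-aaaaq-cai.ic0.app")
        .body(())
        .unwrap()
        .into_parts();

    assert!(resolver.resolve(&parts).is_some());
}
```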
6 changes: 4 additions & 2 deletions src/config/dns_canister_config.rs
@@ -1,7 +1,9 @@
use crate::config::dns_canister_rule::DnsCanisterRule;
use ic_agent::ic_types::Principal;
use std::cmp::Reverse;

use ic_agent::ic_types::Principal;

use crate::config::dns_canister_rule::DnsCanisterRule;

/// Configuration for determination of Domain Name to Principal
#[derive(Clone, Debug)]
pub struct DnsCanisterConfig {
41 changes: 14 additions & 27 deletions src/headers.rs
@@ -1,5 +1,6 @@
use ic_utils::interfaces::http_request::HeaderField;
use lazy_regex::regex_captures;
use tracing::{trace, warn};

const MAX_LOG_CERT_NAME_SIZE: usize = 100;
const MAX_LOG_CERT_B64_SIZE: usize = 2000;
@@ -11,7 +12,7 @@ pub struct HeadersData {
pub encoding: Option<String>,
}

pub fn extract_headers_data(headers: &[HeaderField], logger: &slog::Logger) -> HeadersData {
pub fn extract_headers_data(headers: &[HeaderField]) -> HeadersData {
let mut headers_data = HeadersData {
certificate: None,
tree: None,
@@ -22,50 +23,43 @@ pub fn extract_headers_data(headers: &[HeaderField], logger: &slog::Logger) -> HeadersData {
if name.eq_ignore_ascii_case("Ic-Certificate") {
for field in value.split(',') {
if let Some((_, name, b64_value)) = regex_captures!("^(.*)=:(.*):$", field.trim()) {
slog::trace!(
logger,
trace!(
">> certificate {:.l1$}: {:.l2$}",
name,
b64_value,
l1 = MAX_LOG_CERT_NAME_SIZE,
l2 = MAX_LOG_CERT_B64_SIZE
);
let bytes = decode_hash_tree(name, Some(b64_value.to_string()), logger);
let bytes = decode_hash_tree(name, Some(b64_value.to_string()));
if name == "certificate" {
headers_data.certificate = Some(match (headers_data.certificate, bytes) {
(None, bytes) => bytes,
(Some(Ok(certificate)), Ok(bytes)) => {
slog::warn!(logger, "duplicate certificate field: {:?}", bytes);
warn!("duplicate certificate field: {:?}", bytes);
Ok(certificate)
}
(Some(Ok(certificate)), Err(_)) => {
slog::warn!(
logger,
"duplicate certificate field (failed to decode)"
);
warn!("duplicate certificate field (failed to decode)");
Ok(certificate)
}
(Some(Err(_)), bytes) => {
slog::warn!(
logger,
"duplicate certificate field (failed to decode)"
);
warn!("duplicate certificate field (failed to decode)");
bytes
}
});
} else if name == "tree" {
headers_data.tree = Some(match (headers_data.tree, bytes) {
(None, bytes) => bytes,
(Some(Ok(tree)), Ok(bytes)) => {
slog::warn!(logger, "duplicate tree field: {:?}", bytes);
warn!("duplicate tree field: {:?}", bytes);
Ok(tree)
}
(Some(Ok(tree)), Err(_)) => {
slog::warn!(logger, "duplicate tree field (failed to decode)");
warn!("duplicate tree field (failed to decode)");
Ok(tree)
}
(Some(Err(_)), bytes) => {
slog::warn!(logger, "duplicate tree field (failed to decode)");
warn!("duplicate tree field (failed to decode)");
bytes
}
});
@@ -81,14 +75,10 @@ pub fn extract_headers_data(headers: &[HeaderField], logger: &slog::Logger) -> HeadersData {
headers_data
}

fn decode_hash_tree(
name: &str,
value: Option<String>,
logger: &slog::Logger,
) -> Result<Vec<u8>, ()> {
fn decode_hash_tree(name: &str, value: Option<String>) -> Result<Vec<u8>, ()> {
match value {
Some(tree) => base64::decode(tree).map_err(|e| {
slog::warn!(logger, "Unable to decode {} from base64: {}", name, e);
warn!("Unable to decode {} from base64: {}", name, e);
}),
_ => Err(()),
}
@@ -97,16 +87,14 @@ fn decode_hash_tree(
#[cfg(test)]
mod tests {
use ic_utils::interfaces::http_request::HeaderField;
use slog::o;

use super::{extract_headers_data, HeadersData};

#[test]
fn extract_headers_data_simple() {
let logger = slog::Logger::root(slog::Discard, o!());
let headers: Vec<HeaderField> = vec![];

let out = extract_headers_data(&headers, &logger);
let out = extract_headers_data(&headers);

assert_eq!(
out,
@@ -120,10 +108,9 @@

#[test]
fn extract_headers_data_content_encoding() {
let logger = slog::Logger::root(slog::Discard, o!());
let headers: Vec<HeaderField> = vec![HeaderField("Content-Encoding".into(), "test".into())];

let out = extract_headers_data(&headers, &logger);
let out = extract_headers_data(&headers);

assert_eq!(
out,
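To make the header format concrete: `Ic-Certificate` carries comma-separated `name=:BASE64:` pairs. A small sketch of the now logger-free API, using placeholder base64 values (`AAAA` decodes to three zero bytes); the crate-internal path is assumed:

```rust
use ic_utils::interfaces::http_request::HeaderField;

use crate::headers::extract_headers_data;

fn headers_example() {
    let headers = vec![HeaderField(
        "Ic-Certificate".into(),
        "certificate=:AAAA:, tree=:AAAA:".into(),
    )];

    // Decoding now emits `tracing` events instead of taking a &slog::Logger.
    let data = extract_headers_data(&headers);
    assert_eq!(data.certificate, Some(Ok(vec![0, 0, 0])));
    assert_eq!(data.tree, Some(Ok(vec![0, 0, 0])));
}
```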