
feat: add mainmatter telemetry workshop example

Rob Ede
2024-06-05 04:52:05 +01:00
parent 0614b2c506
commit 8e0b57e658
16 changed files with 793 additions and 87 deletions

View File

@@ -0,0 +1,2 @@
RUST_LOG="info,mainmatter_workshop=trace"
HONEYCOMB_API_KEY="..."

View File

@@ -0,0 +1 @@
/target

View File

@@ -0,0 +1,22 @@
[package]
name = "tracing-mainmatter-workshop"
version = "1.0.0"
publish.workspace = true
edition.workspace = true
rust-version.workspace = true
[dependencies]
actix-web.workspace = true
actix-web-lab.workspace = true
dotenvy.workspace = true
metrics = "0.23"
metrics-exporter-prometheus = { version = "0.15", default-features = false }
opentelemetry = "0.22"
opentelemetry_sdk = { version = "0.22", features = ["rt-tokio-current-thread"] }
opentelemetry-otlp = { version = "0.15", features = ["tls-roots"] }
tonic = "0.11"
tracing-actix-web = { version = "0.7", features = ["opentelemetry_0_22", "uuid_v7"] }
tracing-opentelemetry = "0.23"
tracing-subscriber.workspace = true
tracing-bunyan-formatter = "0.3"
tracing.workspace = true

View File

@@ -0,0 +1,23 @@
# Telemetry Workshop Solution
## Overview
A solution to the capstone project at the end of [Mainmatter's telemetry workshop](https://github.com/mainmatter/rust-telemetry-workshop).
As stated in the exercise brief, this example will:
- Configure a `tracing` subscriber that exports data to both Honeycomb and stdout, in JSON format;
- Configure a suitable panic hook;
- Configure a `metrics` recorder that exposes metric data at `/metrics`~~, using a different port than your API endpoints~~ (this example shows how to use the existing HTTP server);
- Add one or more middleware that:
  - Create a top-level INFO span for each incoming request;
  - Track the number of concurrent requests using a gauge;
  - Track request duration using a histogram;
  - Track the number of handled requests.

All metrics should include success/failure as a label.
## Usage
```console
$ cd tracing/mainmatter-workshop
$ cargo run
```
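
Once the server is running, the routes added in this commit can be exercised from another shell; `/metrics` returns the Prometheus exposition text produced by the recorder (paths and bind address taken from `routes.rs` and `main.rs` below, output omitted):

```console
$ curl http://127.0.0.1:8080/hello
$ curl http://127.0.0.1:8080/sleep
$ curl http://127.0.0.1:8080/metrics
```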

View File

@@ -0,0 +1,57 @@
use std::{io, time::Duration};
use opentelemetry::KeyValue;
use opentelemetry_otlp::WithExportConfig as _;
use opentelemetry_sdk::{runtime, trace::Tracer, Resource};
use tonic::metadata::MetadataMap;
use tracing::level_filters::LevelFilter;
use tracing_bunyan_formatter::{BunyanFormattingLayer, JsonStorageLayer};
use tracing_subscriber::{layer::SubscriberExt as _, util::SubscriberInitExt as _, EnvFilter};
pub(crate) fn init() {
let app_name = "actix-web-mainmatter-telemetry-workshop-capstone";
let tracer = opentelemetry_tracer(app_name);
let telemetry = tracing_opentelemetry::layer().with_tracer(tracer);
// we prefer the bunyan formatting layer in this example because it captures
// span enters and exits by default, making it a good way to observe request
// info, such as duration, when each request's span closes
let stdout_log = BunyanFormattingLayer::new(app_name.to_owned(), io::stdout);
tracing_subscriber::registry()
.with(
EnvFilter::builder()
.with_default_directive(LevelFilter::INFO.into())
.from_env_lossy(),
)
.with(telemetry)
.with(JsonStorageLayer)
.with(stdout_log)
.init();
}
fn opentelemetry_tracer(app_name: &str) -> Tracer {
let honeycomb_key =
std::env::var("HONEYCOMB_API_KEY").expect("`HONEYCOMB_API_KEY` should be set in your .env");
let mut metadata = MetadataMap::with_capacity(1);
metadata.insert("x-honeycomb-team", honeycomb_key.try_into().unwrap());
let trace_config =
opentelemetry_sdk::trace::Config::default().with_resource(Resource::new(vec![
KeyValue::new("service.name", app_name.to_owned()),
]));
let exporter = opentelemetry_otlp::new_exporter()
.tonic()
.with_endpoint("https://api.honeycomb.io/api/traces")
.with_timeout(Duration::from_secs(5))
.with_metadata(metadata);
opentelemetry_otlp::new_pipeline()
.tracing()
.with_trace_config(trace_config)
.with_exporter(exporter)
.install_batch(runtime::TokioCurrentThread)
.unwrap()
}
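
The exercise brief above also calls for a suitable panic hook; that is not part of this file and may live in one of the other changed files. A minimal sketch of one approach, assuming only the `tracing` crate already in the dependency tree (the `init_panic_hook` name is illustrative):

```rust
use std::panic;

/// Hypothetical helper: forward panic messages through `tracing` so they end up
/// in the same stdout/Honeycomb pipeline configured by `init()` above.
pub(crate) fn init_panic_hook() {
    panic::set_hook(Box::new(|info| {
        // The payload is `&dyn Any`; panics raised via `panic!` carry either a `&str` or a `String`.
        let message = info
            .payload()
            .downcast_ref::<&str>()
            .copied()
            .or_else(|| info.payload().downcast_ref::<String>().map(String::as_str))
            .unwrap_or("<non-string panic payload>");
        let location = info
            .location()
            .map(ToString::to_string)
            .unwrap_or_else(|| "<unknown>".to_owned());

        tracing::error!(panic.message = message, panic.location = %location, "panic occurred");
    }));
}
```

If used, it would be called from `main` next to `logging::init()`.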

View File

@@ -0,0 +1,32 @@
use std::io;
use actix_web::{App, HttpServer};
use actix_web_lab::{extract::ThinData, middleware::from_fn};
use tracing_actix_web::TracingLogger;
mod logging;
mod metric_names;
mod middleware;
mod prometheus;
mod routes;
#[actix_web::main]
async fn main() -> io::Result<()> {
dotenvy::dotenv().ok();
logging::init();
let handle = prometheus::init();
HttpServer::new(move || {
App::new()
.app_data(ThinData(handle.clone()))
.service(routes::hello)
.service(routes::sleep)
.service(routes::metrics)
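// middleware wrapped later runs first: `TracingLogger` below is the outermost layer,
// so its request span and `RequestId` extension exist by the time `request_telemetry` runs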
.wrap(from_fn(middleware::request_telemetry))
.wrap(TracingLogger::default())
})
.workers(2)
.bind(("127.0.0.1", 8080))?
.run()
.await
}

View File

@@ -0,0 +1,2 @@
pub(crate) const HISTOGRAM_HTTP_REQUEST_DURATION: &str = "http_request_duration";
pub(crate) const GAUGE_HTTP_CONCURRENT_REQUESTS: &str = "http_concurrent_requests";

View File

@@ -0,0 +1,40 @@
use std::time::Instant;
use actix_web::HttpMessage as _;
use actix_web::{
body::MessageBody,
dev::{ServiceRequest, ServiceResponse},
http::header::{HeaderName, HeaderValue},
};
use actix_web_lab::middleware::Next;
use tracing_actix_web::RequestId;
use crate::metric_names::*;
pub(crate) async fn request_telemetry(
req: ServiceRequest,
next: Next<impl MessageBody>,
) -> actix_web::Result<ServiceResponse<impl MessageBody>> {
let now = Instant::now();
metrics::gauge!(GAUGE_HTTP_CONCURRENT_REQUESTS).increment(1);
let mut res = next.call(req).await?;
let req_id = res.request().extensions().get::<RequestId>().copied();
if let Some(req_id) = req_id {
res.headers_mut().insert(
HeaderName::from_static("request-id"),
// this unwrap never fails, since UUIDs are valid ASCII strings
HeaderValue::from_str(&req_id.to_string()).unwrap(),
);
}
let diff = now.elapsed();
metrics::histogram!(HISTOGRAM_HTTP_REQUEST_DURATION).record(diff);
metrics::gauge!(GAUGE_HTTP_CONCURRENT_REQUESTS).decrement(1);
Ok(res)
}
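
The brief's last item, counting handled requests with a success/failure label, is not visible in this middleware; it may be implemented in one of the other changed files. A rough sketch of one way to do it with the `metrics` crate, mirroring the middleware above (the `http_requests_total` name and `outcome` label are illustrative, not from this commit):

```rust
use actix_web::{
    body::MessageBody,
    dev::{ServiceRequest, ServiceResponse},
};
use actix_web_lab::middleware::Next;

// Hypothetical middleware: counts handled requests, labelled by whether the
// response status indicates success.
pub(crate) async fn count_requests(
    req: ServiceRequest,
    next: Next<impl MessageBody>,
) -> actix_web::Result<ServiceResponse<impl MessageBody>> {
    let res = next.call(req).await?;
    let outcome = if res.status().is_success() { "success" } else { "failure" };
    metrics::counter!("http_requests_total", "outcome" => outcome).increment(1);
    Ok(res)
}
```

It would be registered the same way as `request_telemetry`, via `.wrap(from_fn(...))` in `main.rs`.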

View File

@@ -0,0 +1,28 @@
use std::array;
use metrics::Unit;
use metrics_exporter_prometheus::{PrometheusBuilder, PrometheusHandle};
use crate::metric_names::*;
pub(crate) fn init() -> PrometheusHandle {
metrics::describe_histogram!(
HISTOGRAM_HTTP_REQUEST_DURATION,
Unit::Seconds,
"Duration (in seconds) a request took to be processed"
);
PrometheusBuilder::new()
.set_buckets_for_metric(
metrics_exporter_prometheus::Matcher::Full(HISTOGRAM_HTTP_REQUEST_DURATION.to_owned()),
&exp_buckets::<28>(0.001), // values from ~0.3ms -> ~17s
)
.unwrap()
.install_recorder()
.unwrap()
}
fn exp_buckets<const N: usize>(base: f64) -> [f64; N] {
const RATIO: f64 = 1.5;
array::from_fn(|i| base * RATIO.powi(i as i32 - 3)).map(|val| (val * 1_e7).round() / 1_e7)
}
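
As a quick sanity check of the bucket math (not taken from the commit): `exp_buckets::<28>(0.001)` produces `0.001 * 1.5^(i - 3)` for `i` in `0..28`, so the buckets run from roughly 0.3 ms up to roughly 17 s:

```rust
fn main() {
    let base: f64 = 0.001;
    let ratio: f64 = 1.5;
    // first bucket: i = 0  ->  0.001 * 1.5^-3 ≈ 0.0002963 s (~0.3 ms)
    // last bucket:  i = 27 ->  0.001 * 1.5^24 ≈ 16.83 s
    println!("first = {:.7}", base * ratio.powi(-3));
    println!("last  = {:.4}", base * ratio.powi(24));
}
```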

View File

@@ -0,0 +1,21 @@
use std::time::Duration;
use actix_web::{get, HttpResponse, Responder};
use actix_web_lab::extract::ThinData;
use metrics_exporter_prometheus::PrometheusHandle;
#[get("/hello")]
pub(crate) async fn hello() -> impl Responder {
"Hello, World!"
}
#[get("/sleep")]
pub(crate) async fn sleep() -> impl Responder {
actix_web::rt::time::sleep(Duration::from_millis(500)).await;
HttpResponse::Ok()
}
#[get("/metrics")]
pub(crate) async fn metrics(metrics_handle: ThinData<PrometheusHandle>) -> impl Responder {
metrics_handle.render()
}
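
Not part of the commit, but the `hello` handler can be exercised in isolation with actix-web's test utilities; a sketch of a `tests` module that could sit in this file:

```rust
#[cfg(test)]
mod tests {
    use actix_web::{test, web::Bytes, App};

    #[actix_web::test]
    async fn hello_returns_greeting() {
        // Build an in-memory service containing only the `hello` route.
        let app = test::init_service(App::new().service(super::hello)).await;
        let req = test::TestRequest::get().uri("/hello").to_request();
        let body = test::call_and_read_body(&app, req).await;
        assert_eq!(body, Bytes::from_static(b"Hello, World!"));
    }
}
```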