examples/basics/state/src/main.rs
//! An application may have multiple data objects that are shared across
//! all handlers within the same application.
//!
//! For global shared state, we wrap our state in an [`actix_web::web::Data`] and move it into
//! the factory closure. The closure is called once per worker thread, and we clone our state
//! and attach it to each instance of the [`App`] with `.app_data(state.clone())`.
//!
//! For thread-local state, we construct our state within the factory closure and attach it to
//! the app with `.app_data(Data::new(state))`.
//!
//! We retrieve our app state within our handlers with a `state: Data<...>` argument.
//!
//! By default, Actix Web runs one [`App`] per logical CPU core.
//! When running on `<N>` cores, the example increments `counter_mutex` (global state via a
//! `Mutex`) and `counter_atomic` (global state via an atomic variable) on every request, but
//! `counter_cell` (thread-local state) only appears to increment every Nth request on average.
//! This is because each worker keeps its own `counter_cell` and requests are distributed
//! roughly evenly across the workers.
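//!
//! As an illustrative sketch only (the exact values depend on which worker handles each
//! request), repeatedly requesting the endpoint on an assumed 4-core machine might return
//! response bodies like:
//!
//! ```text
//! global mutex counter: 1, local counter: 1, global atomic counter: 1
//! global mutex counter: 2, local counter: 1, global atomic counter: 2
//! global mutex counter: 3, local counter: 1, global atomic counter: 3
//! global mutex counter: 4, local counter: 1, global atomic counter: 4
//! global mutex counter: 5, local counter: 2, global atomic counter: 5
//! ```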
//!
//! Check the [user guide](https://actix.rs/docs/application/#state) for more info.
use std::{
    cell::Cell,
    io,
    sync::{
        atomic::{AtomicUsize, Ordering},
        Mutex,
    },
};
use actix_web::{
    middleware,
    web::{self, Data},
    App, HttpRequest, HttpResponse, HttpServer,
};
/// Simple handler: increments each counter and returns their current values.
async fn index(
    counter_mutex: Data<Mutex<usize>>,
    counter_cell: Data<Cell<u32>>,
    counter_atomic: Data<AtomicUsize>,
    req: HttpRequest,
) -> HttpResponse {
    println!("{req:?}");

    // Increment the counters
    *counter_mutex.lock().unwrap() += 1;
    counter_cell.set(counter_cell.get() + 1);
    counter_atomic.fetch_add(1, Ordering::SeqCst);

    let body = format!(
        "global mutex counter: {}, local counter: {}, global atomic counter: {}",
        *counter_mutex.lock().unwrap(),
        counter_cell.get(),
        counter_atomic.load(Ordering::SeqCst),
    );

    HttpResponse::Ok().body(body)
}

#[actix_web::main]
async fn main() -> io::Result<()> {
    env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));

    // Create some global state prior to building the server
    #[allow(clippy::mutex_atomic)] // it's intentional.
    let counter_mutex = Data::new(Mutex::new(0usize));
    let counter_atomic = Data::new(AtomicUsize::new(0usize));

    log::info!("starting HTTP server at http://localhost:8080");

    // `move` is necessary to give the closure below ownership of the counters
    HttpServer::new(move || {
        // Create some thread-local state
        let counter_cell = Cell::new(0u32);

        App::new()
            .app_data(counter_mutex.clone()) // add shared state
            .app_data(counter_atomic.clone()) // add shared state
            .app_data(Data::new(counter_cell)) // add thread-local state
            // enable logger
            .wrap(middleware::Logger::default())
            // register simple handler
            .service(web::resource("/").to(index))
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}