
fixes according to reviews

Fix typos. Format the comments. Use macros for routes, etc.
fakeshadow 2020-06-28 06:15:34 +08:00
parent 1e61a03d5f
commit c7660c8f86
4 changed files with 61 additions and 47 deletions
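For context on the "use macros for routes" part of the commit message: the main.rs diff below replaces manual `web::resource(...).route(...)` registration with actix-web's route attribute macros. A minimal sketch of the two styles side by side, against actix-web 2.x as used in this example (the handler names and paths here are placeholders, not the example's real ones):

```rust
use actix_web::{get, web, App, HttpServer, Responder};

// old style: a plain async fn, wired up manually in the App factory
async fn manual_handler() -> impl Responder {
    "registered via web::resource"
}

// new style: the #[get] macro attaches the path and method to the handler itself
#[get("/macro")]
async fn macro_handler() -> impl Responder {
    "registered via the route macro"
}

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new()
            // manual registration: build the resource and route explicitly
            .service(web::resource("/manual").route(web::get().to(manual_handler)))
            // macro registration: the service already knows its path and method
            .service(macro_handler)
    })
    .bind("127.0.0.1:8080")?
    .run()
    .await
}
```

Both forms register an equivalent service; the macro form simply keeps the path and method next to the handler, which is what the diff does for `/pool` and `/pool2`.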

Cargo.toml

@ -1,10 +1,10 @@
[workspace]
members = [
"async_data_factory",
"async_db",
"async_ex1",
"async_ex2",
"async_pg",
"async_data_factory",
"awc_https",
"basics",
"casbin",

async_data_factory/Cargo.toml

@ -11,5 +11,5 @@ actix-rt = "1.1.1"
actix-web = { version = "2.0.0" }
num_cpus = "1.13.0"
redis = { version = "0.16.0", default-features = false, features = ["tokio-rt-core"] }
# tang-rs is an object pool for test purpose
redis_tang = { git = "https://github.com/fakeshadow/tang_rs", branch = "atomic" }
# redis_tang is a redis pool for test purposes
redis_tang = "0.1.0"

async_data_factory/README.md

@ -5,12 +5,12 @@ This is an example on constructing async state with `App::data_factory`
`data_factory` would make sense in these situations:
- When the async state does not necessarily have to be shared between workers/threads.
- When async state would spawn tasks on `actix-rt`. If we centralized the state there could be a possibilitythe tasks get a very unbalanced distribution on the workers/threads
- When async state would spawn tasks on `actix-rt`. If we centralized the state there could be a possibility that the tasks get a very unbalanced distribution across the workers/threads
(`actix-rt` spawns tasks on the local thread whenever it's called)
## Requirement:
- `rustc 1.43 stable`
- `redis` server listen on `127.0.0.1:6379`(or make change to const var `REDIS_URL` in `main.rs`)
- `redis` server listening on `127.0.0.1:6379` (or set the `REDIS_URL` environment variable when starting the example)
## Endpoints:
- use a workload generator (e.g. wrk) to benchmark the endpoints:
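Stepping back from the README diff for a moment: to make the `data_factory` bullet points above more concrete, here is a rough, hypothetical sketch of per-worker state built with `App::data_factory` (the `WorkerState` type, the `/state` route, and the worker counter are illustrative only; the actual example in `main.rs` below builds a redis pool per worker instead):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

use actix_web::{get, web::Data, App, HttpServer, Responder};

// Illustrative per-worker state; the real example stores a redis pool here.
struct WorkerState {
    id: usize,
}

static WORKER_COUNTER: AtomicUsize = AtomicUsize::new(0);

#[get("/state")]
async fn show_state(state: Data<WorkerState>) -> impl Responder {
    // Each worker thread sees its own WorkerState instance.
    format!("served by worker state #{}", state.id)
}

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new()
            // data_factory runs once per worker, so the state is built
            // per thread rather than shared behind an Arc.
            .data_factory(|| async {
                let id = WORKER_COUNTER.fetch_add(1, Ordering::SeqCst);
                Ok::<_, ()>(WorkerState { id })
            })
            .service(show_state)
    })
    .bind("127.0.0.1:8080")?
    .run()
    .await
}
```

Because the factory closure runs once per worker, each worker thread gets its own `WorkerState`; state that must be shared across workers would instead go through `.data(...)`, typically behind something like an `Arc`.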

async_data_factory/src/main.rs

@ -1,32 +1,40 @@
use actix_web::web::Data;
use actix_web::{web, App, HttpServer};
use actix_web::{get, App, HttpServer};
use redis_tang::{Builder, Pool, RedisManager};
// change according to your redis setting.
const REDIS_URL: &str = "redis://127.0.0.1";
#[actix_rt::main]
async fn main() -> std::io::Result<()> {
let redis_url =
std::env::var("REDIS_URL").unwrap_or_else(|_| String::from("redis://127.0.0.1"));
let num_cpus = num_cpus::get();
// a shared redis pool for work load comparision.
let pool = pool_builder(num_cpus).await.expect("fail to build pool");
let pool = Wrapper(pool);
// a shared redis pool for workload comparison.
let pool = pool_builder(num_cpus, redis_url.as_str())
.await
.expect("fail to build pool");
let pool = RedisWrapper(pool);
HttpServer::new(move || {
let redis_url = redis_url.clone();
App::new()
.data(pool.clone())
// a dummy data_factory implementation
.data_factory(|| {
/*
App::data_factory would accept a future as return type and poll the future when App is initialized.
App::data_factory accepts a closure whose return type is a future, and polls that
future when the App is initialized.
The Output of the future must be Result<T, E> and T will be the transformed to App::Data<T> that can be extracted from handler/request.
The Output of the future must be Result<T, E>, and T will be transformed into
Data<T>, which can be extracted in a handler/request.
(The E will only be used to trigger a log::error.)
This data is bound to worker thread and you get an instance of it for every worker of the HttpServer.(hence the name data_factory)
*. It is NOT shared between workers(unless the underlying data is a smart pointer like Arc<T>).
This data is bound to the worker thread and you get an instance of it for every
worker of the HttpServer (hence the name data_factory).
*. It is NOT shared between workers
(unless the underlying data is a smart pointer like Arc<T>).
*/
async {
@ -34,52 +42,47 @@ async fn main() -> std::io::Result<()> {
Ok::<usize, ()>(123)
}
})
// a data_factory redis pool for work load comparision.
.data_factory(|| pool_builder(1))
.service(web::resource("/pool").route(web::get().to(pool_shared_prebuilt)))
.service(web::resource("/pool2").route(web::get().to(pool_local)))
// a data_factory redis pool for workload comparison.
.data_factory(move || pool_builder(1, redis_url.clone()))
.service(pool_shared_prebuilt)
.service(pool_local)
})
.bind("127.0.0.1:8080")?
.workers(num_cpus)
.run()
.await
}
// this pool is shared between workers.
// we have all redis connections spawned tasks on main thread therefore it puts too much pressure on one thread.
// *. This is the case for redis::aio::MultiplexedConnection and it may not apply to other async redis connection type.
async fn pool_shared_prebuilt(pool: Data<Wrapper>) -> &'static str {
let mut client = pool.0.get().await.unwrap().clone();
redis::cmd("PING")
.query_async::<_, ()>(&mut client)
.await
.unwrap();
"Done"
/*
This pool is shared between workers. All of the redis connections spawn their tasks on the
main thread, and therefore it puts too much pressure on one thread.
*. This is the case for redis::aio::MultiplexedConnection and it may not apply to other async
redis connection types.
*/
#[get("/pool")]
async fn pool_shared_prebuilt(pool: Data<RedisWrapper>) -> &'static str {
ping(&pool.as_ref().0).await
}
// this pool is built with App::data_factory and we have 2 connections fixed for every worker.
// It's evenly distributed and have no cross workers synchronization.
/*
This pool is built with App::data_factory and we have 2 connections fixed for every worker.
It's evenly distributed and has no cross-worker synchronization.
*/
#[get("/pool2")]
async fn pool_local(data: Data<usize>, pool: Data<Pool<RedisManager>>) -> &'static str {
assert_eq!(data.get_ref(), &123);
let mut client = pool.get().await.unwrap().clone();
redis::cmd("PING")
.query_async::<_, ()>(&mut client)
.await
.unwrap();
"Done"
ping(pool.as_ref()).await
}
// some boiler plate for create redis pool
// boilerplate for the redis pool
#[derive(Clone)]
struct Wrapper(Pool<RedisManager>);
struct RedisWrapper(Pool<RedisManager>);
async fn pool_builder(num_cpus: usize) -> Result<Pool<RedisManager>, ()> {
let mgr = RedisManager::new(REDIS_URL);
async fn pool_builder(
num_cpus: usize,
redis_url: impl redis::IntoConnectionInfo,
) -> Result<Pool<RedisManager>, ()> {
let mgr = RedisManager::new(redis_url);
Builder::new()
.always_check(false)
.idle_timeout(None)
@ -90,3 +93,14 @@ async fn pool_builder(num_cpus: usize) -> Result<Pool<RedisManager>, ()> {
.await
.map_err(|_| ())
}
async fn ping(pool: &Pool<RedisManager>) -> &'static str {
let mut client = pool.get().await.unwrap().clone();
redis::cmd("PING")
.query_async::<_, ()>(&mut client)
.await
.unwrap();
"Done"
}
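As a final, hypothetical illustration of how the refactored pieces compose (not part of this commit): another handler could reuse the per-worker pool registered by `data_factory` in exactly the same way `pool_local` does, for example a `/counter` route that INCRs a key. It would be registered with `.service(counter)` in the `App` factory alongside the other two routes.

```rust
use actix_web::{get, web::Data};
use redis_tang::{Pool, RedisManager};

// Hypothetical extra endpoint reusing the per-worker redis pool built by
// data_factory: every request increments a counter key in redis.
#[get("/counter")]
async fn counter(pool: Data<Pool<RedisManager>>) -> String {
    let mut client = pool.get().await.unwrap().clone();
    let value: i64 = redis::cmd("INCR")
        .arg("async_data_factory_counter")
        .query_async(&mut client)
        .await
        .unwrap();
    format!("counter is now {}", value)
}
```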