
added actix_redis example

This commit is contained in:
dowwie 2018-08-11 06:58:29 -04:00
parent 026ff64cf3
commit 7ccde8c2d3
5 changed files with 116 additions and 0 deletions

.travis.yml

@@ -36,6 +36,7 @@ script:
 - |
   cd async_db && cargo check && cd ..
   cd async_ex1 && cargo check && cd ..
+  cd actix_redis && cargo check && cd ..
   cd basics && cargo check && cd ..
   cd cookie-auth && cargo check && cd ..
   cd cookie-auth-full && cargo check && cd ..

Cargo.toml

@@ -1,6 +1,7 @@
 [workspace]
 members = [
     "./",
+    "actix_redis",
     "async_db",
     "async_ex1",
     "basics",

actix_redis/Cargo.toml (new file, 14 lines)

@@ -0,0 +1,14 @@
[package]
name = "actix_redis"
version = "0.1.0"
authors = ["dowwie <dkcdkg@gmail.com>"]

[dependencies]
actix = "0.7.3"
actix-web = "0.7.3"
actix-redis = "0.5.1"
futures = "0.1.23"
redis-async = "0.4.0"
serde = "1.0.71"
serde_derive = "1.0.71"
env_logger = "0.5.12"

actix_redis/README.md (new file, 14 lines)

@@ -0,0 +1,14 @@
This project illustrates how to send multiple cache requests to Redis in bulk, asynchronously.
This asynchronous approach resembles traditional Redis pipelining. Details on how and why this
works are discussed at https://github.com/benashford/redis-async-rs/issues/19#issuecomment-412208018

To test the demo, POST a JSON object containing three strings to the /cache_stuff endpoint:

{"one": "first entry",
 "two": "second entry",
 "three": "third entry"}

These three entries will be cached to Redis, keyed accordingly.
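
For example, assuming the server from main.rs below is running locally on port 8080, the request could be sent with curl:

    curl -X POST -H "Content-Type: application/json" \
         -d '{"one": "first entry", "two": "second entry", "three": "third entry"}' \
         http://localhost:8080/cache_stuff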

actix_redis/src/main.rs (new file, 86 lines)

@@ -0,0 +1,86 @@
extern crate actix;
extern crate actix_redis;
extern crate actix_web;
extern crate env_logger;
extern crate futures;
#[macro_use]
extern crate redis_async;
extern crate serde;
#[macro_use]
extern crate serde_derive;

use std::sync::Arc;

use actix::prelude::*;
use actix_redis::{Command, RedisActor, Error as ARError};
use actix_web::{middleware, server, App, HttpRequest, HttpResponse, Json,
                AsyncResponder, http::Method, Error as AWError};
use futures::future::{Future, join_all};
use redis_async::resp::RespValue;

#[derive(Deserialize)]
pub struct CacheInfo {
    one: String,
    two: String,
    three: String
}

// Sends three SET commands to Redis concurrently and responds once all of them complete.
fn cache_stuff((info, req): (Json<CacheInfo>, HttpRequest<AppState>))
               -> impl Future<Item=HttpResponse, Error=AWError> {
    let info = info.into_inner();
    let redis = req.state().redis_addr.clone();

    let one = redis.send(Command(resp_array!["SET", "mydomain:one", info.one]));
    let two = redis.send(Command(resp_array!["SET", "mydomain:two", info.two]));
    let three = redis.send(Command(resp_array!["SET", "mydomain:three", info.three]));

    // Creates a future which represents a collection of the results of the futures
    // given. The returned future will drive execution for all of its underlying futures,
    // collecting the results into a destination `Vec<RespValue>` in the same order as they
    // were provided. If any future returns an error then all other futures will be
    // canceled and an error will be returned immediately. If all futures complete
    // successfully, however, then the returned future will succeed with a `Vec` of
    // all the successful results.
    let info_set = join_all(vec![one, two, three].into_iter());

    info_set
        .map_err(AWError::from)
        .and_then(|res: Vec<Result<RespValue, ARError>>|
            // successful operations return "OK", so confirm that all returned as such
            if !res.iter().all(|res| match res {
                Ok(RespValue::SimpleString(x)) if x == "OK" => true,
                _ => false
            }) {
                Ok(HttpResponse::InternalServerError().finish())
            } else {
                Ok(HttpResponse::Ok().body("successfully cached values"))
            }
        )
        .responder()
}

pub struct AppState {
    pub redis_addr: Arc<Addr<RedisActor>>
}

fn main() {
    ::std::env::set_var("RUST_LOG", "actix_web=info,actix_redis=info");
    env_logger::init();

    let sys = actix::System::new("actix_redis_ex");

    server::new(|| {
        // Start a Redis client actor pointed at the local Redis server.
        let redis_addr = Arc::new(RedisActor::start("127.0.0.1:6379"));
        let app_state = AppState { redis_addr };

        App::with_state(app_state)
            .middleware(middleware::Logger::default())
            .resource("/cache_stuff", |r| {
                r.method(Method::POST).with_async(cache_stuff)
            })
    }).bind("0.0.0.0:8080")
        .unwrap()
        .workers(1)
        .start();

    let _ = sys.run();
}
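
After a successful POST, the three cached entries should be retrievable directly from the local Redis instance the example connects to (127.0.0.1:6379), for instance with redis-cli:

    redis-cli GET mydomain:one
    redis-cli GET mydomain:two
    redis-cli GET mydomain:three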