1
0
mirror of https://github.com/actix/examples synced 2025-06-28 18:00:37 +02:00

upgrade diesel, r2d2, state examples

This commit is contained in:
Nikolay Kim
2019-03-07 14:50:29 -08:00
parent 60a9df8abd
commit f39a53ea3a
10 changed files with 185 additions and 282 deletions

View File

@ -2,15 +2,14 @@
name = "diesel-example"
version = "0.1.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
workspace = "../"
workspace = ".."
edition = "2018"
[dependencies]
actix-web = { git="https://github.com/actix/actix-web.git", branch = "1.0" }
bytes = "0.4"
env_logger = "0.5"
actix = "0.7"
actix-web = "0.7"
env_logger = "0.6"
futures = "0.1"
uuid = { version = "0.5", features = ["serde", "v4"] }
serde = "1.0"

View File

@ -1,55 +0,0 @@
//! Db executor actor
use actix::prelude::*;
use actix_web::*;
use diesel;
use diesel::prelude::*;
use diesel::r2d2::{ConnectionManager, Pool};
use uuid;
use models;
use schema;
/// Db executor actor: wraps an r2d2 connection pool so blocking diesel
/// queries can run on dedicated worker threads (3 instances are started
/// via `SyncArbiter::start` in `main`).
pub struct DbExecutor(pub Pool<ConnectionManager<SqliteConnection>>);
/// Message asking `DbExecutor` to insert a new user with the given name.
/// This is the only message the actor currently handles, but additional
/// messages are easy to add in the same way.
pub struct CreateUser {
    // Display name for the new user; the UUID primary key is generated
    // inside the handler.
    pub name: String,
}
/// Sending `CreateUser` resolves to the freshly inserted `models::User`,
/// or an actix-web `Error` on database failure.
impl Message for CreateUser {
    type Result = Result<models::User, Error>;
}
/// `SyncContext` makes this a worker-style sync actor, so the blocking
/// diesel calls in its handlers do not stall the async event loop.
impl Actor for DbExecutor {
    type Context = SyncContext<Self>;
}
impl Handler<CreateUser> for DbExecutor {
type Result = Result<models::User, Error>;
fn handle(&mut self, msg: CreateUser, _: &mut Self::Context) -> Self::Result {
use self::schema::users::dsl::*;
let uuid = format!("{}", uuid::Uuid::new_v4());
let new_user = models::NewUser {
id: &uuid,
name: &msg.name,
};
let conn: &SqliteConnection = &self.0.get().unwrap();
diesel::insert_into(users)
.values(&new_user)
.execute(conn)
.map_err(|_| error::ErrorInternalServerError("Error inserting person"))?;
let mut items = users
.filter(id.eq(&uuid))
.load::<models::User>(conn)
.map_err(|_| error::ErrorInternalServerError("Error loading person"))?;
Ok(items.pop().unwrap())
}
}

View File

@ -4,77 +4,74 @@
//! Actix supports sync actors by default, so we are going to create a sync actor
//! that use diesel. Technically sync actors are worker style actors, multiple
//! of them can run in parallel and process messages from same queue.
extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate diesel;
extern crate actix;
extern crate actix_web;
extern crate env_logger;
extern crate futures;
extern crate r2d2;
extern crate uuid;
extern crate bytes;
// extern crate json;
use bytes::BytesMut;
use actix::prelude::*;
use actix_web::{
http, middleware, server, App, AsyncResponder, FutureResponse, HttpResponse, Path, Error, HttpRequest,
State, HttpMessage, error, Json
};
#[macro_use]
extern crate serde_derive;
use actix_web::{error, middleware, web, App, Error, HttpResponse, HttpServer};
use bytes::{Bytes, BytesMut};
use diesel::prelude::*;
use diesel::r2d2::ConnectionManager;
use futures::{future, Future, Stream};
use diesel::r2d2::{self, ConnectionManager};
use futures::future::{err, Either};
use futures::{Future, Stream};
mod db;
mod models;
mod schema;
use db::{CreateUser, DbExecutor};
type Pool = r2d2::Pool<ConnectionManager<SqliteConnection>>;
/// State with DbExecutor address
struct AppState {
db: Addr<DbExecutor>,
/// Diesel query
/// Diesel query: insert a user with a freshly generated UUID primary key
/// and return the stored row.
///
/// This runs blocking database I/O, so the handlers invoke it through
/// `web::block` to keep it off the async event loop.
fn query(
    nm: String,
    pool: web::State<Pool>,
) -> Result<models::User, diesel::result::Error> {
    use self::schema::users::dsl::*;

    // The key is generated here and stored as text (SQLite has no native
    // UUID column type).
    let uuid = format!("{}", uuid::Uuid::new_v4());
    let new_user = models::NewUser {
        id: &uuid,
        name: nm.as_str(),
    };
    // NOTE(review): still panics if the pool checkout fails — the return
    // type is diesel's error, so the r2d2 error cannot be propagated
    // directly here; confirm this trade-off is acceptable.
    let conn: &SqliteConnection = &pool.get().unwrap();

    diesel::insert_into(users).values(&new_user).execute(conn)?;
    // Was `load(..).pop().unwrap()`, which panics if the just-inserted
    // row is not found; `first` returns `Err(Error::NotFound)` instead,
    // which flows through `?`-style error handling in the callers.
    users.filter(id.eq(&uuid)).first::<models::User>(conn)
}
/// Async request handler
fn add(
(name, state): (Path<String>, State<AppState>),
) -> FutureResponse<HttpResponse> {
// send async `CreateUser` message to a `DbExecutor`
state
.db
.send(CreateUser {
name: name.into_inner(),
})
.from_err()
.and_then(|res| match res {
Ok(user) => Ok(HttpResponse::Ok().json(user)),
Err(_) => Ok(HttpResponse::InternalServerError().into()),
})
.responder()
name: web::Path<String>,
pool: web::State<Pool>,
) -> impl Future<Item = HttpResponse, Error = Error> {
// run diesel blocking code
web::block(move || query(name.into_inner(), pool)).then(|res| match res {
Ok(user) => Ok(HttpResponse::Ok().json(user)),
Err(_) => Ok(HttpResponse::InternalServerError().into()),
})
}
#[derive(Debug, Serialize, Deserialize)]
struct MyUser {
name: String
name: String,
}
const MAX_SIZE: usize = 262_144; // max payload size is 256k
/// This handler manually load request payload and parse json object
fn index_add((req, state): (HttpRequest<AppState>, State<AppState>)) -> impl Future<Item = HttpResponse, Error = Error> {
// HttpRequest::payload() is stream of Bytes objects
req.payload()
fn index_add<P>(
pl: web::Payload<P>,
pool: web::State<Pool>,
) -> impl Future<Item = HttpResponse, Error = Error>
where
P: Stream<Item = Bytes, Error = error::PayloadError>,
{
pl
// `Future::from_err` acts like `?` in that it coerces the error type from
// the future into the final error type
.from_err()
// `fold` will asynchronously read each chunk of the request body and
// call supplied closure, then it resolves to result of closure
.fold(BytesMut::new(), move |mut body, chunk| {
@ -92,45 +89,39 @@ fn index_add((req, state): (HttpRequest<AppState>, State<AppState>)) -> impl Fut
// Douman NOTE:
// The return value in this closure helps to clarify the result for the compiler,
// as otherwise it cannot understand it
.and_then(move |body| -> Box<Future<Item = HttpResponse, Error = Error>> {
.and_then(move |body| {
// body is loaded, now we can deserialize serde-json
let r_obj = serde_json::from_slice::<MyUser>(&body);
// Send to the db for create
match r_obj {
Ok(obj) => {
let res = state.db.send(CreateUser { name: obj.name, })
.from_err()
.and_then(|res| match res {
Ok(user) => Ok(HttpResponse::Ok().json(user)),
Err(_) => Ok(HttpResponse::InternalServerError().into()),
});
Box::new(res)
Either::A(web::block(move || query(obj.name, pool)).then(|res| {
match res {
Ok(user) => Ok(HttpResponse::Ok().json(user)),
Err(_) => Ok(HttpResponse::InternalServerError().into()),
}
}))
}
Err(_) => Box::new(future::err(error::ErrorBadRequest("Json Decode Failed")))
Err(_) => Either::B(err(error::ErrorBadRequest("Json Decode Failed"))),
}
})
}
fn add2((item, state): (Json<MyUser>, State<AppState>)) -> impl Future<Item = HttpResponse, Error = Error> {
state.db
.send(CreateUser {
// into_inner to move into the reference, then accessing name to
// move the name out.
name: item.into_inner().name,
})
.from_err()
.and_then(|res| match res {
Ok(user) => Ok(HttpResponse::Ok().json(user)),
Err(_) => Ok(HttpResponse::InternalServerError().into()),
})
fn add2(
item: web::Json<MyUser>,
pool: web::State<Pool>,
) -> impl Future<Item = HttpResponse, Error = Error> {
// run diesel blocking code
web::block(move || query(item.into_inner().name, pool)).then(|res| match res {
Ok(user) => Ok(HttpResponse::Ok().json(user)),
Err(_) => Ok(HttpResponse::InternalServerError().into()),
})
}
fn main() {
::std::env::set_var("RUST_LOG", "actix_web=info");
fn main() -> std::io::Result<()> {
std::env::set_var("RUST_LOG", "actix_web=info");
env_logger::init();
let sys = actix::System::new("diesel-example");
// Start 3 db executor actors
let manager = ConnectionManager::<SqliteConnection>::new("test.db");
@ -138,32 +129,29 @@ fn main() {
.build(manager)
.expect("Failed to create pool.");
let addr = SyncArbiter::start(3, move || DbExecutor(pool.clone()));
// Start http server
server::new(move || {
App::with_state(AppState{db: addr.clone()})
HttpServer::new(move || {
App::new()
.state(pool.clone())
// enable logger
.middleware(middleware::Logger::default())
// This can be called with:
// curl -S --header "Content-Type: application/json" --request POST --data '{"name":"xyz"}' http://127.0.0.1:8080/add
// Use of the extractors makes some post conditions simpler such
// as size limit protections and built in json validation.
.resource("/add2", |r| {
r.method(http::Method::POST)
.with_async_config(add2, |(json_cfg, )| {
json_cfg.0.limit(4096); // <- limit size of the payload
})
})
.service(
web::resource("/add2").route(
web::post()
.config(web::JsonConfig::default().limit(4096)) // <- limit size of the payload
.to_async(add2),
),
)
// Manual parsing would allow custom error construction, use of
// other parsers *beside* json (for example CBOR, protobuf, xml), and allows
// an application to standardise on a single parser implementation.
.resource("/add", |r| r.method(http::Method::POST).with_async(index_add))
.resource("/add/{name}", |r| r.method(http::Method::GET).with(add))
}).bind("127.0.0.1:8080")
.unwrap()
.start();
println!("Started http server: 127.0.0.1:8080");
let _ = sys.run();
.service(web::resource("/add").route(web::post().to_async(index_add)))
.service(web::resource("/add/{name}").route(web::get().to_async(add)))
})
.bind("127.0.0.1:8080")?
.run()
}