2018-04-13 09:18:42 +08:00
|
|
|
//! Actix web diesel example
|
|
|
|
//!
|
|
|
|
//! Diesel does not support tokio, so we have to run it in separate threads.
|
2018-05-08 11:08:43 -07:00
|
|
|
//! Actix supports sync actors by default, so we going to create sync actor
|
|
|
|
//! that use diesel. Technically sync actors are worker style actors, multiple
|
|
|
|
//! of them can run in parallel and process messages from same queue.
|
2018-04-13 09:18:42 +08:00
|
|
|
extern crate serde;
|
|
|
|
extern crate serde_json;
|
|
|
|
#[macro_use]
|
|
|
|
extern crate serde_derive;
|
|
|
|
#[macro_use]
|
|
|
|
extern crate diesel;
|
|
|
|
extern crate actix;
|
|
|
|
extern crate actix_web;
|
|
|
|
extern crate env_logger;
|
2018-05-08 11:08:43 -07:00
|
|
|
extern crate futures;
|
|
|
|
extern crate r2d2;
|
|
|
|
extern crate uuid;
|
2019-01-31 19:12:27 +13:00
|
|
|
extern crate bytes;
|
|
|
|
// extern crate json;
|
2018-04-13 09:18:42 +08:00
|
|
|
|
2019-01-31 19:12:27 +13:00
|
|
|
|
|
|
|
use bytes::BytesMut;
|
2018-04-13 09:18:42 +08:00
|
|
|
use actix::prelude::*;
|
2018-05-20 21:03:29 -07:00
|
|
|
use actix_web::{
|
2019-01-31 19:12:27 +13:00
|
|
|
http, middleware, server, App, AsyncResponder, FutureResponse, HttpResponse, Path, Error, HttpRequest,
|
|
|
|
State, HttpMessage, error, Json
|
2018-05-20 21:03:29 -07:00
|
|
|
};
|
2018-04-13 09:18:42 +08:00
|
|
|
|
|
|
|
use diesel::prelude::*;
|
2018-06-01 11:31:53 -07:00
|
|
|
use diesel::r2d2::ConnectionManager;
|
2019-01-31 19:12:27 +13:00
|
|
|
use futures::{future, Future, Stream};
|
2018-04-13 09:18:42 +08:00
|
|
|
|
|
|
|
mod db;
|
|
|
|
mod models;
|
|
|
|
mod schema;
|
|
|
|
|
|
|
|
use db::{CreateUser, DbExecutor};
|
|
|
|
|
|
|
|
/// State with DbExecutor address.
///
/// Constructed once per server worker inside the `server::new` closure in
/// `main`; each instance carries its own clone of the sync-actor address.
struct AppState {
    // Address of the DbExecutor sync-actor pool started by `SyncArbiter` in
    // `main`; `CreateUser` messages sent here are handled on worker threads.
    db: Addr<DbExecutor>,
}
|
|
|
|
|
|
|
|
/// Async request handler
|
2019-01-31 19:12:27 +13:00
|
|
|
fn add(
|
2018-06-01 11:31:53 -07:00
|
|
|
(name, state): (Path<String>, State<AppState>),
|
|
|
|
) -> FutureResponse<HttpResponse> {
|
2018-04-13 09:18:42 +08:00
|
|
|
// send async `CreateUser` message to a `DbExecutor`
|
2018-05-08 11:08:43 -07:00
|
|
|
state
|
|
|
|
.db
|
|
|
|
.send(CreateUser {
|
|
|
|
name: name.into_inner(),
|
|
|
|
})
|
2018-04-13 09:18:42 +08:00
|
|
|
.from_err()
|
2018-05-08 11:08:43 -07:00
|
|
|
.and_then(|res| match res {
|
|
|
|
Ok(user) => Ok(HttpResponse::Ok().json(user)),
|
|
|
|
Err(_) => Ok(HttpResponse::InternalServerError().into()),
|
2018-04-13 09:18:42 +08:00
|
|
|
})
|
|
|
|
.responder()
|
|
|
|
}
|
|
|
|
|
2019-01-31 19:12:27 +13:00
|
|
|
/// JSON payload accepted by the `/add` and `/add2` endpoints,
/// e.g. `{"name":"xyz"}` (see the curl example in `main`).
#[derive(Debug, Serialize, Deserialize)]
struct MyUser {
    // User name forwarded to the db actor as `CreateUser::name`.
    name: String
}
|
|
|
|
|
|
|
|
/// Upper bound on the in-memory request body buffered by `index_add`.
const MAX_SIZE: usize = 262_144; // max payload size is 256k
|
|
|
|
|
|
|
|
/// This handler manually loads the request payload and parses the json object
/// (contrast with `add2`, where the `Json` extractor does both steps).
fn index_add((req, state): (HttpRequest<AppState>, State<AppState>)) -> impl Future<Item = HttpResponse, Error = Error> {
    // HttpRequest::payload() is stream of Bytes objects
    req.payload()
        // `Future::from_err` acts like `?` in that it coerces the error type from
        // the future into the final error type
        .from_err()

        // `fold` will asynchronously read each chunk of the request body and
        // call supplied closure, then it resolves to result of closure
        .fold(BytesMut::new(), move |mut body, chunk| {
            // limit max size of in-memory payload
            if (body.len() + chunk.len()) > MAX_SIZE {
                // Returning Err short-circuits the fold and fails the request.
                Err(error::ErrorBadRequest("overflow"))
            } else {
                body.extend_from_slice(&chunk);
                Ok(body)
            }
        })
        // `Future::and_then` can be used to merge an asynchronous workflow with a
        // synchronous workflow
        //
        // Douman NOTE:
        // The return type annotation on this closure helps to clarify the result
        // for the compiler, as otherwise it cannot infer it
        .and_then(move |body| -> Box<Future<Item = HttpResponse, Error = Error>> {
            // body is loaded, now we can deserialize serde-json
            let r_obj = serde_json::from_slice::<MyUser>(&body);

            // Send to the db for create
            match r_obj {
                Ok(obj) => {
                    let res = state.db.send(CreateUser { name: obj.name, })
                        .from_err()
                        .and_then(|res| match res {
                            Ok(user) => Ok(HttpResponse::Ok().json(user)),
                            Err(_) => Ok(HttpResponse::InternalServerError().into()),
                        });

                    // Both arms must box: the two futures have different
                    // concrete types, so they are unified behind a trait object.
                    Box::new(res)
                }
                Err(_) => Box::new(future::err(error::ErrorBadRequest("Json Decode Failed")))
            }
        })
}
|
|
|
|
|
|
|
|
fn add2((item, state): (Json<MyUser>, State<AppState>)) -> impl Future<Item = HttpResponse, Error = Error> {
|
|
|
|
state.db
|
|
|
|
.send(CreateUser {
|
|
|
|
// into_inner to move into the reference, then accessing name to
|
|
|
|
// move the name out.
|
|
|
|
name: item.into_inner().name,
|
|
|
|
})
|
|
|
|
.from_err()
|
|
|
|
.and_then(|res| match res {
|
|
|
|
Ok(user) => Ok(HttpResponse::Ok().json(user)),
|
|
|
|
Err(_) => Ok(HttpResponse::InternalServerError().into()),
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2018-04-13 09:18:42 +08:00
|
|
|
/// Entry point: wires up logging, the diesel connection pool, the sync-actor
/// workers, and the HTTP server, then runs the actix system until shutdown.
fn main() {
    // Enable actix-web's request logging (picked up by env_logger below).
    ::std::env::set_var("RUST_LOG", "actix_web=info");
    env_logger::init();
    let sys = actix::System::new("diesel-example");

    // Start 3 db executor actors
    let manager = ConnectionManager::<SqliteConnection>::new("test.db");
    let pool = r2d2::Pool::builder()
        .build(manager)
        .expect("Failed to create pool.");

    // The pool is cloned into each of the 3 worker actors; they drain a
    // shared message queue.
    let addr = SyncArbiter::start(3, move || DbExecutor(pool.clone()));

    // Start http server
    server::new(move || {
        App::with_state(AppState{db: addr.clone()})
            // enable logger
            .middleware(middleware::Logger::default())
            // This can be called with:
            // curl -S --header "Content-Type: application/json" --request POST --data '{"name":"xyz"}' http://127.0.0.1:8080/add
            // Use of the extractors makes some post conditions simpler such
            // as size limit protections and built in json validation.
            .resource("/add2", |r| {
                r.method(http::Method::POST)
                    .with_async_config(add2, |(json_cfg, )| {
                        json_cfg.0.limit(4096); // <- limit size of the payload
                    })
            })
            // Manual parsing would allow custom error construction, use of
            // other parsers *beside* json (for example CBOR, protobuf, xml), and allows
            // an application to standardise on a single parser implementation.
            .resource("/add", |r| r.method(http::Method::POST).with_async(index_add))
            .resource("/add/{name}", |r| r.method(http::Method::GET).with(add))
    }).bind("127.0.0.1:8080")
        .unwrap()
        .start();

    println!("Started http server: 127.0.0.1:8080");
    // Block until the actix system is stopped (e.g. by Ctrl-C).
    let _ = sys.run();
}
|