// examples/diesel/src/main.rs — mirrored from https://github.com/actix/examples
//! Actix web diesel example
//!
//! Diesel does not support tokio, so we have to run it in separate threads.
//! Actix supports sync actors by default, so we are going to create a sync
//! actor that uses diesel. Technically, sync actors are worker-style actors:
//! multiple of them can run in parallel and process messages from the same
//! queue.
#[macro_use]
extern crate diesel;
2019-03-07 14:50:29 -08:00
#[macro_use]
extern crate serde_derive;
2019-03-07 14:50:29 -08:00
use actix_web::{error, middleware, web, App, Error, HttpResponse, HttpServer};
2019-03-26 04:29:00 +01:00
use bytes::BytesMut;
use diesel::prelude::*;
2019-03-07 14:50:29 -08:00
use diesel::r2d2::{self, ConnectionManager};
use futures::future::{err, Either};
use futures::{Future, Stream};
mod models;
mod schema;
2019-03-07 14:50:29 -08:00
type Pool = r2d2::Pool<ConnectionManager<SqliteConnection>>;
/// Diesel query
fn query(
nm: String,
2019-03-16 20:23:09 -07:00
pool: web::Data<Pool>,
2019-03-07 14:50:29 -08:00
) -> Result<models::User, diesel::result::Error> {
use self::schema::users::dsl::*;
let uuid = format!("{}", uuid::Uuid::new_v4());
let new_user = models::NewUser {
id: &uuid,
name: nm.as_str(),
};
let conn: &SqliteConnection = &pool.get().unwrap();
2019-03-07 14:50:29 -08:00
diesel::insert_into(users).values(&new_user).execute(conn)?;
let mut items = users.filter(id.eq(&uuid)).load::<models::User>(conn)?;
Ok(items.pop().unwrap())
}
/// Async request handler
fn add(
2019-03-07 14:50:29 -08:00
name: web::Path<String>,
2019-03-16 20:23:09 -07:00
pool: web::Data<Pool>,
2019-03-07 14:50:29 -08:00
) -> impl Future<Item = HttpResponse, Error = Error> {
// run diesel blocking code
web::block(move || query(name.into_inner(), pool)).then(|res| match res {
Ok(user) => Ok(HttpResponse::Ok().json(user)),
Err(_) => Ok(HttpResponse::InternalServerError().into()),
})
}
/// JSON body shape shared by the `/add` and `/add2` endpoints.
#[derive(Debug, Serialize, Deserialize)]
struct MyUser {
    name: String,
}

/// Upper bound on the in-memory request body for the manual handler: 256 KiB.
const MAX_SIZE: usize = 262_144;
/// This handler manually load request payload and parse json object
2019-03-26 04:29:00 +01:00
fn index_add(
pl: web::Payload,
2019-03-16 20:23:09 -07:00
pool: web::Data<Pool>,
2019-03-26 04:29:00 +01:00
) -> impl Future<Item = HttpResponse, Error = Error> {
2019-03-07 14:50:29 -08:00
pl
// `Future::from_err` acts like `?` in that it coerces the error type from
// the future into the final error type
.from_err()
// `fold` will asynchronously read each chunk of the request body and
// call supplied closure, then it resolves to result of closure
.fold(BytesMut::new(), move |mut body, chunk| {
// limit max size of in-memory payload
if (body.len() + chunk.len()) > MAX_SIZE {
Err(error::ErrorBadRequest("overflow"))
} else {
body.extend_from_slice(&chunk);
Ok(body)
}
})
// `Future::and_then` can be used to merge an asynchronous workflow with a
// synchronous workflow
//
// Douman NOTE:
// The return value in this closure helps, to clarify result for compiler
// as otheriwse it cannot understand it
2019-03-07 14:50:29 -08:00
.and_then(move |body| {
// body is loaded, now we can deserialize serde-json
let r_obj = serde_json::from_slice::<MyUser>(&body);
// Send to the db for create
match r_obj {
Ok(obj) => {
2019-03-07 14:50:29 -08:00
Either::A(web::block(move || query(obj.name, pool)).then(|res| {
match res {
Ok(user) => Ok(HttpResponse::Ok().json(user)),
Err(_) => Ok(HttpResponse::InternalServerError().into()),
}
}))
}
2019-03-07 14:50:29 -08:00
Err(_) => Either::B(err(error::ErrorBadRequest("Json Decode Failed"))),
}
})
}
2019-03-07 14:50:29 -08:00
fn add2(
item: web::Json<MyUser>,
2019-03-16 20:23:09 -07:00
pool: web::Data<Pool>,
2019-03-07 14:50:29 -08:00
) -> impl Future<Item = HttpResponse, Error = Error> {
// run diesel blocking code
web::block(move || query(item.into_inner().name, pool)).then(|res| match res {
Ok(user) => Ok(HttpResponse::Ok().json(user)),
Err(_) => Ok(HttpResponse::InternalServerError().into()),
})
}
2019-03-07 14:50:29 -08:00
fn main() -> std::io::Result<()> {
std::env::set_var("RUST_LOG", "actix_web=info");
env_logger::init();
// Start 3 db executor actors
let manager = ConnectionManager::<SqliteConnection>::new("test.db");
2018-05-08 11:08:43 -07:00
let pool = r2d2::Pool::builder()
.build(manager)
.expect("Failed to create pool.");
// Start http server
2019-03-07 14:50:29 -08:00
HttpServer::new(move || {
App::new()
2019-03-26 04:29:00 +01:00
.data(pool.clone())
// enable logger
2019-03-26 04:29:00 +01:00
.wrap(middleware::Logger::default())
// This can be called with:
// curl -S --header "Content-Type: application/json" --request POST --data '{"name":"xyz"}' http://127.0.0.1:8080/add
// Use of the extractors makes some post conditions simpler such
// as size limit protections and built in json validation.
2019-03-07 14:50:29 -08:00
.service(
2019-05-04 21:52:24 -07:00
web::resource("/add2")
.data(
web::JsonConfig::default()
.limit(4096) // <- limit size of the payload
.error_handler(|err, _| {
// <- create custom error response
error::InternalError::from_response(
err,
HttpResponse::Conflict().finish(),
)
.into()
}),
)
.route(web::post().to_async(add2)),
2019-03-07 14:50:29 -08:00
)
// Manual parsing would allow custom error construction, use of
// other parsers *beside* json (for example CBOR, protobuf, xml), and allows
// an application to standardise on a single parser implementation.
2019-03-07 14:50:29 -08:00
.service(web::resource("/add").route(web::post().to_async(index_add)))
.service(web::resource("/add/{name}").route(web::get().to_async(add)))
})
.bind("127.0.0.1:8080")?
.run()
}