1
0
mirror of https://github.com/actix/examples synced 2025-06-26 17:17:42 +02:00

restructure folders

This commit is contained in:
Rob Ede
2022-02-18 02:01:48 +00:00
parent 4d8573c3fe
commit cc3d356209
201 changed files with 52 additions and 49 deletions

1
databases/diesel/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
test.db

View File

@ -0,0 +1,15 @@
[package]
name = "diesel-example"
version = "1.0.0"
edition = "2021"
[dependencies]
actix-web = "4.0.0-beta.21"
diesel = { version = "1.4.8", features = ["sqlite", "r2d2"] }
dotenv = "0.15"
env_logger = "0.9.0"
failure = "0.1.8"
futures = "0.3.1"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
uuid = { version = "0.8", features = ["serde", "v4"] }

109
databases/diesel/README.md Normal file
View File

@ -0,0 +1,109 @@
# diesel
Basic integration of [Diesel](https://diesel.rs/) using SQLite for Actix Web.
## Usage
### Install SQLite
```sh
# on OpenSUSE
sudo zypper install sqlite3-devel libsqlite3-0 sqlite3
# on Ubuntu
sudo apt-get install libsqlite3-dev sqlite3
# on Fedora
sudo dnf install libsqlite3x-devel sqlite3x
# on macOS (using homebrew)
brew install sqlite3
```
### Initialize SQLite Database
```sh
cd databases/diesel
cargo install diesel_cli --no-default-features --features sqlite
echo "DATABASE_URL=test.db" > .env
diesel migration run
```
There will now be a database file at `./test.db`.
### Running Server
```sh
cd databases/diesel
cargo run (or ``cargo watch -x run``)
# Started http server: 127.0.0.1:8080
```
### Available Routes
#### `POST /user`
Inserts a new user into the SQLite DB.
Provide a JSON payload with a name. Eg:
```json
{ "name": "bill" }
```
On success, a response like the following is returned:
```json
{
"id": "9e46baba-a001-4bb3-b4cf-4b3e5bab5e97",
"name": "bill"
}
```
<details>
<summary>Client Examples</summary>
Using [HTTPie](https://httpie.org/):
```sh
http POST localhost:8080/user name=bill
```
Using cURL:
```sh
curl -S -X POST --header "Content-Type: application/json" --data '{"name":"bill"}' http://localhost:8080/user
```
</details>
#### `GET /user/{user_uid}`
Gets a user from the DB using its UID (returned from the insert request or taken from the DB directly). Returns a 404 when no user exists with that UID.
<details>
<summary>Client Examples</summary>
Using [HTTPie](https://httpie.org/):
```sh
http localhost:8080/user/9e46baba-a001-4bb3-b4cf-4b3e5bab5e97
```
Using cURL:
```sh
curl -S http://localhost:8080/user/9e46baba-a001-4bb3-b4cf-4b3e5bab5e97
```
</details>
### Explore The SQLite DB
```sh
sqlite3 test.db
```
```
sqlite> .tables
sqlite> SELECT * FROM users;
```
## Using Other Databases
You can find a complete example of Diesel + PostgreSQL at: [https://github.com/TechEmpower/FrameworkBenchmarks/tree/master/frameworks/Rust/actix](https://github.com/TechEmpower/FrameworkBenchmarks/tree/master/frameworks/Rust/actix)

View File

@ -0,0 +1 @@
DROP TABLE users

View File

@ -0,0 +1,4 @@
-- `id` holds a v4 UUID rendered as text (generated at insert time in Rust).
CREATE TABLE users (
    id VARCHAR NOT NULL PRIMARY KEY,
    name VARCHAR NOT NULL
)

View File

@ -0,0 +1,42 @@
use diesel::prelude::*;
use uuid::Uuid;
use crate::models;
type DbError = Box<dyn std::error::Error + Send + Sync>;
/// Run query using Diesel to find user by uid and return it.
///
/// Returns `Ok(None)` (not an error) when no row matches, via `.optional()`.
pub fn find_user_by_uid(
    uid: Uuid,
    conn: &SqliteConnection,
) -> Result<Option<models::User>, DbError> {
    // Import the generated query DSL locally to avoid polluting the module.
    use crate::schema::users::dsl::*;

    // The `id` column stores the UUID as TEXT, so compare its string form.
    let user = users
        .filter(id.eq(uid.to_string()))
        .first::<models::User>(conn)
        .optional()?;

    Ok(user)
}
/// Run query using Diesel to insert a new database row and return the result.
///
/// Generates a fresh v4 UUID for the primary key; the freshly built value is
/// returned instead of re-reading the row from the database.
pub fn insert_new_user(
    // prevent collision with `name` column imported inside the function
    nm: &str,
    conn: &SqliteConnection,
) -> Result<models::User, DbError> {
    // It is common when using Diesel with Actix Web to import schema-related
    // modules inside a function's scope (rather than the normal module's scope)
    // to prevent import collisions and namespace pollution.
    use crate::schema::users::dsl::*;

    let new_user = models::User {
        id: Uuid::new_v4().to_string(),
        name: nm.to_owned(),
    };

    diesel::insert_into(users).values(&new_user).execute(conn)?;

    Ok(new_user)
}

View File

@ -0,0 +1,146 @@
//! Actix Web Diesel integration example
//!
//! Diesel does not support tokio, so we have to run it in separate threads using the web::block
//! function which offloads blocking code (like Diesel's) in order to not block the server's thread.
#[macro_use]
extern crate diesel;
use actix_web::{get, middleware, post, web, App, Error, HttpResponse, HttpServer};
use diesel::prelude::*;
use diesel::r2d2::{self, ConnectionManager};
use uuid::Uuid;
mod actions;
mod models;
mod schema;
type DbPool = r2d2::Pool<ConnectionManager<SqliteConnection>>;
/// Finds user by UID.
///
/// Responds 200 with the user as JSON, 404 with a plain-text message when no
/// row matches, or 500 when the pool/query fails.
#[get("/user/{user_id}")]
async fn get_user(
    pool: web::Data<DbPool>,
    user_uid: web::Path<Uuid>,
) -> Result<HttpResponse, Error> {
    let user_uid = user_uid.into_inner();

    // use web::block to offload blocking Diesel code without blocking server thread
    let user = web::block(move || {
        let conn = pool.get()?;
        actions::find_user_by_uid(user_uid, &conn)
    })
    .await?
    .map_err(actix_web::error::ErrorInternalServerError)?;

    if let Some(user) = user {
        Ok(HttpResponse::Ok().json(user))
    } else {
        // `find_user_by_uid` returned Ok(None): no such row.
        let res = HttpResponse::NotFound()
            .body(format!("No user found with uid: {}", user_uid));
        Ok(res)
    }
}
/// Inserts new user with name defined in form.
///
/// Expects a JSON body matching `models::NewUser`; responds 200 with the
/// inserted user (including its generated UUID) or 500 on DB failure.
#[post("/user")]
async fn add_user(
    pool: web::Data<DbPool>,
    form: web::Json<models::NewUser>,
) -> Result<HttpResponse, Error> {
    // use web::block to offload blocking Diesel code without blocking server thread
    let user = web::block(move || {
        let conn = pool.get()?;
        actions::insert_new_user(&form.name, &conn)
    })
    .await?
    .map_err(actix_web::error::ErrorInternalServerError)?;

    Ok(HttpResponse::Ok().json(user))
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
std::env::set_var("RUST_LOG", "actix_web=info");
env_logger::init();
dotenv::dotenv().ok();
// set up database connection pool
let connspec = std::env::var("DATABASE_URL").expect("DATABASE_URL");
let manager = ConnectionManager::<SqliteConnection>::new(connspec);
let pool = r2d2::Pool::builder()
.build(manager)
.expect("Failed to create pool.");
let bind = ("127.0.0.1", 8080);
println!("Starting server at: {}", &bind);
// Start HTTP server
HttpServer::new(move || {
App::new()
// set up DB pool to be used with web::Data<Pool> extractor
.app_data(web::Data::new(pool.clone()))
.wrap(middleware::Logger::default())
.service(get_user)
.service(add_user)
})
.bind(&bind)?
.run()
.await
}
#[cfg(test)]
mod tests {
    use super::*;
    use actix_web::test;

    /// End-to-end smoke test: insert a user over HTTP, read it back over
    /// HTTP, then delete it directly via Diesel so the DB stays clean.
    /// Requires `DATABASE_URL` to point at a migrated SQLite database.
    #[actix_web::test]
    async fn user_routes() {
        std::env::set_var("RUST_LOG", "actix_web=debug");
        env_logger::init();
        dotenv::dotenv().ok();

        let connspec = std::env::var("DATABASE_URL").expect("DATABASE_URL");
        let manager = ConnectionManager::<SqliteConnection>::new(connspec);
        let pool = r2d2::Pool::builder()
            .build(manager)
            .expect("Failed to create pool.");

        let mut app = test::init_service(
            App::new()
                .app_data(web::Data::new(pool.clone()))
                .wrap(middleware::Logger::default())
                .service(get_user)
                .service(add_user),
        )
        .await;

        // Insert a user
        let req = test::TestRequest::post()
            .uri("/user")
            .set_json(&models::NewUser {
                name: "Test user".to_owned(),
            })
            .to_request();

        let resp: models::User = test::call_and_read_body_json(&mut app, req).await;
        assert_eq!(resp.name, "Test user");

        // Get a user
        let req = test::TestRequest::get()
            .uri(&format!("/user/{}", resp.id))
            .to_request();

        let resp: models::User = test::call_and_read_body_json(&mut app, req).await;
        assert_eq!(resp.name, "Test user");

        // Delete new user from table
        use crate::schema::users::dsl::*;
        diesel::delete(users.filter(id.eq(resp.id)))
            .execute(&pool.get().expect("couldn't get db connection from pool"))
            .expect("couldn't delete test user from table");
    }
}

View File

@ -0,0 +1,14 @@
use serde::{Deserialize, Serialize};
use crate::schema::users;
/// A row in the `users` table; also serialized as the JSON response body.
#[derive(Debug, Clone, Serialize, Deserialize, Queryable, Insertable)]
pub struct User {
    // Primary key: a UUID stored in its string form.
    pub id: String,
    pub name: String,
}
/// JSON request body for `POST /user`; the id is generated server-side.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NewUser {
    pub name: String,
}

View File

@ -0,0 +1,6 @@
// Diesel `table!` macro: generates the `users` query DSL module
// (`crate::schema::users::dsl`) used by the functions in actions.rs.
table! {
    users (id) {
        id -> Text,
        name -> Text,
    }
}

View File

@ -0,0 +1,10 @@
[package]
name = "mongodb"
version = "1.0.0"
edition = "2021"
[dependencies]
actix-web = "4.0.0-beta.21"
futures-util = "0.3.17"
mongodb = "2.0.0"
serde = { version = "1.0", features = ["derive"] }

View File

@ -0,0 +1,24 @@
# MongoDB
Simple example of MongoDB usage with Actix Web. For more information on the MongoDB Rust driver,
visit the [documentation](https://docs.rs/mongodb/2.0.0/mongodb/index.html) and
[source code](https://github.com/mongodb/mongo-rust-driver).
## Usage
### Install MongoDB
Visit the [MongoDB Download Center](https://www.mongodb.com/try) for instructions on how to use
MongoDB Atlas or set up MongoDB locally.
### Set an environment variable
The example code creates a client with the URI set by the `MONGODB_URI` environment variable. The
default URI for a standalone `mongod` running on localhost is "mongodb://localhost:27017". For more
information on MongoDB URIs, visit the
[connection string](https://docs.mongodb.com/manual/reference/connection-string/) entry in the
MongoDB manual.
### Run the example
To execute the example code, run `cargo run` in the `databases/mongodb` directory.

View File

@ -0,0 +1,77 @@
//! Example code for using MongoDB with Actix.
mod model;
#[cfg(test)]
mod test;
use actix_web::{get, post, web, App, HttpResponse, HttpServer};
use mongodb::{bson::doc, options::IndexOptions, Client, Collection, IndexModel};
use model::User;
const DB_NAME: &str = "myApp";
const COLL_NAME: &str = "users";
/// Adds a new user to the "users" collection in the database.
///
/// Accepts the user as a URL-encoded form body. Responds 200 with a fixed
/// success message, or 500 with the driver error text (e.g. a duplicate
/// username rejected by the unique index).
#[post("/add_user")]
async fn add_user(client: web::Data<Client>, form: web::Form<User>) -> HttpResponse {
    let collection = client.database(DB_NAME).collection(COLL_NAME);
    let result = collection.insert_one(form.into_inner(), None).await;
    match result {
        Ok(_) => HttpResponse::Ok().body("user added"),
        Err(err) => HttpResponse::InternalServerError().body(err.to_string()),
    }
}
/// Gets the user with the supplied username.
///
/// 200 + JSON body when found, 404 when absent, 500 on driver errors.
#[get("/get_user/{username}")]
async fn get_user(
    client: web::Data<Client>,
    username: web::Path<String>,
) -> HttpResponse {
    let username = username.into_inner();
    let users: Collection<User> = client.database(DB_NAME).collection(COLL_NAME);

    let lookup = users.find_one(doc! { "username": &username }, None).await;

    match lookup {
        Err(err) => HttpResponse::InternalServerError().body(err.to_string()),
        Ok(None) => {
            HttpResponse::NotFound().body(format!("No user found with username {}", username))
        }
        Ok(Some(user)) => HttpResponse::Ok().json(user),
    }
}
/// Creates an index on the "username" field to force the values to be unique.
///
/// Called once at startup; panics (via `expect`) if index creation fails,
/// aborting the server before it accepts requests.
async fn create_username_index(client: &Client) {
    let options = IndexOptions::builder().unique(true).build();
    let model = IndexModel::builder()
        .keys(doc! { "username": 1 })
        .options(options)
        .build();
    client
        .database(DB_NAME)
        .collection::<User>(COLL_NAME)
        .create_index(model, None)
        .await
        .expect("creating an index should succeed");
}
/// Entry point: connect to MongoDB (URI from `MONGODB_URI`, defaulting to a
/// local mongod), ensure the unique-username index, then serve on port 8080.
#[actix_web::main]
async fn main() -> std::io::Result<()> {
    let uri = std::env::var("MONGODB_URI")
        .unwrap_or_else(|_| "mongodb://localhost:27017".into());

    let client = Client::with_uri_str(uri).await.expect("failed to connect");

    create_username_index(&client).await;

    HttpServer::new(move || {
        App::new()
            // Clones of `Client` share the driver's connection pool, so one
            // clone per worker is the intended usage.
            .app_data(web::Data::new(client.clone()))
            .service(add_user)
            .service(get_user)
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}

View File

@ -0,0 +1,9 @@
use serde::{Deserialize, Serialize};
/// A user document as stored in MongoDB and exchanged over HTTP
/// (form input in `add_user`, JSON output in `get_user`).
#[derive(Clone, Debug, PartialEq, Deserialize, Serialize)]
pub struct User {
    pub first_name: String,
    pub last_name: String,
    // Unique across the collection (enforced by `create_username_index`).
    pub username: String,
    pub email: String,
}

View File

@ -0,0 +1,54 @@
use actix_web::{
test::{call_and_read_body, call_and_read_body_json, init_service, TestRequest},
web::Bytes,
};
use mongodb::Client;
use super::*;
/// Integration test: drops the collection, inserts a user through the HTTP
/// handler, then reads it back by username and compares field-for-field.
/// Ignored by default because it needs a reachable MongoDB instance.
#[actix_web::test]
#[ignore = "requires MongoDB instance running"]
async fn test() {
    let uri = std::env::var("MONGODB_URI")
        .unwrap_or_else(|_| "mongodb://localhost:27017".into());

    let client = Client::with_uri_str(uri).await.expect("failed to connect");

    // Clear any data currently in the users collection.
    client
        .database(DB_NAME)
        .collection::<User>(COLL_NAME)
        .drop(None)
        .await
        .expect("drop collection should succeed");

    let app = init_service(
        App::new()
            .app_data(web::Data::new(client))
            .service(add_user)
            .service(get_user),
    )
    .await;

    let user = User {
        first_name: "Jane".into(),
        last_name: "Doe".into(),
        username: "janedoe".into(),
        email: "example@example.com".into(),
    };

    // POST as a form body, matching add_user's `web::Form<User>` extractor.
    let req = TestRequest::post()
        .uri("/add_user")
        .set_form(&user)
        .to_request();

    let response = call_and_read_body(&app, req).await;
    assert_eq!(response, Bytes::from_static(b"user added"));

    // Round-trip: the stored document must equal what we sent.
    let req = TestRequest::get()
        .uri(&format!("/get_user/{}", &user.username))
        .to_request();

    let response: User = call_and_read_body_json(&app, req).await;
    assert_eq!(response, user);
}

3
databases/postgres/.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
/target
**/*.rs.bk
.env

View File

@ -0,0 +1,15 @@
[package]
name = "async_pg"
version = "1.0.0"
edition = "2021"
[dependencies]
actix-web = "4.0.0-beta.21"
config = "0.11.0"
deadpool-postgres = { version = "0.10.1", features = ["serde"] }
derive_more = "0.99.2"
dotenv = "0.15.0"
serde = { version = "1.0.104", features = ["derive"] }
tokio-pg-mapper = "0.2.0"
tokio-pg-mapper-derive = "0.2.0"
tokio-postgres = "0.7.5"

View File

@ -0,0 +1,72 @@
# async_pg example
## This example illustrates
- `tokio_postgres`
- use of `tokio_pg_mapper` for postgres data mapping
- `deadpool_postgres` for connection pooling
- `dotenv` + `config` for configuration
## Instructions
1. Create database user
```shell
createuser -P test_user
```
Enter a password of your choice. The following instructions assume you
used `testing` as password.
This step is **optional** and you can also use an existing database user
for that. Just make sure to replace `test_user` by the database user
of your choice in the following steps and change the `.env` file
containing the configuration accordingly.
2. Create database
```shell
createdb -O test_user testing_db
```
3. Initialize database
```shell
psql -f sql/schema.sql testing_db
```
This step can be repeated and clears the database as it drops and
recreates the schema `testing` which is used within the database.
4. Create `.env` file:
```ini
SERVER_ADDR=127.0.0.1:8080
PG.USER=test_user
PG.PASSWORD=testing
PG.HOST=127.0.0.1
PG.PORT=5432
PG.DBNAME=testing_db
PG.POOL.MAX_SIZE=16
```
5. Run the server:
```shell
cargo run
```
6. Using a different terminal send an HTTP POST request to the running server:
```shell
echo '{"email": "ferris@thecrab.com", "first_name": "ferris", "last_name": "crab", "username": "ferreal"}' | http -f --json --print h POST http://127.0.0.1:8080/users
```
**...or using curl...**
```shell
curl -d '{"email": "ferris@thecrab.com", "first_name": "ferris", "last_name": "crab", "username": "ferreal"}' -H 'Content-Type: application/json' http://127.0.0.1:8080/users
```
A unique constraint exists for username, so sending this request twice
will return an internal server error (HTTP 500).

View File

@ -0,0 +1,3 @@
-- `$table_fields` is substituted at runtime with the column list derived
-- from the Rust `User` mapper before the statement is prepared.
INSERT INTO testing.users(email, first_name, last_name, username)
VALUES ($1, $2, $3, $4)
RETURNING $table_fields;

View File

@ -0,0 +1 @@
SELECT $table_fields FROM testing.users;

View File

@ -0,0 +1,11 @@
-- Recreate the `testing` schema from scratch; safe to re-run.
DROP SCHEMA IF EXISTS testing CASCADE;
CREATE SCHEMA testing;

CREATE TABLE testing.users (
    id         BIGSERIAL PRIMARY KEY,
    email      VARCHAR(200) NOT NULL,
    first_name VARCHAR(200) NOT NULL,
    last_name  VARCHAR(200) NOT NULL,
    -- Column-level UNIQUE already creates the unique index; the previous
    -- extra table-level `UNIQUE (username)` duplicated it, creating a
    -- second, redundant index on the same column.
    username   VARCHAR(50) UNIQUE NOT NULL
);

View File

@ -0,0 +1,131 @@
mod config {
    pub use ::config::ConfigError;
    use serde::Deserialize;

    /// Application settings deserialized from environment variables
    /// (SERVER_ADDR plus the PG.* keys shown in the example `.env`).
    #[derive(Deserialize)]
    pub struct Config {
        pub server_addr: String,
        pub pg: deadpool_postgres::Config,
    }

    impl Config {
        /// Build a `Config` from the current process environment.
        pub fn from_env() -> Result<Self, ConfigError> {
            let mut cfg = ::config::Config::new();
            cfg.merge(::config::Environment::new())?;
            cfg.try_into()
        }
    }
}
mod models {
    use serde::{Deserialize, Serialize};
    use tokio_pg_mapper_derive::PostgresMapper;

    /// A row of `testing.users`; also the JSON request/response shape.
    #[derive(Deserialize, PostgresMapper, Serialize)]
    #[pg_mapper(table = "users")] // singular 'user' is a keyword..
    pub struct User {
        pub email: String,
        pub first_name: String,
        pub last_name: String,
        pub username: String,
    }
}
mod errors {
    use actix_web::{HttpResponse, ResponseError};
    use deadpool_postgres::PoolError;
    use derive_more::{Display, From};
    use tokio_pg_mapper::Error as PGMError;
    use tokio_postgres::error::Error as PGError;

    /// Unified application error. The derive_more `From` impls let `?`
    /// convert driver/mapper/pool errors into `MyError` automatically.
    #[derive(Display, From, Debug)]
    pub enum MyError {
        NotFound,
        PGError(PGError),
        PGMError(PGMError),
        PoolError(PoolError),
    }

    impl std::error::Error for MyError {}

    impl ResponseError for MyError {
        /// Map variants to HTTP responses: 404 for `NotFound`, 500 with the
        /// error text for pool errors, bare 500 for everything else.
        fn error_response(&self) -> HttpResponse {
            match *self {
                MyError::NotFound => HttpResponse::NotFound().finish(),
                MyError::PoolError(ref err) => {
                    HttpResponse::InternalServerError().body(err.to_string())
                }
                _ => HttpResponse::InternalServerError().finish(),
            }
        }
    }
}
mod db {
    use crate::{errors::MyError, models::User};
    use deadpool_postgres::Client;
    use tokio_pg_mapper::FromTokioPostgresRow;

    /// Insert `user_info` into `testing.users` and return the row as stored.
    ///
    /// The SQL file contains a `$table_fields` placeholder that is replaced
    /// with the column list derived from `User` before preparing.
    pub async fn add_user(client: &Client, user_info: User) -> Result<User, MyError> {
        let _stmt = include_str!("../sql/add_user.sql");
        let _stmt = _stmt.replace("$table_fields", &User::sql_table_fields());
        // `unwrap`: the statement is a compile-time constant, so a prepare
        // failure indicates a broken SQL file rather than a runtime condition.
        let stmt = client.prepare(&_stmt).await.unwrap();

        client
            .query(
                &stmt,
                &[
                    &user_info.email,
                    &user_info.first_name,
                    &user_info.last_name,
                    &user_info.username,
                ],
            )
            .await?
            .iter()
            .map(|row| User::from_row_ref(row).unwrap())
            .collect::<Vec<User>>()
            .pop()
            .ok_or(MyError::NotFound) // more applicable for SELECTs
    }
}
mod handlers {
    use crate::{db, errors::MyError, models::User};
    use actix_web::{web, Error, HttpResponse};
    use deadpool_postgres::{Client, Pool};

    /// `POST /users`: insert the JSON-decoded user and echo the stored row.
    pub async fn add_user(
        user: web::Json<User>,
        db_pool: web::Data<Pool>,
    ) -> Result<HttpResponse, Error> {
        let user_info: User = user.into_inner();

        // Pool/connection failures surface as MyError::PoolError (HTTP 500).
        let client: Client = db_pool.get().await.map_err(MyError::PoolError)?;

        let new_user = db::add_user(&client, user_info).await?;

        Ok(HttpResponse::Ok().json(new_user))
    }
}
use actix_web::{web, App, HttpServer};
use dotenv::dotenv;
use handlers::add_user;
use tokio_postgres::NoTls;
/// Entry point: load `.env`, build the deadpool Postgres pool from the PG.*
/// settings, and serve `POST /users` at the configured address.
#[actix_web::main]
async fn main() -> std::io::Result<()> {
    dotenv().ok();

    let config = crate::config::Config::from_env().unwrap();
    let pool = config.pg.create_pool(None, NoTls).unwrap();

    let server = HttpServer::new(move || {
        App::new()
            .app_data(web::Data::new(pool.clone()))
            .service(web::resource("/users").route(web::post().to(add_user)))
    })
    .bind(config.server_addr.clone())?
    .run();
    // `.run()` returns immediately; the server only executes when awaited,
    // so this prints before serving begins.
    println!("Server running at http://{}/", config.server_addr);

    server.await
}

View File

@ -0,0 +1,5 @@
The `rbatis` ORM developers keep their own example on usage with Actix Web.
You can find the example in the [rbatis/example/src/actix_web](https://github.com/rbatis/rbatis/blob/master/example/src/actix_web/main.rs) directory.
See the [`example` folder readme](https://github.com/rbatis/rbatis/tree/master/example) for a guide on how to get started with the example.

View File

@ -0,0 +1,14 @@
[package]
name = "actix_redis"
version = "1.0.0"
edition = "2021"
[dependencies]
actix = "0.12"
actix-web = "4.0.0-rc.3"
actix-redis = "0.10.0-beta.6"
futures-util = { version = "0.3.7", default-features = false, features = ["std"] }
log = "0.4"
redis-async = { version = "0.8", default_features = false, features = ["tokio10"] }
serde = { version = "1", features = ["derive"] }
env_logger = "0.9"

65
databases/redis/README.md Normal file
View File

@ -0,0 +1,65 @@
# Redis
This project illustrates how to send multiple cache requests to Redis in bulk, asynchronously. This approach resembles traditional Redis pipelining. [See here for more details about this approach.](https://github.com/benashford/redis-async-rs/issues/19#issuecomment-412208018)
## Start Server
```sh
cd databases/redis
cargo run
```
## Endpoints
### `POST /stuff`
To test the demo, POST a json object containing three strings to the `/stuff` endpoint:
```json
{
"one": "first entry",
"two": "second entry",
"three": "third entry"
}
```
These three entries will cache to redis, keyed accordingly.
Using [HTTPie]:
```sh
http :8080/stuff one="first entry" two="second entry" three="third entry"
```
Using [cURL]:
```sh
curl localhost:8080/stuff -H 'content-type: application/json' -d '{"one":"first entry","two":"second entry","three":"third entry"}'
```
### `DELETE /stuff`
To delete these, simply issue a DELETE http request to /stuff endpoint
Using [HTTPie]:
```sh
http DELETE :8080/stuff
```
Using [cURL]:
```sh
curl -XDELETE 127.0.0.1:8080/stuff
```
## Verify Redis Contents
At any time, verify the contents of Redis using its CLI:
```sh
echo "MGET mydomain:one mydomain:two mydomain:three" | redis-cli
```
[HTTPie]: https://httpie.org
[cURL]: https://curl.haxx.se

View File

@ -0,0 +1,96 @@
use actix::prelude::*;
use actix_redis::{Command, RedisActor};
use actix_web::{error, middleware, web, App, HttpResponse, HttpServer};
use futures_util::future::try_join_all;
use redis_async::{resp::RespValue, resp_array};
use serde::Deserialize;
/// JSON body accepted by `POST /stuff`: the three strings to cache.
#[derive(Deserialize)]
pub struct CacheInfo {
    one: String,
    two: String,
    three: String,
}
/// `POST /stuff`: cache the three provided strings in Redis under the keys
/// `mydomain:{one,two,three}`, sending the SET commands concurrently.
async fn cache_stuff(
    info: web::Json<CacheInfo>,
    redis: web::Data<Addr<RedisActor>>,
) -> actix_web::Result<HttpResponse> {
    let info = info.into_inner();

    // Dispatch all three SETs before awaiting any of them.
    let one = redis.send(Command(resp_array!["SET", "mydomain:one", info.one]));
    let two = redis.send(Command(resp_array!["SET", "mydomain:two", info.two]));
    let three = redis.send(Command(resp_array!["SET", "mydomain:three", info.three]));

    // Asynchronously collects the results of the futures given. The returned future will drive
    // execution for all of its underlying futures, collecting the results into a destination
    // `Vec<RespValue>` in the same order as they were provided. If any future returns an error then
    // all other futures will be canceled and an error will be returned immediately. If all futures
    // complete successfully, however, then the returned future will succeed with a `Vec` of all the
    // successful results.
    let res = try_join_all([one, two, three])
        .await
        .map_err(error::ErrorInternalServerError)?
        .into_iter()
        .map(|item| item.map_err(error::ErrorInternalServerError))
        .collect::<Result<Vec<_>, _>>()?;

    // successful operations return "OK", so confirm that all returned as so
    if res
        .iter()
        .all(|res| matches!(res, RespValue::SimpleString(x) if x == "OK"))
    {
        Ok(HttpResponse::Ok().body("successfully cached values"))
    } else {
        Ok(HttpResponse::InternalServerError().finish())
    }
}
/// `DELETE /stuff`: remove the three cached values with one Redis `DEL`.
///
/// Responds 200 only when the reply reports exactly 3 keys removed; any
/// other reply (including a smaller count, i.e. some keys were already
/// absent) is logged and answered with 500.
async fn del_stuff(
    redis: web::Data<Addr<RedisActor>>,
) -> actix_web::Result<HttpResponse> {
    let res = redis
        .send(Command(resp_array![
            "DEL",
            "mydomain:one",
            "mydomain:two",
            "mydomain:three"
        ]))
        .await
        // first error: actor mailbox/delivery failure; second: Redis error reply
        .map_err(error::ErrorInternalServerError)?
        .map_err(error::ErrorInternalServerError)?;

    match res {
        // `DEL` replies with the number of keys actually removed; matching
        // the literal replaces the redundant `Integer(x) if x == 3` guard.
        RespValue::Integer(3) => Ok(HttpResponse::Ok().body("successfully deleted values")),
        _ => {
            log::error!("{:?}", res);
            Ok(HttpResponse::InternalServerError().finish())
        }
    }
}
/// Entry point: serve `POST`/`DELETE /stuff` on 127.0.0.1:8080.
#[actix_web::main]
async fn main() -> std::io::Result<()> {
    env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));

    log::info!("starting HTTP server at http://localhost:8080");

    HttpServer::new(|| {
        // Started inside the factory closure, so each server worker spawns
        // its own RedisActor (and thus its own Redis connection).
        let redis_addr = RedisActor::start("127.0.0.1:6379");

        App::new()
            .app_data(web::Data::new(redis_addr))
            .wrap(middleware::Logger::default())
            .service(
                web::resource("/stuff")
                    .route(web::post().to(cache_stuff))
                    .route(web::delete().to(del_stuff)),
            )
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}

2
databases/sqlite/.gitignore vendored Normal file
View File

@ -0,0 +1,2 @@
weather.db
weather.db-*

View File

@ -0,0 +1,16 @@
[package]
name = "async_db"
version = "1.0.0"
edition = "2021"
[dependencies]
actix-web = "4.0.0-rc.2"
env_logger = "0.9"
futures-util = { version = "0.3", default-features = false, features = ["std"] }
log = "0.4"
r2d2 = "0.8"
r2d2_sqlite = "0.19"
rusqlite = "0.26"
serde = { version = "1", features = ["derive"] }
serde_json = "1"

View File

@ -0,0 +1,47 @@
Getting started using databases with Actix Web, asynchronously.
## Usage
### init database sqlite
From the root directory of this project:
```bash
bash db/setup_db.sh
```
This creates a sqlite database, weather.db, in the root.
### server
```bash
# if ubuntu : sudo apt-get install libsqlite3-dev
# if fedora : sudo dnf install libsqlite3x-devel
cargo run (or ``cargo watch -x run``)
# Started http server: 127.0.0.1:8080
```
### web client
[http://127.0.0.1:8080/asyncio_weather](http://127.0.0.1:8080/asyncio_weather)
[http://127.0.0.1:8080/parallel_weather](http://127.0.0.1:8080/parallel_weather)
### sqlite client
```bash
# if ubuntu : sudo apt-get install sqlite3
# if fedora : sudo dnf install sqlite3x
sqlite3 weather.db
sqlite> .tables
sqlite> select * from nyc_weather;
```
## Dependencies
On Ubuntu 19.10:
```
sudo apt install libsqlite3-dev
```

Binary file not shown.

View File

@ -0,0 +1,5 @@
This directory includes weather information obtained from NOAA for NYC Central Park: https://www.ncdc.noaa.gov/cdo-web/
# Setup Instructions
Set up a sqlite3 database by executing the setup_db.sh file: `bash setup_db.sh`

28
databases/sqlite/db/db.sql Executable file
View File

@ -0,0 +1,28 @@
-- Raw NOAA daily-observation data for NYC Central Park (see the README in
-- this directory). Columns use NOAA's element codes (PRCP, SNOW, TMAX, ...).
-- NOTE(review): the CSV import appears to load the header row as data too —
-- queries filter it with `WHERE tmax <> 'TMAX'`; confirm against setup_db.sh.
CREATE TABLE nyc_weather(
    STATION TEXT,
    NAME TEXT,
    DATE TEXT,
    ACMH DOUBLE,
    AWND DOUBLE,
    FMTM DOUBLE,
    PGTM DOUBLE,
    PRCP DOUBLE,
    PSUN DOUBLE,
    SNOW DOUBLE,
    SNWD DOUBLE,
    TAVG DOUBLE,
    TMAX DOUBLE,
    TMIN DOUBLE,
    TSUN DOUBLE,
    WDF1 DOUBLE,
    WDF2 DOUBLE,
    WDF5 DOUBLE,
    WDFG DOUBLE,
    WDFM DOUBLE,
    WSF1 DOUBLE,
    WSF2 DOUBLE,
    WSF5 DOUBLE,
    WSFG DOUBLE,
    WSFM DOUBLE
);

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,4 @@
#!/usr/bin/env bash
# Create weather.db one directory up and load the NOAA CSV into nyc_weather.
set -euo pipefail

# Run relative to this script's location so the ../weather.db and db.sql
# paths resolve regardless of the caller's cwd. Quoting guards against
# spaces in the checkout path (the original unquoted $(dirname "$0") broke).
cd "$(dirname "$0")"

sqlite3 ../weather.db < db.sql
sqlite3 -csv ../weather.db ".import nyc_centralpark_weather.csv nyc_weather"

123
databases/sqlite/src/db.rs Normal file
View File

@ -0,0 +1,123 @@
use actix_web::{error, web, Error};
use rusqlite::Statement;
use serde::{Deserialize, Serialize};
use std::{thread::sleep, time::Duration};
pub type Pool = r2d2::Pool<r2d2_sqlite::SqliteConnectionManager>;
pub type Connection = r2d2::PooledConnection<r2d2_sqlite::SqliteConnectionManager>;
type WeatherAggResult = Result<Vec<WeatherAgg>, rusqlite::Error>;
/// Aggregated weather-query result, serialized to JSON in responses.
#[derive(Debug, Serialize, Deserialize)]
pub enum WeatherAgg {
    // Summed TMAX per year.
    AnnualAgg { year: i32, total: f64 },
    // Summed TMAX per (year, month).
    MonthAgg { year: i32, month: i32, total: f64 },
}
/// Selects which canned aggregate query `execute` should run.
#[allow(clippy::enum_variant_names)]
pub enum Queries {
    GetTopTenHottestYears,
    GetTopTenColdestYears,
    GetTopTenHottestMonths,
    GetTopTenColdestMonths,
}
/// Run one of the canned aggregate queries on a pooled connection.
///
/// Both acquiring the connection and running the query are blocking, so each
/// step is moved off the async executor with `web::block`.
pub async fn execute(pool: &Pool, query: Queries) -> Result<Vec<WeatherAgg>, Error> {
    let pool = pool.clone();

    let conn = web::block(move || pool.get())
        .await?
        .map_err(error::ErrorInternalServerError)?;

    web::block(move || {
        // simulate an expensive query, see comments at top of main.rs
        sleep(Duration::from_secs(2));
        match query {
            Queries::GetTopTenHottestYears => get_hottest_years(conn),
            Queries::GetTopTenColdestYears => get_coldest_years(conn),
            Queries::GetTopTenHottestMonths => get_hottest_months(conn),
            Queries::GetTopTenColdestMonths => get_coldest_months(conn),
        }
    })
    .await?
    .map_err(error::ErrorInternalServerError)
}
/// Shared query: ten years ranked by summed TMAX in the given direction.
/// `order` must be an SQL ordering keyword ("ASC" or "DESC") — it is spliced
/// into the statement, so only these fixed internal values are passed.
fn years_by_total_tmax(conn: Connection, order: &str) -> WeatherAggResult {
    let stmt = conn.prepare(&format!(
        "
        SELECT cast(strftime('%Y', date) as int) as theyear,
            sum(tmax) as total
        FROM nyc_weather
        WHERE tmax <> 'TMAX'
        GROUP BY theyear
        ORDER BY total {} LIMIT 10",
        order
    ))?;

    get_rows_as_annual_agg(stmt)
}

/// Ten years with the highest summed TMAX (hottest first).
fn get_hottest_years(conn: Connection) -> WeatherAggResult {
    years_by_total_tmax(conn, "DESC")
}

/// Ten years with the lowest summed TMAX (coldest first).
fn get_coldest_years(conn: Connection) -> WeatherAggResult {
    years_by_total_tmax(conn, "ASC")
}
/// Map each (year, total) row onto `WeatherAgg::AnnualAgg`, collecting the
/// whole result set into a single `Result`.
fn get_rows_as_annual_agg(mut statement: Statement) -> WeatherAggResult {
    let mapped = statement.query_map([], |row| {
        Ok(WeatherAgg::AnnualAgg {
            year: row.get(0)?,
            total: row.get(1)?,
        })
    })?;
    // Collecting Result items short-circuits on the first row error.
    mapped.collect()
}
/// Shared query: ten (year, month) buckets ranked by summed TMAX in the
/// given direction. `order` must be "ASC" or "DESC" — it is spliced into the
/// statement, so only these fixed internal values are passed.
fn months_by_total_tmax(conn: Connection, order: &str) -> WeatherAggResult {
    let stmt = conn.prepare(&format!(
        "SELECT cast(strftime('%Y', date) as int) as theyear,
            cast(strftime('%m', date) as int) as themonth,
            sum(tmax) as total
        FROM nyc_weather
        WHERE tmax <> 'TMAX'
        GROUP BY theyear, themonth
        ORDER BY total {} LIMIT 10",
        order
    ))?;

    get_rows_as_month_agg(stmt)
}

/// Ten months with the highest summed TMAX (hottest first).
fn get_hottest_months(conn: Connection) -> WeatherAggResult {
    months_by_total_tmax(conn, "DESC")
}

/// Ten months with the lowest summed TMAX (coldest first).
fn get_coldest_months(conn: Connection) -> WeatherAggResult {
    months_by_total_tmax(conn, "ASC")
}
/// Map each (year, month, total) row onto `WeatherAgg::MonthAgg`, collecting
/// the whole result set into a single `Result`.
fn get_rows_as_month_agg(mut statement: Statement) -> WeatherAggResult {
    let mapped = statement.query_map([], |row| {
        Ok(WeatherAgg::MonthAgg {
            year: row.get(0)?,
            month: row.get(1)?,
            total: row.get(2)?,
        })
    })?;
    // Collecting Result items short-circuits on the first row error.
    mapped.collect()
}

View File

@ -0,0 +1,78 @@
//! Actix Web Asynchronous Database Example
//!
//! This project illustrates expensive and blocking database requests that runs
//! in a thread-pool using `web::block` with two examples:
//!
//! 1. An asynchronous handler that executes 4 queries in *sequential order*,
//! collecting the results and returning them as a single serialized json object
//!
//! 2. An asynchronous handler that executes 4 queries in *parallel*,
//! collecting the results and returning them as a single serialized json object
//!
//! Note: The use of sleep(Duration::from_secs(2)); in db.rs is to make performance
//! improvement with parallelism more obvious.
use std::io;
use actix_web::{middleware, web, App, Error as AWError, HttpResponse, HttpServer};
use futures_util::future::join_all;
use r2d2_sqlite::{self, SqliteConnectionManager};
mod db;
use db::{Pool, Queries};
/// Version 1: Calls 4 queries in sequential order, as an asynchronous handler
///
/// Each `await` completes before the next query starts, so total latency is
/// roughly the sum of the four simulated 2-second queries in db.rs.
async fn asyncio_weather(db: web::Data<Pool>) -> Result<HttpResponse, AWError> {
    let result = vec![
        db::execute(&db, Queries::GetTopTenHottestYears).await?,
        db::execute(&db, Queries::GetTopTenColdestYears).await?,
        db::execute(&db, Queries::GetTopTenHottestMonths).await?,
        db::execute(&db, Queries::GetTopTenColdestMonths).await?,
    ];

    Ok(HttpResponse::Ok().json(result))
}
/// Version 2: Calls 4 queries in parallel, as an asynchronous handler
/// Returning Error types turn into None values in the response
async fn parallel_weather(db: web::Data<Pool>) -> Result<HttpResponse, AWError> {
    // Box::pin gives the four futures a uniform type so they fit in one Vec.
    let fut_result = vec![
        Box::pin(db::execute(&db, Queries::GetTopTenHottestYears)),
        Box::pin(db::execute(&db, Queries::GetTopTenColdestYears)),
        Box::pin(db::execute(&db, Queries::GetTopTenHottestMonths)),
        Box::pin(db::execute(&db, Queries::GetTopTenColdestMonths)),
    ];

    // Drive all four concurrently; fail the whole request on any error.
    let result: Result<Vec<_>, _> = join_all(fut_result).await.into_iter().collect();

    Ok(HttpResponse::Ok().json(result.map_err(AWError::from)?))
}
/// Entry point: open the r2d2/SQLite pool on `weather.db` and serve the two
/// comparison endpoints on 127.0.0.1:8080.
#[actix_web::main]
async fn main() -> io::Result<()> {
    env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));

    // connect to SQLite DB
    let manager = SqliteConnectionManager::file("weather.db");
    let pool = Pool::new(manager).unwrap();

    log::info!("starting HTTP server at http://localhost:8080");

    // start HTTP server
    HttpServer::new(move || {
        App::new()
            // store db pool as Data object
            .app_data(web::Data::new(pool.clone()))
            .wrap(middleware::Logger::default())
            .service(
                web::resource("/asyncio_weather").route(web::get().to(asyncio_weather)),
            )
            .service(
                web::resource("/parallel_weather")
                    .route(web::get().to(parallel_weather)),
            )
    })
    .bind(("127.0.0.1", 8080))?
    // deliberately small worker count for this demo of blocking-query offload
    .workers(2)
    .run()
    .await
}