restructure folders

databases/redis/Cargo.toml (new file, 14 lines added)
@@ -0,0 +1,14 @@
[package]
name = "actix_redis"
version = "1.0.0"
edition = "2021"

[dependencies]
actix = "0.12"
actix-web = "4.0.0-rc.3"
actix-redis = "0.10.0-beta.6"
futures-util = { version = "0.3.7", default-features = false, features = ["std"] }
log = "0.4"
redis-async = { version = "0.8", default-features = false, features = ["tokio10"] }
serde = { version = "1", features = ["derive"] }
env_logger = "0.9"

databases/redis/README.md (new file, 65 lines added)
@@ -0,0 +1,65 @@
# Redis

This project illustrates how to send multiple cache requests to Redis in bulk, asynchronously. This approach resembles traditional Redis pipelining. [See here for more details about this approach.](https://github.com/benashford/redis-async-rs/issues/19#issuecomment-412208018)
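
Roughly, the pattern is the sketch below: the handler fires off each command without awaiting it, then drives all of the resulting futures together. This is only an illustrative sketch; the helper name `cache_bulk` is not part of the example, and the full handler in `src/main.rs` also checks each individual reply.

```rust
use actix::{Addr, MailboxError};
use actix_redis::{Command, RedisActor};
use futures_util::future::try_join_all;
use redis_async::resp_array;

// Queue several commands on the actor's connection and await them as a group,
// so the writes go out back-to-back instead of one round-trip at a time.
async fn cache_bulk(redis: &Addr<RedisActor>) -> Result<(), MailboxError> {
    let futs = vec![
        redis.send(Command(resp_array!["SET", "mydomain:one", "first entry"])),
        redis.send(Command(resp_array!["SET", "mydomain:two", "second entry"])),
        redis.send(Command(resp_array!["SET", "mydomain:three", "third entry"])),
    ];
    // Each reply is itself a `Result`; handling of individual replies is
    // elided here and shown in `src/main.rs`.
    let _replies = try_join_all(futs).await?;
    Ok(())
}
```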

## Start Server

```sh
cd databases/redis
cargo run
```

## Endpoints

### `POST /stuff`

To test the demo, POST a JSON object containing three strings to the `/stuff` endpoint:

```json
{
  "one": "first entry",
  "two": "second entry",
  "three": "third entry"
}
```

These three entries will be cached in Redis, keyed accordingly.

Using [HTTPie]:

```sh
http :8080/stuff one="first entry" two="second entry" three="third entry"
```

Using [cURL]:

```sh
curl localhost:8080/stuff -H 'content-type: application/json' -d '{"one":"first entry","two":"second entry","three":"third entry"}'
```

### `DELETE /stuff`

To delete these entries, issue a DELETE HTTP request to the `/stuff` endpoint.

Using [HTTPie]:

```sh
http DELETE :8080/stuff
```

Using [cURL]:

```sh
curl -XDELETE 127.0.0.1:8080/stuff
```

## Verify Redis Contents

At any time, verify the contents of Redis using its CLI:

```sh
echo "MGET mydomain:one mydomain:two mydomain:three" | redis-cli
```
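
If the example payload above has been cached, the reply should list "first entry", "second entry", and "third entry" in that order (or nil values after a DELETE).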

[HTTPie]: https://httpie.org
[cURL]: https://curl.haxx.se

databases/redis/src/main.rs (new file, 96 lines added)
@@ -0,0 +1,96 @@
use actix::prelude::*;
use actix_redis::{Command, RedisActor};
use actix_web::{error, middleware, web, App, HttpResponse, HttpServer};
use futures_util::future::try_join_all;
use redis_async::{resp::RespValue, resp_array};
use serde::Deserialize;

#[derive(Deserialize)]
pub struct CacheInfo {
    one: String,
    two: String,
    three: String,
}

async fn cache_stuff(
    info: web::Json<CacheInfo>,
    redis: web::Data<Addr<RedisActor>>,
) -> actix_web::Result<HttpResponse> {
    let info = info.into_inner();

    let one = redis.send(Command(resp_array!["SET", "mydomain:one", info.one]));
    let two = redis.send(Command(resp_array!["SET", "mydomain:two", info.two]));
    let three = redis.send(Command(resp_array!["SET", "mydomain:three", info.three]));

    // Asynchronously collects the results of the futures given. The returned future will drive
    // execution for all of its underlying futures, collecting the results into a destination
    // `Vec<RespValue>` in the same order as they were provided. If any future returns an error,
    // all other futures will be canceled and an error will be returned immediately. If all
    // futures complete successfully, the returned future will succeed with a `Vec` of all the
    // successful results.
    let res = try_join_all([one, two, three])
        .await
        .map_err(error::ErrorInternalServerError)?
        .into_iter()
        .map(|item| item.map_err(error::ErrorInternalServerError))
        .collect::<Result<Vec<_>, _>>()?;

    // successful operations return "OK", so confirm that they all did
    if res
        .iter()
        .all(|res| matches!(res, RespValue::SimpleString(x) if x == "OK"))
    {
        Ok(HttpResponse::Ok().body("successfully cached values"))
    } else {
        Ok(HttpResponse::InternalServerError().finish())
    }
}

async fn del_stuff(
    redis: web::Data<Addr<RedisActor>>,
) -> actix_web::Result<HttpResponse> {
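    // `send()` wraps the Redis reply in two layers: the outer `Result` is a
    // mailbox/delivery error from the actor, the inner one is the reply (or
    // error) from Redis itself, hence the two `map_err` calls below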
    let res = redis
        .send(Command(resp_array![
            "DEL",
            "mydomain:one",
            "mydomain:two",
            "mydomain:three"
        ]))
        .await
        .map_err(error::ErrorInternalServerError)?
        .map_err(error::ErrorInternalServerError)?;

    match res {
        RespValue::Integer(x) if x == 3 => {
            Ok(HttpResponse::Ok().body("successfully deleted values"))
        }

        _ => {
            log::error!("{:?}", res);
            Ok(HttpResponse::InternalServerError().finish())
        }
    }
}

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));

    log::info!("starting HTTP server at http://localhost:8080");

    HttpServer::new(|| {
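        // the app factory closure runs once per worker, so each worker gets
        // its own `RedisActor` connection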
        let redis_addr = RedisActor::start("127.0.0.1:6379");

        App::new()
            .app_data(web::Data::new(redis_addr))
            .wrap(middleware::Logger::default())
            .service(
                web::resource("/stuff")
                    .route(web::post().to(cache_stuff))
                    .route(web::delete().to(del_stuff)),
            )
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}