mirror of https://github.com/actix/examples synced 2024-11-27 16:02:57 +01:00

chore: move and document rate limit middleware

This commit is contained in:
Rob Ede 2023-10-29 23:47:02 +00:00
parent cf55f50d1d
commit 5d36d72976
GPG Key ID: 97C636207D3EF933
7 changed files with 84 additions and 53 deletions

Cargo.lock (generated)

@@ -141,13 +141,13 @@ dependencies = [
[[package]]
name = "actix-governor"
version = "0.4.1"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46ff2d40f2bc627b8054c5e20fa6b0b0cf9428699b54bd41634e9ae3098ad555"
checksum = "a2e7b88f3804e01bd4191fdb08650430bbfcb43d3d9b2890064df3551ec7d25b"
dependencies = [
"actix-http",
"actix-web",
"futures 0.3.28",
"futures 0.3.29",
"governor",
]
@@ -2622,10 +2622,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856"
dependencies = [
"cfg-if 1.0.0",
"hashbrown 0.14.0",
"lock_api 0.4.10",
"hashbrown 0.14.2",
"lock_api 0.4.11",
"once_cell",
"parking_lot_core 0.9.8",
"parking_lot_core 0.9.9",
]
[[package]]
@@ -3573,20 +3573,20 @@ dependencies = [
[[package]]
name = "governor"
version = "0.5.1"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c390a940a5d157878dd057c78680a33ce3415bcd05b4799509ea44210914b4d5"
checksum = "821239e5672ff23e2a7060901fa622950bbd80b649cdaadd78d1c1767ed14eb4"
dependencies = [
"cfg-if 1.0.0",
"dashmap",
"futures 0.3.28",
"futures 0.3.29",
"futures-timer",
"no-std-compat",
"nonzero_ext",
"parking_lot 0.12.1",
"quanta",
"rand 0.8.5",
"smallvec 1.11.0",
"smallvec 1.11.1",
]
[[package]]
@@ -4610,10 +4610,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c41e0c4fef86961ac6d6f8a82609f55f31b05e4fce149ac5710e439df7619ba4"
[[package]]
name = "mach"
version = "0.3.2"
name = "mach2"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b823e83b2affd8f40a9ee8c29dbc56404c1e34cd2710921f2801e2cf29527afa"
checksum = "6d0d1830bcd151a6fc4aea1369af235b36c1528fe976b8ff678683c9995eade8"
dependencies = [
"libc",
]
@@ -5858,16 +5858,16 @@ dependencies = [
[[package]]
name = "quanta"
version = "0.9.3"
version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "20afe714292d5e879d8b12740aa223c6a88f118af41870e8b6196e39a02238a8"
checksum = "a17e662a7a8291a865152364c20c7abc5e60486ab2001e8ec10b24862de0b9ab"
dependencies = [
"crossbeam-utils 0.8.16",
"libc",
"mach",
"mach2",
"once_cell",
"raw-cpuid",
"wasi 0.10.2+wasi-snapshot-preview1",
"wasi 0.11.0+wasi-snapshot-preview1",
"web-sys",
"winapi 0.3.9",
]
@@ -8748,12 +8748,6 @@ version = "0.9.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
[[package]]
name = "wasi"
version = "0.10.2+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6"
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"

Cargo.toml

@@ -45,7 +45,7 @@ members = [
"middleware/middleware-ext-mut",
"middleware/middleware-http-to-https",
"middleware/middleware",
"middleware/middleware-rate-limit",
"middleware/rate-limit",
"protobuf",
"run-in-thread",
"server-sent-events",

middleware/rate-limit/Cargo.toml

@@ -1,13 +1,13 @@
[package]
name = "middleware-rate-limit"
version = "1.0.0"
edition = "2021"
publish.workspace = true
edition.workspace = true
[dependencies]
actix-governor = "0.5"
actix-web.workspace = true
log.workspace = true
chrono.workspace = true
env_logger.workspace = true
futures-util.workspace = true
chrono.workspace = true
actix-governor = "0.4"
log.workspace = true

middleware/rate-limit/README.md

@@ -0,0 +1,21 @@
# Middleware: Rate Limiting
This example showcases two middleware that achieve rate limiting for your API endpoints. One uses a simple leaky-bucket implementation and the other delegates to [`actix-governor`].
## Usage
```sh
cd middleware/rate-limit
cargo run
```
Look in `src/rate_limit.rs` to see the leaky-bucket implementation.
## Routes
- [GET /test/simple](http://localhost:8080/test/simple) - uses the hand-written leaky-bucket rate limiter.
- [GET /test/governor](http://localhost:8080/test/governor) - uses [`actix-governor`].
Calling either of these endpoints too frequently will result in a 429 Too Many Requests response.
[`actix-governor`]: https://crates.io/crates/actix-governor
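
To see the limiter kick in from the client side, a throwaway program along these lines can be used (a sketch only: it assumes the example server is already running on `localhost:8080` and that `awc` and `actix-rt` are available as dependencies):

```rust
use actix_web::http::StatusCode;

// Fires three back-to-back requests at the governor-protected route. With a
// burst size of 2, the third request should come back as 429 Too Many Requests.
#[actix_rt::main]
async fn main() -> Result<(), awc::error::SendRequestError> {
    let client = awc::Client::default();

    for i in 1..=3 {
        let res = client
            .get("http://localhost:8080/test/governor")
            .send()
            .await?;

        if res.status() == StatusCode::TOO_MANY_REQUESTS {
            println!("request {i}: rate limited (429)");
        } else {
            println!("request {i}: {}", res.status());
        }
    }

    Ok(())
}
```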

middleware/rate-limit/src/main.rs

@@ -17,7 +17,7 @@ async fn index() -> HttpResponse {
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
let limit_cfg = GovernorConfigBuilder::default()
let governor_config = GovernorConfigBuilder::default()
.per_second(10)
.burst_size(2)
.finish()
@@ -27,10 +27,9 @@ async fn main() -> io::Result<()> {
HttpServer::new(move || {
App::new()
.wrap(middleware::Logger::default())
.service(
web::resource("/test/governor")
.wrap(Governor::new(&limit_cfg))
.wrap(Governor::new(&governor_config))
.route(web::get().to(index)),
)
.service(
@@ -38,7 +37,10 @@ async fn main() -> io::Result<()> {
.wrap(rate_limit::RateLimit::new(2))
.route(web::get().to(index)),
)
.wrap(middleware::NormalizePath::trim())
.wrap(middleware::Logger::default())
})
.workers(2)
.bind(("127.0.0.1", 8080))?
.run()
.await
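
In the `actix-governor` builder, `.per_second(10)` sets the replenish interval (one permit roughly every 10 seconds) and `.burst_size(2)` caps how many requests can be made back to back before further ones are rejected. The example wraps a single resource; the same configuration can also wrap the whole `App` so every route shares the limit. A minimal sketch using the same 0.5 API as above:

```rust
// Sketch: applying the same governor configuration to every route by wrapping
// the App instead of a single resource (assumes actix-governor 0.5 and
// actix-web 4, as used in the example above).
use actix_governor::{Governor, GovernorConfigBuilder};
use actix_web::{web, App, HttpResponse, HttpServer};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    let governor_config = GovernorConfigBuilder::default()
        .per_second(10) // one permit replenished every 10 seconds
        .burst_size(2)  // up to 2 requests may be made back to back
        .finish()
        .unwrap();

    HttpServer::new(move || {
        App::new()
            // Every route registered on this App is now rate limited.
            .wrap(Governor::new(&governor_config))
            .route("/", web::get().to(|| async { HttpResponse::Ok().body("hello") }))
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}
```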

middleware/rate-limit/src/rate_limit.rs

@@ -1,15 +1,18 @@
use std::cell::RefCell;
use std::cmp::min;
use std::future::{ready, Ready};
//! Simple leaky-bucket rate-limiter.
use std::{
cell::RefCell,
cmp::min,
future::{ready, Ready},
};
use actix_web::body::EitherBody;
use actix_web::{
dev,
dev::{Service, ServiceRequest, ServiceResponse, Transform},
body::EitherBody,
dev::{forward_ready, Service, ServiceRequest, ServiceResponse, Transform},
Error, HttpResponse,
};
use chrono::{Local, NaiveDateTime};
use futures_util::future::LocalBoxFuture;
use futures_util::{future::LocalBoxFuture, FutureExt, TryFutureExt};
#[doc(hidden)]
pub struct RateLimitService<S> {
@@ -27,35 +30,38 @@ where
type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
dev::forward_ready!(service);
forward_ready!(service);
fn call(&self, req: ServiceRequest) -> Self::Future {
log::info!("request is passing through the AddMsg middleware");
req.uri().path();
// if be limited
if !self.token_bucket.borrow_mut().allow_query() {
// request has been rate limited
return Box::pin(async {
Ok(req.into_response(
HttpResponse::TooManyRequests()
.body("")
.finish()
.map_into_right_body(),
))
});
}
let fut = self.service.call(req);
Box::pin(async move { fut.await.map(ServiceResponse::map_into_left_body) })
self.service
.call(req)
.map_ok(ServiceResponse::map_into_left_body)
.boxed_local()
}
}
#[derive(Clone, Debug)]
pub struct RateLimit {
// limit in 10s
/// Request limit for 10 second period.
limit: u64,
}
impl RateLimit {
/// Constructs new rate limiter.
pub fn new(limit: u64) -> Self {
Self { limit }
}
@@ -82,36 +88,44 @@ where
}
struct TokenBucket {
// limit in ten sec
/// Request limit for 10 second period.
limit: u64,
last_query_time: NaiveDateTime,
// max query number in ten sec,in this case equal limit
/// Max number of requests for 10 second period, in this case equal to limit.
capacity: u64,
// numbers of token,default equal capacity
/// Time that last request was accepted.
last_req_time: NaiveDateTime,
/// Numbers of tokens remaining.
///
/// Initialized equal to capacity.
tokens: u64,
}
impl TokenBucket {
/// Constructs new leaky bucket.
fn new(limit: u64) -> Self {
TokenBucket {
limit,
last_query_time: Default::default(),
last_req_time: NaiveDateTime::UNIX_EPOCH,
capacity: limit,
tokens: 0,
}
}
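// Worked example of the refill arithmetic used in `allow_query` below
// (illustrative note, not part of the committed code): with `limit = 2`,
// an elapsed time of 5 seconds adds 5 * 2 / 10 = 1 token, while a full
// 10 seconds tops the bucket back up to its capacity of 2.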
/// Mutates leaky bucket for accepted request.
fn allow_query(&mut self) -> bool {
let current_time = Local::now().naive_local();
let time_elapsed = (current_time.timestamp() - self.last_query_time.timestamp()) as u64;
let time_elapsed = (current_time.timestamp() - self.last_req_time.timestamp()) as u64;
let tokens_to_add = time_elapsed * self.limit / 10;
self.tokens = min(self.tokens + tokens_to_add, self.capacity);
if self.tokens > 0 {
self.last_query_time = current_time;
self.last_req_time = current_time;
self.tokens -= 1;
true
} else {