
chore: move and document rate limit middleware

Rob Ede
2023-10-29 23:47:02 +00:00
parent cf55f50d1d
commit 5d36d72976
7 changed files with 84 additions and 53 deletions

middleware/rate-limit/Cargo.toml
@@ -0,0 +1,13 @@
[package]
name = "middleware-rate-limit"
version = "1.0.0"
publish.workspace = true
edition.workspace = true

[dependencies]
actix-governor = "0.5"
actix-web.workspace = true
chrono.workspace = true
env_logger.workspace = true
futures-util.workspace = true
log.workspace = true

middleware/rate-limit/README.md
@@ -0,0 +1,21 @@
# Middleware: Rate Limiting

This example showcases two middleware that achieve rate limiting for your API endpoints. One uses a simple, hand-written leaky-bucket implementation and the other delegates to [`actix-governor`].

## Usage

```sh
cd middleware/rate-limit
cargo run
```

Look in `src/rate_limit.rs` to see the leaky-bucket implementation.

## Routes

- [GET /test/simple](http://localhost:8080/test/simple) - uses the hand-written leaky-bucket rate limiter.
- [GET /test/governor](http://localhost:8080/test/governor) - uses [`actix-governor`].

Calling either of these endpoints too frequently will result in a 429 Too Many Requests response.
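For example, a quick burst against the simple limiter (configured with 2 tokens in `main.rs`) should look roughly like this illustrative shell session:

```sh
# The first two requests in a burst succeed; the third is rejected
# until the bucket replenishes tokens.
curl -i http://localhost:8080/test/simple   # HTTP/1.1 200 OK
curl -i http://localhost:8080/test/simple   # HTTP/1.1 200 OK
curl -i http://localhost:8080/test/simple   # HTTP/1.1 429 Too Many Requests
```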
[`actix-governor`]: https://crates.io/crates/actix-governor

middleware/rate-limit/src/main.rs
@@ -0,0 +1,47 @@
use std::io;

use actix_governor::{Governor, GovernorConfigBuilder};
use actix_web::{middleware, web, App, HttpResponse, HttpServer};

mod rate_limit;

async fn index() -> HttpResponse {
    HttpResponse::Ok().body("succeed")
}

#[actix_web::main]
async fn main() -> io::Result<()> {
    env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));

    // Allow bursts of up to 2 requests per peer IP, replenishing one permit every 10 seconds.
    let governor_config = GovernorConfigBuilder::default()
        .per_second(10)
        .burst_size(2)
        .finish()
        .unwrap();

    log::info!("starting HTTP server at http://localhost:8080");

    HttpServer::new(move || {
        App::new()
            .service(
                web::resource("/test/governor")
                    .wrap(Governor::new(&governor_config))
                    .route(web::get().to(index)),
            )
            .service(
                web::resource("/test/simple")
                    // The simple limiter keeps its token bucket per worker, so the
                    // effective limit scales with the number of workers.
                    .wrap(rate_limit::RateLimit::new(2))
                    .route(web::get().to(index)),
            )
            .wrap(middleware::NormalizePath::trim())
            .wrap(middleware::Logger::default())
    })
    .workers(2)
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}

middleware/rate-limit/src/rate_limit.rs
@@ -0,0 +1,135 @@
//! Simple leaky-bucket rate-limiter.

use std::{
    cell::RefCell,
    cmp::min,
    future::{ready, Ready},
};

use actix_web::{
    body::EitherBody,
    dev::{forward_ready, Service, ServiceRequest, ServiceResponse, Transform},
    Error, HttpResponse,
};
use chrono::{Local, NaiveDateTime};
use futures_util::{future::LocalBoxFuture, FutureExt, TryFutureExt};

#[doc(hidden)]
pub struct RateLimitService<S> {
    service: S,
    token_bucket: RefCell<TokenBucket>,
}
impl<S, B> Service<ServiceRequest> for RateLimitService<S>
where
    S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = Error>,
    S::Future: 'static,
    B: 'static,
{
    type Response = ServiceResponse<EitherBody<B>>;
    type Error = Error;
    type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;

    forward_ready!(service);

    fn call(&self, req: ServiceRequest) -> Self::Future {
        log::info!("request is passing through the rate limit middleware");

        if !self.token_bucket.borrow_mut().allow_query() {
            // request has been rate limited
            return Box::pin(async {
                Ok(req.into_response(
                    HttpResponse::TooManyRequests()
                        .finish()
                        .map_into_right_body(),
                ))
            });
        }

        self.service
            .call(req)
            .map_ok(ServiceResponse::map_into_left_body)
            .boxed_local()
    }
}
#[derive(Clone, Debug)]
pub struct RateLimit {
    /// Request limit for 10 second period.
    limit: u64,
}

impl RateLimit {
    /// Constructs new rate limiter.
    pub fn new(limit: u64) -> Self {
        Self { limit }
    }
}

impl<S, B> Transform<S, ServiceRequest> for RateLimit
where
    S: Service<ServiceRequest, Response = ServiceResponse<B>, Error = actix_web::Error>,
    S::Future: 'static,
    B: 'static,
{
    type Response = ServiceResponse<EitherBody<B>>;
    type Error = Error;
    type Transform = RateLimitService<S>;
    type InitError = ();
    type Future = Ready<Result<Self::Transform, Self::InitError>>;

    fn new_transform(&self, service: S) -> Self::Future {
        ready(Ok(RateLimitService {
            service,
            token_bucket: RefCell::new(TokenBucket::new(self.limit)),
        }))
    }
}
struct TokenBucket {
    /// Request limit for 10 second period.
    limit: u64,

    /// Max number of requests for 10 second period, in this case equal to limit.
    capacity: u64,

    /// Time that last request was accepted.
    last_req_time: NaiveDateTime,

    /// Number of tokens remaining.
    ///
    /// Starts at 0 but is topped up to `capacity` on the first request, since
    /// `last_req_time` is initialized to the Unix epoch.
    tokens: u64,
}
impl TokenBucket {
    /// Constructs new leaky bucket.
    fn new(limit: u64) -> Self {
        TokenBucket {
            limit,
            last_req_time: NaiveDateTime::UNIX_EPOCH,
            capacity: limit,
            tokens: 0,
        }
    }

    /// Refills tokens according to the time elapsed since the last accepted request,
    /// then returns whether this request is allowed, consuming a token if so.
    fn allow_query(&mut self) -> bool {
        let current_time = Local::now().naive_local();

        let time_elapsed = (current_time.timestamp() - self.last_req_time.timestamp()) as u64;

        // Replenish proportionally: `limit` tokens per 10-second window, capped at capacity.
        let tokens_to_add = time_elapsed * self.limit / 10;
        self.tokens = min(self.tokens + tokens_to_add, self.capacity);

        if self.tokens > 0 {
            self.last_req_time = current_time;
            self.tokens -= 1;
            true
        } else {
            false
        }
    }
}
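
// A hypothetical test sketch (illustrative, not part of the original file): since
// `last_req_time` starts at the Unix epoch, the first call tops the bucket up to
// capacity, so with `limit = 2` two immediate calls pass and the third is rejected
// until tokens replenish (one token per `10 / limit` seconds of elapsed time).
#[cfg(test)]
mod tests {
    use super::TokenBucket;

    #[test]
    fn burst_of_two_then_rejected() {
        let mut bucket = TokenBucket::new(2);

        // Bucket fills to capacity on the first call, then one token is consumed per call.
        assert!(bucket.allow_query());
        assert!(bucket.allow_query());

        // No meaningful time has passed, so no tokens were replenished.
        assert!(!bucket.allow_query());
    }
}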