diff --git a/Cargo.toml b/Cargo.toml
index b8323a08..c296bc23 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -19,6 +19,7 @@ members = [
   "juniper",
   "middleware",
   "multipart",
+  "multipart-s3",
   "multipart-async-std",
   "openssl",
   "protobuf",
diff --git a/multipart-s3/.env.example b/multipart-s3/.env.example
new file mode 100644
index 00000000..92483c4a
--- /dev/null
+++ b/multipart-s3/.env.example
@@ -0,0 +1,3 @@
+AWS_ACCESS_KEY_ID=
+AWS_SECRET_ACCESS_KEY=
+AWS_S3_BUCKET_NAME=
\ No newline at end of file
diff --git a/multipart-s3/.gitignore b/multipart-s3/.gitignore
new file mode 100644
index 00000000..2f62c534
--- /dev/null
+++ b/multipart-s3/.gitignore
@@ -0,0 +1,3 @@
+/target
+.env
+/tmp
\ No newline at end of file
diff --git a/multipart-s3/Cargo.toml b/multipart-s3/Cargo.toml
new file mode 100644
index 00000000..940f300a
--- /dev/null
+++ b/multipart-s3/Cargo.toml
@@ -0,0 +1,20 @@
+[package]
+name = "multipart-s3"
+version = "0.1.0"
+authors = ["cheolgyu <38715510+cheolgyu@users.noreply.github.com>"]
+edition = "2018"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+futures = "0.3.1"
+actix-multipart = "0.2.0"
+actix-web = "2.0.0"
+actix-rt = "1.0.0"
+rusoto_s3 = "0.42.0"
+rusoto_core = "0.42.0"
+bytes = { version = "0.5", features = ["serde"] }
+serde = { version = "1.0.104", features = ["derive"] }
+serde_json = "1.0"
+env_logger = "0.7"
+dotenv = "0.15.0"
\ No newline at end of file
diff --git a/multipart-s3/README.md b/multipart-s3/README.md
new file mode 100644
index 00000000..c33f72a8
--- /dev/null
+++ b/multipart-s3/README.md
@@ -0,0 +1,25 @@
+# multipart-s3
+
+Uploads the files from a multipart form to AWS S3 via [rusoto](https://github.com/rusoto/rusoto).
+The form's remaining fields arrive as a JSON `data` field and are deserialized into a struct.
+
+# Usage
+
+1. `cd examples/multipart-s3`
+2. Copy `.env.example` to `.env`.
+3. In `.env`, set `AWS_ACCESS_KEY_ID=your_access_key_id`.
+4. In `.env`, set `AWS_SECRET_ACCESS_KEY=your_secret_access_key`.
+5. In `.env`, set `AWS_S3_BUCKET_NAME=your_bucket_name`.
+
+# Running the server
+
+```
+cd examples/multipart-s3
+cargo run
+# or: cargo watch -x run
+```
+
+The server listens on http://localhost:3000.
+
+# Using other regions
+
+- https://www.rusoto.org/regions.html
+- https://docs.rs/rusoto_core/0.42.0/rusoto_core/enum.Region.html
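+
+# Testing with curl
+
+A quick sanity check once the server is running (the file path below is a
+placeholder). `split_payload` expects a `data` field holding the JSON for
+`InpAdd`, plus any number of file fields:
+
+```
+curl -F 'data={"text":"hello","number":123}' \
+     -F 'file=@./some-local-file.txt' \
+     http://localhost:3000/
+```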
diff --git a/multipart-s3/src/main.rs b/multipart-s3/src/main.rs
new file mode 100644
index 00000000..31e9db34
--- /dev/null
+++ b/multipart-s3/src/main.rs
@@ -0,0 +1,113 @@
+use actix_multipart::Multipart;
+use actix_web::{middleware, web, App, Error, HttpResponse, HttpServer};
+use dotenv::dotenv;
+use std::env;
+
+mod utils;
+use utils::upload::{save_file as upload_save_file, split_payload, UploadFile};
+
+mod model {
+    use serde::{Deserialize, Serialize};
+
+    #[derive(Deserialize, Serialize, Debug)]
+    pub struct InpAdd {
+        pub text: String,
+        pub number: i32,
+    }
+}
+
+// POST handler: split the multipart body into the JSON `data` field and the
+// uploaded files, deserialize the JSON, then push each temp file to S3.
+async fn save_file(mut payload: Multipart) -> Result<HttpResponse, Error> {
+    let (data, tmp_files) = split_payload(&mut payload).await;
+    println!("bytes={:#?}", data);
+
+    let inp_info: model::InpAdd = serde_json::from_slice(&data).unwrap();
+    println!("converter_struct={:#?}", inp_info);
+    println!("tmpfiles={:#?}", tmp_files);
+
+    // Key prefix under which the files are stored in the bucket.
+    let s3_upload_key = format!("projects/{}/", "posts_id");
+
+    // Upload each tmp file to S3 and remove the local copy.
+    let upload_files: Vec<UploadFile> =
+        upload_save_file(tmp_files, s3_upload_key).await.unwrap();
+    println!("upload_files={:#?}", upload_files);
+
+    Ok(HttpResponse::Ok().into())
+}
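+
+// The handler above returns an empty 200; a hypothetical variant could echo
+// back what was parsed and uploaded instead:
+//
+//     Ok(HttpResponse::Ok().json((inp_info, upload_files)))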
+
+fn index() -> HttpResponse {
+    // Minimal upload form: the `data` field carries the JSON for `model::InpAdd`.
+    let html = r#"<html>
+        <head><title>Upload Test</title></head>
+        <body>
+            <form target="/" method="post" enctype="multipart/form-data">
+                <input type="text" name="data" value='{"text":"test","number":123}'/>
+                <input type="file" multiple name="file"/>
+                <button type="submit">Submit</button>
+            </form>
+        </body>
+    </html>"#;
+
+    HttpResponse::Ok().body(html)
+}
+
+#[actix_rt::main]
+async fn main() -> std::io::Result<()> {
+    dotenv().ok();
+
+    // Fail fast if the AWS settings are missing. rusoto's default credential
+    // chain reads the two key variables from the environment, so they are
+    // only checked here, never printed.
+    env::var("AWS_ACCESS_KEY_ID").expect("AWS_ACCESS_KEY_ID must be set");
+    env::var("AWS_SECRET_ACCESS_KEY").expect("AWS_SECRET_ACCESS_KEY must be set");
+    env::var("AWS_S3_BUCKET_NAME").expect("AWS_S3_BUCKET_NAME must be set");
+
+    std::env::set_var("RUST_LOG", "actix_server=info,actix_web=info");
+    env_logger::init();
+
+    // Temporary directory for uploads before they are pushed to S3.
+    std::fs::create_dir_all("./tmp").unwrap();
+
+    let ip = "0.0.0.0:3000";
+
+    HttpServer::new(|| {
+        App::new().wrap(middleware::Logger::default()).service(
+            web::resource("/")
+                .route(web::get().to(index))
+                .route(web::post().to(save_file)),
+        )
+    })
+    .bind(ip)?
+    .run()
+    .await
+}
diff --git a/multipart-s3/src/utils/mod.rs b/multipart-s3/src/utils/mod.rs
new file mode 100644
index 00000000..e23a6f03
--- /dev/null
+++ b/multipart-s3/src/utils/mod.rs
@@ -0,0 +1,2 @@
+pub mod s3;
+pub mod upload;
diff --git a/multipart-s3/src/utils/s3.rs b/multipart-s3/src/utils/s3.rs
new file mode 100644
index 00000000..f424398b
--- /dev/null
+++ b/multipart-s3/src/utils/s3.rs
@@ -0,0 +1,66 @@
+use rusoto_core::Region;
+use rusoto_s3::{DeleteObjectRequest, PutObjectRequest, S3Client, S3};
+use std::io::Read;
+
+pub struct Client {
+    region: Region,
+    s3: S3Client,
+    bucket_name: String,
+}
+
+impl Client {
+    // Construct an S3 client for the bucket named in AWS_S3_BUCKET_NAME.
+    pub fn new() -> Client {
+        let region = Region::ApNortheast2;
+
+        Client {
+            region: region.to_owned(),
+            s3: S3Client::new(region),
+            bucket_name: std::env::var("AWS_S3_BUCKET_NAME").unwrap(),
+        }
+    }
+
+    // Public URL of an object in this bucket.
+    pub fn url(&self, key: &str) -> String {
+        format!(
+            "https://{}.s3.{}.amazonaws.com/{}",
+            self.bucket_name,
+            self.region.name(),
+            key
+        )
+    }
+
+    // Upload a local file under `key` and return its public URL.
+    pub fn put_object(&self, localfilepath: &str, key: &str) -> String {
+        let mut file = std::fs::File::open(localfilepath).unwrap();
+        let mut contents: Vec<u8> = Vec::new();
+        file.read_to_end(&mut contents).unwrap();
+
+        let put_request = PutObjectRequest {
+            bucket: self.bucket_name.to_owned(),
+            key: key.to_owned(),
+            body: Some(contents.into()),
+            ..Default::default()
+        };
+
+        self.s3
+            .put_object(put_request)
+            .sync()
+            .expect("Failed to put object");
+
+        self.url(key)
+    }
+
+    pub fn delete_object(&self, key: String) {
+        let delete_object_req = DeleteObjectRequest {
+            bucket: self.bucket_name.to_owned(),
+            key,
+            ..Default::default()
+        };
+
+        self.s3
+            .delete_object(delete_object_req)
+            .sync()
+            .expect("Couldn't delete object");
+    }
+}
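+
+// To use another region (see the README links), change the hard-coded
+// `Region::ApNortheast2` in `new()`. A custom endpoint such as MinIO or
+// LocalStack also works; hypothetical endpoint shown:
+//
+//     let region = Region::Custom {
+//         name: "us-east-1".to_owned(),
+//         endpoint: "http://localhost:9000".to_owned(),
+//     };
+//     let s3 = S3Client::new(region);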
diff --git a/multipart-s3/src/utils/upload.rs b/multipart-s3/src/utils/upload.rs
new file mode 100644
index 00000000..9c3e7387
--- /dev/null
+++ b/multipart-s3/src/utils/upload.rs
@@ -0,0 +1,124 @@
+use crate::utils::s3::Client;
+use actix_multipart::{Field, Multipart};
+use actix_web::{web, Error};
+use bytes::{Bytes, BytesMut};
+use futures::StreamExt;
+use serde::{Deserialize, Serialize};
+use std::io::Write;
+
+#[derive(Deserialize, Serialize, Debug, Clone)]
+pub struct UploadFile {
+    pub filename: String,
+    pub key: String,
+    pub url: String,
+}
+
+impl From<Tmpfile> for UploadFile {
+    fn from(tmp_file: Tmpfile) -> Self {
+        UploadFile {
+            filename: tmp_file.name,
+            key: tmp_file.s3_key,
+            url: tmp_file.s3_url,
+        }
+    }
+}
+
+/*
+1. save the field to a tmp file
+2. upload the tmp file to S3
+3. delete the tmp file
+*/
+#[derive(Debug, Clone)]
+pub struct Tmpfile {
+    pub name: String,
+    pub tmp_path: String,
+    pub s3_key: String,
+    pub s3_url: String,
+}
+
+impl Tmpfile {
+    fn new(filename: &str) -> Tmpfile {
+        Tmpfile {
+            name: filename.to_string(),
+            tmp_path: format!("./tmp/{}", filename),
+            s3_key: "".to_string(),
+            s3_url: "".to_string(),
+        }
+    }
+
+    fn s3_upload_and_tmp_remove(&mut self, s3_upload_key: String) {
+        self.s3_upload(s3_upload_key);
+        self.tmp_remove();
+    }
+
+    fn s3_upload(&mut self, s3_upload_key: String) {
+        let key = format!("{}{}", &s3_upload_key, &self.name);
+        self.s3_key = key.clone();
+        self.s3_url = Client::new().put_object(&self.tmp_path, &key);
+    }
+
+    fn tmp_remove(&self) {
+        std::fs::remove_file(&self.tmp_path).unwrap();
+    }
+}
+
+// Split the multipart stream into the raw bytes of the `data` field and the
+// uploaded files, which are saved under ./tmp.
+pub async fn split_payload(payload: &mut Multipart) -> (Bytes, Vec<Tmpfile>) {
+    let mut tmp_files: Vec<Tmpfile> = Vec::new();
+    let mut data = BytesMut::new();
+
+    while let Some(item) = payload.next().await {
+        let mut field: Field = item.expect("split_payload err");
+        let content_disposition = field.content_disposition().unwrap();
+        let name = content_disposition.get_name().unwrap();
+        if name == "data" {
+            // Accumulate every chunk; the field may arrive in several pieces.
+            while let Some(chunk) = field.next().await {
+                data.extend_from_slice(&chunk.expect("split_payload err chunk"));
+            }
+        } else {
+            match content_disposition.get_filename() {
+                Some(filename) => {
+                    let tmp_file = Tmpfile::new(filename);
+                    let tmp_path = tmp_file.tmp_path.clone();
+                    let mut f = web::block(move || std::fs::File::create(&tmp_path))
+                        .await
+                        .unwrap();
+                    while let Some(chunk) = field.next().await {
+                        let bytes = chunk.unwrap();
+                        f = web::block(move || f.write_all(&bytes).map(|_| f))
+                            .await
+                            .unwrap();
+                    }
+                    tmp_files.push(tmp_file);
+                }
+                None => {
+                    println!("field has neither name `data` nor a filename");
+                }
+            }
+        }
+    }
+    (data.freeze(), tmp_files)
+}
+
+// Upload every tmp file to S3 under `s3_upload_key`, removing each local copy.
+pub async fn save_file(
+    tmp_files: Vec<Tmpfile>,
+    s3_upload_key: String,
+) -> Result<Vec<UploadFile>, Error> {
+    let mut arr: Vec<UploadFile> = Vec::new();
+    for mut tmp_file in tmp_files {
+        tmp_file.s3_upload_and_tmp_remove(s3_upload_key.clone());
+        arr.push(UploadFile::from(tmp_file));
+    }
+    Ok(arr)
+}
+
+pub async fn delete_object(list: Vec<String>) {
+    for key in list {
+        Client::new().delete_object(key);
+    }
+}
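+
+// `delete_object` is exported but not wired into the handlers; a hypothetical
+// cleanup using the keys returned by `save_file` would look like:
+//
+//     let keys: Vec<String> = upload_files.iter().map(|f| f.key.clone()).collect();
+//     delete_object(keys).await;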