From 5fb3d3270e0507921afe4df97aa866dd1a8813dd Mon Sep 17 00:00:00 2001
From: cheolgyu <38715510+cheolgyu@users.noreply.github.com>
Date: Wed, 5 Feb 2020 12:53:33 +0900
Subject: [PATCH] example

---
 Cargo.toml                       |   1 +
 multipart-s3/.gitignore          |   3 +
 multipart-s3/Cargo.toml          |  19 +++++
 multipart-s3/README.md           |   8 +++
 multipart-s3/src/main.rs         | 100 ++++++++++++++++++++++++++
 multipart-s3/src/utils/mod.rs    |   2 +
 multipart-s3/src/utils/s3.rs     |  66 +++++++++++++++++
 multipart-s3/src/utils/upload.rs | 120 +++++++++++++++++++++++++++++++
 8 files changed, 319 insertions(+)
 create mode 100644 multipart-s3/.gitignore
 create mode 100644 multipart-s3/Cargo.toml
 create mode 100644 multipart-s3/README.md
 create mode 100644 multipart-s3/src/main.rs
 create mode 100644 multipart-s3/src/utils/mod.rs
 create mode 100644 multipart-s3/src/utils/s3.rs
 create mode 100644 multipart-s3/src/utils/upload.rs

diff --git a/Cargo.toml b/Cargo.toml
index d9217891..aecc98f9 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -19,6 +19,7 @@ members = [
     "juniper",
     "middleware",
     "multipart",
+    "multipart-s3",
     "multipart-async-std",
     "openssl",
 # "protobuf",
diff --git a/multipart-s3/.gitignore b/multipart-s3/.gitignore
new file mode 100644
index 00000000..2f62c534
--- /dev/null
+++ b/multipart-s3/.gitignore
@@ -0,0 +1,3 @@
+/target
+.env
+/tmp
\ No newline at end of file
diff --git a/multipart-s3/Cargo.toml b/multipart-s3/Cargo.toml
new file mode 100644
index 00000000..5cd946c9
--- /dev/null
+++ b/multipart-s3/Cargo.toml
@@ -0,0 +1,19 @@
+[package]
+name = "multipart-s3"
+version = "0.1.0"
+authors = ["cheolgyu <38715510+cheolgyu@users.noreply.github.com>"]
+edition = "2018"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+futures = "0.3.1"
+actix-multipart = "0.2.0"
+actix-web = "2.0.0"
+actix-rt = "1.0.0"
+rusoto_s3 = "0.42.0"
+rusoto_core = "0.42.0"
+bytes = { version = "0.5", features = ["serde"] }
+serde = { version = "1.0.104", features = ["derive"] }
+serde_json = "1.0"
+serde-value = "0.6.0"
\ No newline at end of file
diff --git a/multipart-s3/README.md b/multipart-s3/README.md
new file mode 100644
index 00000000..06f72e41
--- /dev/null
+++ b/multipart-s3/README.md
@@ -0,0 +1,8 @@
+This example uses https://github.com/rusoto/rusoto to upload files to AWS S3.
+
+1. set env AWS_ACCESS_KEY_ID
+2. set env AWS_SECRET_ACCESS_KEY
+3. set env AWS_S3_BUCKET_NAME
+
+The POST handler uploads every multipart file field to S3 and
+deserializes the multipart `data` field into a struct.
\ No newline at end of file
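
Note: the handler added in src/main.rs below expects the multipart field named
`data` to carry JSON that deserializes into the `InpAdd` struct. A minimal,
self-contained sketch of that mapping (the field values are made up, not taken
from the patch):

    // Sketch only: the JSON carried in the multipart `data` field and the
    // struct it is parsed into (mirrors model::InpAdd from src/main.rs).
    use serde::Deserialize;

    #[derive(Deserialize, Debug)]
    struct InpAdd {
        text: String,
        number: i32,
    }

    fn main() {
        let raw = r#"{"text":"test_text","number":123}"#; // example values
        let parsed: InpAdd = serde_json::from_str(raw).unwrap();
        println!("{:?}", parsed);
    }
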
diff --git a/multipart-s3/src/main.rs b/multipart-s3/src/main.rs
new file mode 100644
index 00000000..ffee92df
--- /dev/null
+++ b/multipart-s3/src/main.rs
@@ -0,0 +1,100 @@
+use actix_multipart::Multipart;
+use actix_web::{middleware, web, App, Error, HttpResponse, HttpServer};
+use futures::StreamExt;
+
+mod utils;
+
+use utils::upload::{save_file as upload_save_file, split_payload, UploadFile};
+
+mod model {
+    use serde::{Deserialize, Serialize};
+
+    #[derive(Deserialize, Serialize, Debug)]
+    pub struct InpAdd {
+        pub text: String,
+        pub number: i32,
+    }
+}
+
+async fn save_file(mut payload: Multipart) -> Result<HttpResponse, Error> {
+    // split the multipart stream into the raw `data` field and the temp files
+    let (data, tmp_files) = split_payload(&mut payload).await;
+    println!("bytes={:#?}", data);
+
+    // the `data` field is expected to be JSON matching model::InpAdd
+    let inp_info: model::InpAdd = serde_json::from_slice(&data).unwrap();
+    println!("converted_struct={:#?}", inp_info);
+    println!("tmpfiles={:#?}", tmp_files);
+
+    // key prefix under which the files are stored in the bucket
+    let s3_upload_key = format!("projects/{}/", "posts_id");
+
+    // upload the temp files to S3 and remove the local copies
+    let upload_files: Vec<UploadFile> =
+        upload_save_file(tmp_files, s3_upload_key).await.unwrap();
+    println!("upload_files={:#?}", upload_files);
+
+    Ok(HttpResponse::Ok().finish())
+}
+
+fn index() -> HttpResponse {
+    let html = r#"<html>
+        <head><title>Upload Test</title></head>
+        <body>
+            <form action="/" method="post" enctype="multipart/form-data">
+                <input type="text" name="data" value='{"text":"test_text","number":123}'/>
+                <input type="file" name="file" multiple/>
+                <button type="submit">Submit</button>
+            </form>
+        </body>
+    </html>"#;
+
+    HttpResponse::Ok().body(html)
+}
+
+#[actix_rt::main]
+async fn main() -> std::io::Result<()> {
+    std::env::set_var("RUST_LOG", "actix_server=info,actix_web=info");
+    // NOTE: replace these placeholders with real values, or export the
+    // variables in your shell before starting the server.
+    std::env::set_var("AWS_ACCESS_KEY_ID", "your_key");
+    std::env::set_var("AWS_SECRET_ACCESS_KEY", "your_key");
+    std::env::set_var("AWS_S3_BUCKET_NAME", "your_bucket");
+    std::fs::create_dir_all("./tmp").unwrap();
+
+    let ip = "0.0.0.0:3000";
+
+    HttpServer::new(|| {
+        App::new().wrap(middleware::Logger::default()).service(
+            web::resource("/")
+                .route(web::get().to(index))
+                .route(web::post().to(save_file)),
+        )
+    })
+    .bind(ip)?
+    .run()
+    .await
+}
diff --git a/multipart-s3/src/utils/mod.rs b/multipart-s3/src/utils/mod.rs
new file mode 100644
index 00000000..e23a6f03
--- /dev/null
+++ b/multipart-s3/src/utils/mod.rs
@@ -0,0 +1,2 @@
+pub mod s3;
+pub mod upload;
diff --git a/multipart-s3/src/utils/s3.rs b/multipart-s3/src/utils/s3.rs
new file mode 100644
index 00000000..f424398b
--- /dev/null
+++ b/multipart-s3/src/utils/s3.rs
@@ -0,0 +1,66 @@
+use rusoto_core::Region;
+use rusoto_s3::{DeleteObjectRequest, PutObjectRequest, S3Client, S3};
+use std::io::Read;
+
+pub struct Client {
+    region: Region,
+    s3: S3Client,
+    bucket_name: String,
+}
+
+impl Client {
+    // construct an S3 client for the configured bucket
+    pub fn new() -> Client {
+        let region = Region::ApNortheast2;
+
+        Client {
+            region: region.to_owned(),
+            s3: S3Client::new(region),
+            bucket_name: std::env::var("AWS_S3_BUCKET_NAME").unwrap(),
+        }
+    }
+
+    pub fn url(&self, key: &str) -> String {
+        format!(
+            "https://{}.s3.{}.amazonaws.com/{}",
+            self.bucket_name,
+            self.region.name(),
+            key
+        )
+    }
+
+    pub fn put_object(&self, localfilepath: &str, key: &str) -> String {
+        let mut file = std::fs::File::open(localfilepath).unwrap();
+        let mut contents: Vec<u8> = Vec::new();
+        file.read_to_end(&mut contents).unwrap();
+
+        let put_request = PutObjectRequest {
+            bucket: self.bucket_name.to_owned(),
+            key: key.to_owned(),
+            body: Some(contents.into()),
+            ..Default::default()
+        };
+
+        self.s3
+            .put_object(put_request)
+            .sync()
+            .expect("Failed to put object");
+
+        self.url(key)
+    }
+
+    pub fn delete_object(&self, key: String) {
+        let delete_object_req = DeleteObjectRequest {
+            bucket: self.bucket_name.to_owned(),
+            key,
+            ..Default::default()
+        };
+
+        self.s3
+            .delete_object(delete_object_req)
+            .sync()
+            .expect("Couldn't delete object");
+    }
+}
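
Note: the `Client` wrapper above can also be exercised on its own. A minimal
sketch, assuming the AWS credentials and AWS_S3_BUCKET_NAME are already
exported and that `./tmp/example.txt` exists (both assumptions, not part of
the patch); the object key is made up for illustration:

    // Hypothetical helper (could live in src/main.rs next to the handlers).
    fn s3_roundtrip() {
        let client = utils::s3::Client::new();

        // upload a local file and get back the object URL
        let url = client.put_object("./tmp/example.txt", "projects/demo/example.txt");
        println!("uploaded to {}", url);

        // remove the object again
        client.delete_object("projects/demo/example.txt".to_string());
    }

Creating a fresh `Client` per call is cheap enough for an example and mirrors
how `utils/upload.rs` below uses it.
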
deletefile +*/ +#[derive(Debug, Clone)] +pub struct Tmpfile { + pub name: String, + pub tmp_path: String, + pub s3_key: String, + pub s3_url: String, +} +impl Tmpfile { + fn new(filename: &str) -> Tmpfile { + Tmpfile { + name: filename.to_string(), + tmp_path: format!("./tmp/{}", filename), + s3_key: "".to_string(), + s3_url: "".to_string(), + } + } + + fn s3_upload_and_tmp_remove(&mut self, s3_upload_key: String) { + self.s3_upload(s3_upload_key); + self.tmp_remove(); + } + + fn s3_upload(&mut self, s3_upload_key: String) { + let key = format!("{}{}", &s3_upload_key, &self.name); + self.s3_key = key.clone(); + let url: String = Client::new().put_object(&self.tmp_path, &key.clone()); + self.s3_url = url; + } + + fn tmp_remove(&self) { + std::fs::remove_file(&self.tmp_path).unwrap(); + } +} + +pub async fn split_payload(payload: &mut Multipart) -> (bytes::Bytes, Vec) { + let mut tmp_files: Vec = Vec::new(); + let mut data = Bytes::new(); + + while let Some(item) = payload.next().await { + let mut field: Field = item.expect(" split_payload err"); + let content_type = field.content_disposition().unwrap(); + let name = content_type.get_name().unwrap(); + if name == "data" { + while let Some(chunk) = field.next().await { + data = chunk.expect(" split_payload err chunk"); + } + } else { + if content_type.get_filename() != None { + let filename = content_type.get_filename().unwrap(); + let tmp_file = Tmpfile::new(filename); + let tmp_path = tmp_file.tmp_path.clone(); + let mut f = web::block(move || std::fs::File::create(&tmp_path)) + .await + .unwrap(); + while let Some(chunk) = field.next().await { + let data = chunk.unwrap(); + f = web::block(move || f.write_all(&data).map(|_| f)) + .await + .unwrap(); + } + tmp_files.push(tmp_file.clone()); + } + } + } + (data, tmp_files) +} + +pub async fn save_file( + tmp_files: Vec, + s3_upload_key: String, +) -> Result, Error> { + let mut arr: Vec = Vec::new(); + let mut iter = tmp_files.iter(); + let mut index = 0; + // iterate over multipart stream + while let Some(item) = iter.next() { + let mut tmp_file: Tmpfile = item.clone(); + tmp_file.s3_upload_and_tmp_remove(s3_upload_key.clone()); + arr.push(UplodFile::from(tmp_file)); + } + Ok(arr) +} + +pub async fn delete_object(mut list: Vec) { + for key in list { + Client::new().delete_object(key); + } +}
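
Note: to exercise the endpoint without the HTML page, a small client program
can post the same multipart shape. This is only a sketch; it assumes reqwest
0.10 with the `blocking` and `multipart` features, which is not a dependency
of this example, and the file path and field values are made up:

    // Sketch of a client request against the running example.
    fn main() -> Result<(), Box<dyn std::error::Error>> {
        let form = reqwest::blocking::multipart::Form::new()
            // JSON for the `data` field, matching model::InpAdd
            .text("data", r#"{"text":"hello","number":123}"#)
            // any local file; the server saves it to ./tmp, then uploads it to S3
            .file("file", "./README.md")?;

        let resp = reqwest::blocking::Client::new()
            .post("http://localhost:3000/")
            .multipart(form)
            .send()?;
        println!("status: {}", resp.status());
        Ok(())
    }
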