use std::path::PathBuf;

use aws_config::Region;
use aws_sdk_s3::{config::Credentials, primitives::ByteStream, types::CompletedPart};
use aws_smithy_types::byte_stream::Length;
use clap::{Parser, Subcommand};

use types::response::ListFilesResponseFile;
mod endpoints;
mod types;
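// NOTE: hardcoded, short-lived Firebase JWT and host id for this prototype.
// Both must be replaced with credentials for your own account before running.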
const BEARER_TOKEN: &str = "eyJhbGciOiJSUzI1NiIsImtpZCI6ImMxNTQwYWM3MWJiOTJhYTA2OTNjODI3MTkwYWNhYmU1YjA1NWNiZWMiLCJ0eXAiOiJKV1QifQ.eyJuYW1lIjoi5bm457-8IOW_l-adkSIsInBsYW4iOiJza2YiLCJpc3MiOiJodHRwczovL3NlY3VyZXRva2VuLmdvb2dsZS5jb20vc2VuZHktc2VydmljZSIsImF1ZCI6InNlbmR5LXNlcnZpY2UiLCJhdXRoX3RpbWUiOjE3MjEyMjYwMTUsInVzZXJfaWQiOiJHY2xUN0RybkxGaG83dm5JaXJVemp0TUxoUmsyIiwic3ViIjoiR2NsVDdEcm5MRmhvN3ZuSWlyVXpqdE1MaFJrMiIsImlhdCI6MTcyMTI0NzQxMCwiZXhwIjoxNzIxMjUxMDEwLCJlbWFpbCI6ImtvdXN1a2UxMTIzNjEyNEBnbWFpbC5jb20iLCJlbWFpbF92ZXJpZmllZCI6ZmFsc2UsImZpcmViYXNlIjp7ImlkZW50aXRpZXMiOnsiZW1haWwiOlsia291c3VrZTExMjM2MTI0QGdtYWlsLmNvbSJdfSwic2lnbl9pbl9wcm92aWRlciI6ImN1c3RvbSJ9fQ.ikxsOAbgKhKywvvC1Ot28AEZ7_DTVNaMI2KSEFaZAaPTtgPk6fqYzegW2iwq7GK_ySmCuKppPEeSD8nKDggeX96z36Y1zd5xm7EIWTCdmCB36gjhAkAowVenRX2VW3gIVCJVHUQ50UEVM4CMzw73N058fQ97wAdHVp2oOtZOczJyQpAZuy0zqXSKWvnom0SfNz0iZov7r3TLSBlxSMGjEu_aSInq7yMOSHNkbQHenelv3592EY_ktnFLYSYi1HWEEijqsKSGdf01DYBkC5H8Eq0snk7n8NvKFAaUxT8DClxHlE_xagOnbkfCBh-AN2CqnkwxOi7Kkh0iWOkdMLqK0w";
const HOST_ID: &str = "GclT7DrnLFho7vnIirUzjtMLhRk2";
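// S3 requires every multipart part except the last to be at least 5 MiB,
// so 10 MiB is a comfortable part size.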
const CHUNK_SIZE: usize = 1024 * 1024 * 10; // 10 MiB
#[derive(Parser, Debug)]
#[command(version, about, long_about=None)]
struct Args {
#[command(subcommand)]
command: Commands,
}
#[derive(Subcommand, Debug)]
enum Commands {
List {
#[clap(short, long)]
prefix: Option<String>,
},
Upload {
#[clap(short, long)]
file: PathBuf,
#[clap(short, long)]
prefix: Option<String>,
#[clap(short, long)]
recursive: bool,
},
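    // The remaining subcommands are stubs; their handlers below only print
    // the command name.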
Download {},
Delete {},
MkDir {},
}
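// Example invocations (paths and prefixes are hypothetical):
//   cargo run -- list --prefix photos
//   cargo run -- upload --file ./movie.mp4 --prefix backups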
#[tokio::main]
async fn main() {
let args = Args::parse();
match &args.command {
Commands::List { prefix } => {
let client = endpoints::Client::new(BEARER_TOKEN.to_string(), HOST_ID.to_string());
let pagination_size = 40;
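            // The list endpoint is paged: fetch `pagination_size` entries at a
            // time and keep going until the server reports the last page.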
let mut files = Vec::<ListFilesResponseFile>::new();
let req = types::request::ListFilesRequest {
from: 0,
host_id: client.host_id.clone(),
path: prefix.clone().unwrap_or("".to_string()),
sort_type: "path".to_string(),
reverse: false,
thumbnail_size: 130,
to: pagination_size,
};
            let mut res = client.list_files(req).await.unwrap();
            // Record the page length before append() drains res.file.
            let mut cursor = res.file.len() as i64;
            files.append(&mut res.file);
            if !res.last_page {
                loop {
                    let req = types::request::ListFilesRequest {
                        from: cursor,
                        host_id: client.host_id.clone(),
                        path: prefix.clone().unwrap_or("".to_string()),
                        sort_type: "path".to_string(),
                        reverse: false,
                        thumbnail_size: 130,
                        to: cursor + pagination_size,
                    };
                    let mut next_res = client.list_files(req).await.unwrap();
                    // Advance the cursor before append() empties next_res.file;
                    // reading len() afterwards would always return 0 and the
                    // loop would refetch the same page forever.
                    cursor += next_res.file.len() as i64;
                    files.append(&mut next_res.file);
                    if next_res.last_page {
                        break;
                    }
                }
            }
            for f in &files {
                println!("{:#?}", f);
            }
}
Commands::Upload {
file,
prefix,
recursive,
} => {
            // Bail out early if the path does not exist.
            if !file.exists() {
                println!("File not found: {:?}", file);
                return;
            }
            // Directory upload is not implemented yet, so reject directories
            // (and the --recursive flag) instead of uploading them incorrectly.
            if file.is_dir() || *recursive {
                println!("Folder upload is not supported yet: {:?}", file);
                return;
            }
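            // Ask the Sendy API to validate the upload and return the target
            // bucket, region, and key prefix.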
let client = endpoints::Client::new(BEARER_TOKEN.to_string(), HOST_ID.to_string());
let req = types::request::CheckUploadRequest {
host_id: client.host_id.clone(),
path: prefix.clone().unwrap_or("".to_string()),
upload_id: "".to_string(),
file: vec![types::request::CheckUploadRequestFile {
path: file.to_str().unwrap().to_string(),
size: file.metadata().unwrap().len() as i64,
}],
};
let check_upload_res = client.check_upload(req).await.unwrap();
println!("{:#?}", check_upload_res);
let token_res = client.get_token().await.unwrap();
println!("{:#?}", token_res);
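            // The token endpoint hands back temporary, STS-style credentials
            // (access key, secret key, session token) scoped to this upload.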
            let credentials = Credentials::new(
                token_res.access_key_id.clone(),
                token_res.secret_access_key.clone(),
                Some(token_res.session_token.clone()),
                None,
                "2021-06-01",
            );
let config = aws_sdk_s3::Config::builder()
.behavior_version_latest()
.endpoint_url("https://sendy-cloud.s3.ap-northeast-1.amazonaws.com")
.region(Region::new(check_upload_res.region.clone()))
                .credentials_provider(credentials)
.force_path_style(true)
.build();
let s3_client = aws_sdk_s3::Client::from_conf(config);
let file_size = file.metadata().unwrap().len();
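            // Anything larger than one chunk goes through the multipart-upload
            // path; smaller files are sent with a single PutObject call.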
if file_size > CHUNK_SIZE as u64 {
multipart_upload(token_res, check_upload_res, file.clone())
.await
.unwrap();
} else {
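                // Stream the whole file as the object body in one request.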
let stream = ByteStream::read_from()
.path(file.clone())
.offset(0)
.length(Length::Exact(file_size))
.build()
.await
.unwrap();
let key = check_upload_res.prefix + "/" + check_upload_res.file[0].path.as_str();
let _upload_res = s3_client
.put_object()
.bucket(check_upload_res.bucket)
.key(key)
.body(stream)
.send()
.await
.unwrap();
}
println!("Upload");
}
Commands::Download {} => {
println!("Download");
}
Commands::Delete {} => {
println!("Delete");
}
Commands::MkDir {} => {
println!("MkDir");
}
}
}
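/// Upload `file` to the bucket and key negotiated via check_upload, splitting
/// it into CHUNK_SIZE parts with the S3 multipart-upload API.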
async fn multipart_upload(
token_res: types::response::GetFileLinkTokenResponse,
check_upload_res: types::response::CheckUploadResponse,
file: PathBuf,
) -> anyhow::Result<()> {
    if !file.exists() {
        anyhow::bail!("File not found: {:?}", file);
    }
    let file_size = file.metadata()?.len();
    let credentials = Credentials::new(
        token_res.access_key_id,
        token_res.secret_access_key,
        Some(token_res.session_token),
        // The token response carries an RFC 3339 expiry, e.g. "2024-07-18T07:14:42Z".
        Some(chrono::DateTime::parse_from_rfc3339(&token_res.expiration)?.into()),
        "2021-06-01",
    );
let config = aws_sdk_s3::Config::builder()
.behavior_version_latest()
        .credentials_provider(credentials)
.region(Region::new(check_upload_res.region))
.endpoint_url("https://sendy-cloud.s3.ap-northeast-1.amazonaws.com")
.build();
let s3_client = aws_sdk_s3::Client::from_conf(config);
let key = check_upload_res.prefix + check_upload_res.file[0].path.as_str();
    let multipart_upload_res = s3_client
        .create_multipart_upload()
        .bucket(check_upload_res.bucket.clone())
        .key(key.clone())
        .send()
        .await?;
    let upload_id = multipart_upload_res
        .upload_id()
        .ok_or_else(|| anyhow::anyhow!("CreateMultipartUpload returned no upload_id"))?;
    // Split into full-size chunks plus one trailing chunk. Without the +1 the
    // final partial chunk would be dropped and the object would be truncated.
    let mut chunk_count = (file_size / CHUNK_SIZE as u64) + 1;
    let mut size_of_last_chunk = file_size % CHUNK_SIZE as u64;
    if size_of_last_chunk == 0 {
        // The file size is an exact multiple of CHUNK_SIZE: the "last" chunk
        // is a full one and the extra chunk from the +1 is not needed.
        size_of_last_chunk = CHUNK_SIZE as u64;
        chunk_count -= 1;
    }
let mut upload_parts = Vec::<CompletedPart>::new();
for chunk_index in 0..chunk_count {
let this_chunk = if chunk_count - 1 == chunk_index {
size_of_last_chunk
} else {
CHUNK_SIZE as u64
};
let stream = ByteStream::read_from()
.path(file.clone())
.offset(chunk_index * CHUNK_SIZE as u64)
.length(Length::Exact(this_chunk))
.build()
            .await?;
        // Chunk indexes start at 0, but S3 part numbers start at 1.
let part_number = (chunk_index as i32) + 1;
let upload_part_res = s3_client
.upload_part()
.key(&key)
.bucket(&check_upload_res.bucket)
.upload_id(upload_id)
.body(stream)
.part_number(part_number)
.send()
.await?;
upload_parts.push(
CompletedPart::builder()
.e_tag(upload_part_res.e_tag.unwrap_or_default())
.part_number(part_number)
.build(),
);
}
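    // All parts are uploaded; ask S3 to assemble them, in part-number order,
    // into the final object.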
let completed_multipart_upload = aws_sdk_s3::types::CompletedMultipartUpload::builder()
.set_parts(Some(upload_parts))
.build();
let _complete_multipart_upload_res = s3_client
.complete_multipart_upload()
.bucket(check_upload_res.bucket)
.key(key)
.upload_id(upload_id)
.multipart_upload(completed_multipart_upload)
.send()
        .await?;
Ok(())
}