Compare commits
6 Commits
master...no-aws-sdk
| Author | SHA1 | Date |
|---|---|---|
| | 006a83afac | |
| | 26ca153f57 | |
| | 426300f2fa | |
| | ad827c88e6 | |
| | 6801f8101d | |
| | dc2be28874 | |
BIN src/.DS_Store vendored Normal file
Binary file not shown.
src/client.rs
@@ -89,7 +89,7 @@ impl Client {
         let request = client
             .get(format!(
                 "https://forest.sendy.jp/cloud/service/file/v1/filelink/token?host_id={}&path={}",
-                self.host_id, "hello"
+                self.host_id, ""
             ))
             .bearer_auth(&self.token.read().await);
src/constants.rs
@@ -1,3 +1,3 @@
 pub const REFRESH_TOKEN: &str = "AMf-vBwuDNdrMsPlvESgMqZWGCdVlBxMvrQVNvHOWb-FdDRV0Ozeq26POxH2tzy463DGlZTZpPYYhWCSi-KI0-8pSYEXtuCG_8DlVRyqwm4POeobYWkrw3dMgiEDNjFCfXIN4-k65CsizmCFbWQz6ASsi-XGAwMn_mXyFj9JirB7vyzTTr2ugbA";
-pub const CHUNK_SIZE: usize = 1024 * 1024 * 10; // 10MB
+pub const CHUNK_SIZE: usize = 1024 * 1024 * 100; // 100MB
 pub const APP_VERSION: &str = "v21.11.10";
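The chunk-size bump matters because S3-style multipart uploads accept at most 10,000 parts, so the chunk size caps the largest file a single upload can cover. A minimal sketch of that arithmetic (the `max_file_size` helper is illustrative, not part of this repo):

```rust
/// Illustrative only: largest file a fixed chunk size can cover,
/// given the 10,000-part limit on S3 multipart uploads.
fn max_file_size(chunk_size: u64) -> u64 {
    chunk_size * 10_000
}

fn main() {
    // 10 MiB chunks cap a single multipart upload at ~97.7 GiB...
    assert_eq!(max_file_size(10 * 1024 * 1024), 104_857_600_000);
    // ...while 100 MiB chunks raise that cap to ~977 GiB.
    assert_eq!(max_file_size(100 * 1024 * 1024), 1_048_576_000_000);
}
```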
309 src/lib.rs
@@ -1,8 +1,10 @@
 use aws_config::Region;
 use aws_sdk_s3::config::Credentials;
-use constants::APP_VERSION;
+use constants::{APP_VERSION, CHUNK_SIZE};
 use std::{io::BufReader, path::PathBuf, sync::Arc};
-use util::{check_job, file_detail, list_files, multipart_upload, TargetFile};
+use util::{
+    check_job, file_detail, list_files, multipart_upload, multipart_upload_from_path, single_upload,
+};
 
 mod client;
 mod constants;
@@ -13,6 +15,17 @@ pub struct RakutenDriveClient {
     client: client::Client,
 }
 
+pub struct UploadFile {
+    pub data: Vec<u8>,
+    pub path: String,
+}
+
+#[derive(Debug, Clone)]
+pub struct TargetFile {
+    pub file: PathBuf,
+    pub path: String,
+}
+
 impl RakutenDriveClient {
     pub async fn try_new(refresh_token_str: String) -> anyhow::Result<Self> {
         let client = client::Client::try_new(refresh_token_str).await?;
@@ -34,22 +47,35 @@ impl RakutenDriveClient {
     }
     pub async fn upload(
         &self,
-        file_path: &str,
-        file_data: &[u8],
+        files: Vec<UploadFile>,
         prefix: Option<&str>,
         fake_size: Option<u64>,
         pb: Option<indicatif::ProgressBar>,
     ) -> anyhow::Result<()> {
+        let prefix = if prefix.unwrap_or("") == "/" {
+            ""
+        } else {
+            prefix.unwrap_or("")
+        };
         let req = types::request::CheckUploadRequest {
             host_id: self.client.host_id.clone(),
-            path: prefix.unwrap_or("").to_string(),
-            upload_id: "".to_string(),
-            file: vec![types::request::CheckUploadRequestFile {
-                path: file_path.to_string(),
-                size: fake_size.unwrap_or(file_data.len() as u64) as i64,
-            }],
+            path: prefix.to_string(),
+            file: files
+                .iter()
+                .map(|file| types::request::CheckUploadRequestFile {
+                    path: file.path.clone(),
+                    size: fake_size.unwrap_or(file.data.len() as u64) as i64,
+                    version_id: None,
+                    host_id: None,
+                    hash: None,
+                })
+                .collect(),
         };
+
+        println!("{:#?}", req);
+
+        // println!("prefix: {:?}", prefix.unwrap_or(""));
+
         let check_upload_res = self.client.check_upload(req).await.unwrap();
 
         let token_res = self.client.get_upload_token().await.unwrap();
@@ -68,46 +94,237 @@ impl RakutenDriveClient {
             .force_path_style(true)
             .build();
 
-        multipart_upload(
+        let semaphore = Arc::new(tokio::sync::Semaphore::new(5));
+        let mut handles = Vec::new();
+
+        for (i, file) in files.iter().enumerate() {
+            let token_res = token_res.clone();
+            let check_upload_res = check_upload_res.clone();
+            let file_data = file.data.clone();
+            let pb = pb.clone();
+            let semaphore = semaphore.clone().acquire_owned().await.unwrap();
+            if file_data.len() > CHUNK_SIZE * 10 {
+                multipart_upload(
+                    &token_res,
+                    &check_upload_res.bucket,
+                    &check_upload_res.file[i],
+                    &check_upload_res.prefix,
+                    &check_upload_res.region,
+                    &check_upload_res.upload_id,
+                    &file_data,
+                    pb.clone(),
+                )
+                .await
+                .unwrap();
+            } else {
+                handles.push(tokio::spawn(async move {
+                    let _permit = semaphore;
+                    loop {
+                        match single_upload(
+                            &token_res,
+                            &check_upload_res.bucket,
+                            &check_upload_res.file[i],
+                            &check_upload_res.prefix,
+                            &check_upload_res.region,
+                            &check_upload_res.upload_id,
+                            &file_data,
+                        )
+                        .await
+                        {
+                            Ok(_) => {
+                                // println!("Uploaded: {:?}", i);
+                                break;
+                            }
+                            Err(e) => {
+                                // println!("Error");
+                                continue;
+                            }
+                        }
+                    }
+                    if let Some(pb) = pb {
+                        pb.inc(1);
+                    }
+                }));
+            }
+        }
+
+        for handle in handles {
+            handle.await.unwrap();
+        }
+
+        // multipart_upload(
+        //     &token_res,
+        //     &check_upload_res.bucket,
+        //     &check_upload_res.file[0],
+        //     &check_upload_res.prefix,
+        //     &check_upload_res.region,
+        //     &check_upload_res.upload_id,
+        //     file_data,
+        //     pb,
+        // )
+        // .await
+        // .unwrap();
+        // if file_size > CHUNK_SIZE as u64 {
+        //     for (i, file) in files.iter().enumerate() {
+        //         println!("Multi Uploading: {:?}", file.file);
+
+        //     }
+        // } else {
+        //     for (i, file) in files.iter().enumerate() {
+        //         println!("Uploading: {:?}", file.file);
+        //         let stream = ByteStream::read_from()
+        //             .path(file.file.clone())
+        //             .offset(0)
+        //             .length(Length::Exact(file_size))
+        //             .build()
+        //             .await
+        //             .unwrap();
+        //         let key =
+        //             check_upload_res.prefix.to_owned() + check_upload_res.file[i].path.as_str();
+        //         let _upload_res = s3_client
+        //             .put_object()
+        //             .bucket(check_upload_res.bucket.clone())
+        //             .key(key)
+        //             .body(stream)
+        //             .send()
+        //             .await
+        //             .unwrap();
+        //     }
+        // }
+        // }
+
+        println!("Checking for upload complete...");
+
+        match check_job(&check_upload_res.upload_id, &self.client).await {
+            Ok(_) => Ok(()),
+            Err(e) => {
+                println!("Error: {:?}", e);
+                Err(anyhow::anyhow!("Error: {:?}", e))
+            }
+        }
+    }
+
+    pub async fn upload_from_path(
+        &self,
+        file: TargetFile,
+        custom_file_name: Option<&str>,
+        prefix: Option<&str>,
+        fake_size: Option<u64>,
+        offset: Option<u64>,
+        length: Option<u64>,
+        pb: Option<indicatif::ProgressBar>,
+    ) -> anyhow::Result<()> {
+        if offset.is_some() && length.is_none() || offset.is_none() && length.is_some() {
+            println!("offset and length must be set together");
+            return Err(anyhow::anyhow!("offset and length must be set together"));
+        }
+
+        let prefix = if prefix.unwrap_or("") == "/" {
+            ""
+        } else {
+            prefix.unwrap_or("")
+        };
+
+        let file_name = match custom_file_name {
+            Some(n) => n,
+            None => file.file.file_name().unwrap().to_str().unwrap(),
+        };
+
+        let req = types::request::CheckUploadRequest {
+            host_id: self.client.host_id.clone(),
+            path: prefix.to_string(),
+            file: vec![types::request::CheckUploadRequestFile {
+                path: file_name.to_string(),
+                size: fake_size.unwrap_or(file.file.metadata().unwrap().len() as u64) as i64,
+                hash: None,
+                host_id: None,
+                version_id: None,
+            }],
+        };
+
+        println!("upload from path");
+        println!("{:#?}", req);
+
+        // println!("prefix: {:?}", prefix.unwrap_or(""));
+
+        let check_upload_res = self.client.check_upload(req).await.unwrap();
+
+        let token_res = self.client.get_upload_token().await.unwrap();
+
+        let cledential = Credentials::new(
+            token_res.access_key_id.clone(),
+            token_res.secret_access_key.clone(),
+            Some(token_res.session_token.clone()),
+            None,
+            "2021-06-01",
+        );
+        let _config = aws_sdk_s3::Config::builder()
+            .behavior_version_latest()
+            .region(Region::new(check_upload_res.region.clone()))
+            .credentials_provider(cledential)
+            .force_path_style(true)
+            .build();
+
+        let token_res = token_res.clone();
+        let check_upload_res = check_upload_res.clone();
+        let pb = pb.clone();
+        multipart_upload_from_path(
             &token_res,
             &check_upload_res.bucket,
             &check_upload_res.file[0],
             &check_upload_res.prefix,
             &check_upload_res.region,
             &check_upload_res.upload_id,
-            file_data,
-            pb,
+            file,
+            offset,
+            length,
+            pb.clone(),
         )
         .await
         .unwrap();
+
+        // multipart_upload(
+        //     &token_res,
+        //     &check_upload_res.bucket,
+        //     &check_upload_res.file[0],
+        //     &check_upload_res.prefix,
+        //     &check_upload_res.region,
+        //     &check_upload_res.upload_id,
+        //     file_data,
+        //     pb,
+        // )
+        // .await
+        // .unwrap();
         // if file_size > CHUNK_SIZE as u64 {
         //     for (i, file) in files.iter().enumerate() {
         //         println!("Multi Uploading: {:?}", file.file);
 
         //     }
         // } else {
         //     for (i, file) in files.iter().enumerate() {
         //         println!("Uploading: {:?}", file.file);
         //         let stream = ByteStream::read_from()
         //             .path(file.file.clone())
         //             .offset(0)
         //             .length(Length::Exact(file_size))
         //             .build()
         //             .await
         //             .unwrap();
         //         let key =
         //             check_upload_res.prefix.to_owned() + check_upload_res.file[i].path.as_str();
         //         let _upload_res = s3_client
         //             .put_object()
         //             .bucket(check_upload_res.bucket.clone())
         //             .key(key)
         //             .body(stream)
         //             .send()
         //             .await
         //             .unwrap();
         //     }
         // }
         // }
+        // }
 
+        println!("Checking for upload complete...");
+
         match check_job(&check_upload_res.upload_id, &self.client).await {
             Ok(_) => Ok(()),
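The rewritten `upload` drives per-file tasks through an `Arc<tokio::sync::Semaphore>`: each task takes an owned permit before it is spawned, so at most five single uploads run at once. A stripped-down sketch of that pattern, with placeholder work standing in for the real S3 calls:

```rust
use std::sync::Arc;
use tokio::sync::Semaphore;

#[tokio::main]
async fn main() {
    // Mirror of Semaphore::new(5) in the diff: at most 5 tasks in flight.
    let semaphore = Arc::new(Semaphore::new(5));
    let mut handles = Vec::new();

    for i in 0..20 {
        // acquire_owned() moves a permit out of the Arc; awaiting here
        // stalls the spawn loop once all permits are taken.
        let permit = semaphore.clone().acquire_owned().await.unwrap();
        handles.push(tokio::spawn(async move {
            // Holding the permit for the task body releases it on drop,
            // letting the next queued task start.
            let _permit = permit;
            println!("uploading item {i}");
        }));
    }

    for handle in handles {
        handle.await.unwrap();
    }
}
```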
@@ -166,9 +383,15 @@ impl RakutenDriveClient {
 
     pub async fn mkdir(&self, name: &str, path: Option<&str>) -> anyhow::Result<()> {
         if name.contains('/') {
-            println!("Please use --path option for set parent directory");
+            // println!("Please use --path option for set parent directory");
             return Err(anyhow::anyhow!(
-                "Please use --path option for set parent directory"
+                "Cannot use / in folder name. Use the --path option to set the parent directory."
             ));
         }
+        if name.len() > 255 {
+            println!("Folder name should be less than 255 characters.");
+            return Err(anyhow::anyhow!(
+                "Folder name should be less than 255 characters."
+            ));
+        }
         let req = types::request::CreateFolderRequest {
@@ -190,11 +413,11 @@ impl RakutenDriveClient {
     }
 
     pub async fn rename(&self, path: &str, name: &str) -> anyhow::Result<()> {
-        if name.contains('/') {
-            println!("Can't use / in file name");
-            println!("Name should be file name only.");
-            return Err(anyhow::anyhow!("Can't use / in file name"));
-        }
+        // if name.contains('/') {
+        //     println!("Can't use / in file name");
+        //     println!("Name should be file name only.");
+        //     return Err(anyhow::anyhow!("Can't use / in file name"));
+        // }
 
         let file_path =
             path.split('/').collect::<Vec<&str>>()[0..path.split('/').count() - 1].join("/") + "/";
369 src/main.rs
@@ -1,7 +1,10 @@
 use std::{
     cmp::{max, min},
+    collections::{HashMap, HashSet},
     io::{stdout, Write},
+    os::unix::fs::FileTypeExt,
     path::{Path, PathBuf},
+    str::FromStr,
     sync::Arc,
 };
 
@@ -15,7 +18,7 @@ use clap::{Parser, Subcommand};
 use constants::REFRESH_TOKEN;
 use human_bytes::human_bytes;
 use indicatif::{ProgressBar, ProgressState, ProgressStyle};
-use rakuten_drive_cui::RakutenDriveClient;
+use rakuten_drive_cui::{RakutenDriveClient, TargetFile, UploadFile};
 use tokio::{
     fs::File,
     io::{AsyncReadExt, BufReader},
@@ -59,6 +62,34 @@ enum Commands {
         /// Send fake file size to server (byte)
         #[clap(short, long)]
         fake_size: Option<u64>,
+
+        /// Do not check file existence
+        #[clap(long)]
+        force: bool,
+
+        /// Stream Upload
+        #[clap(short, long)]
+        stream: bool,
+    },
+    #[clap(about = "Upload ultra big file")]
+    UploadBig {
+        file: PathBuf,
+
+        /// Parent folder path
+        #[clap(short, long)]
+        prefix: Option<String>,
+
+        /// Send fake file size to server (byte)
+        #[clap(short, long)]
+        fake_size: Option<u64>,
+
+        /// Do not check file existence
+        #[clap(long)]
+        force: bool,
+
+        /// Set division size in bytes
+        #[clap(long)]
+        length: Option<u64>,
     },
     #[clap(about = "Download file")]
     Download {
@@ -119,64 +150,79 @@ enum Commands {
 async fn main() -> anyhow::Result<()> {
     let args = Args::parse();
 
-    let client = RakutenDriveClient::try_new(REFRESH_TOKEN.to_string()).await?;
+    let client = Arc::new(RakutenDriveClient::try_new(REFRESH_TOKEN.to_string()).await?);
 
-    match &args.command {
+    match args.command {
         Commands::List { prefix } => {
-            client.list(prefix.as_deref()).await.unwrap();
+            let res = client.list(prefix.as_deref()).await.unwrap();
+            res.file.iter().for_each(|f| {
+                let dir_str = if f.is_folder { "d" } else { " " };
+                println!(
+                    "{}\t{}\t{}\t{}",
+                    dir_str,
+                    human_bytes(f.size as f64),
+                    f.last_modified,
+                    f.path,
+                )
+            });
         }
         Commands::Upload {
            file,
            prefix,
            recursive,
            fake_size,
+           force,
+           stream,
         } => {
             // is folder
-            if file.is_dir() && !*recursive {
+            if file.is_dir() && !recursive {
                 println!("Use --recursive option for folder upload");
                 return Err(anyhow::anyhow!("Use --recursive option for folder upload"));
             }
+            if stream && recursive {
+                println!("Can't use Stream Upload and Recursive Upload together.");
+                return Err(anyhow::anyhow!(
+                    "Can't use Stream Upload and Recursive Upload together."
+                ));
+            }
+            if let Some(prefix) = prefix.as_ref() {
+                if !prefix.ends_with('/') {
+                    println!("Prefix must end with /");
+                    return Err(anyhow::anyhow!("Prefix must end with /"));
+                }
+            }
+            println!("is_dir: {}", file.is_dir());
+            if file.is_dir() {
+                println!("name: {:?}", file.file_name().unwrap().to_str().unwrap());
+                println!("prefix: {:?}", prefix.as_deref());
+                client
+                    .mkdir(
+                        file.file_name().unwrap().to_str().unwrap(),
+                        prefix.as_deref(),
+                    )
+                    .await?;
+            }
+            let prefix = if file.is_dir() && prefix.is_none() {
+                Some(file.file_name().unwrap().to_str().unwrap().to_string() + "/")
+            } else {
+                prefix
+            };
+
             let mut files = Vec::<TargetFile>::new();
 
-            if file.is_dir() && *recursive {
-                // upload folder
-                let mut dirs = Vec::<PathBuf>::new();
-                dirs.push(file.clone());
-                while let Some(dir) = dirs.pop() {
-                    let entries = std::fs::read_dir(dir).unwrap();
-                    for entry in entries {
-                        let entry = entry.unwrap();
-                        let path = entry.path();
-                        if path.is_dir() {
-                            dirs.push(path);
-                        } else {
-                            files.push(TargetFile {
-                                file: path.clone(),
-                                path: path
-                                    .strip_prefix(file)
-                                    .unwrap()
-                                    .to_str()
-                                    .expect("Invalid File Name")
-                                    .to_string(),
-                            });
-                        }
-                    }
-                }
-                // for file in files {
-                //     println!("{:?}", file);
-                // }
-            } else {
-                // file check
-                if !file.exists() {
-                    println!("File not found: {:?}", file);
-                    return Err(anyhow::anyhow!("File not found: {:?}", file));
-                }
-                files.push(TargetFile {
-                    file: file.clone(),
-                    path: file.file_name().unwrap().to_str().unwrap().to_string(),
-                });
-            }
+            // file check
+            if !file.exists() {
+                println!("File not found: {:?}", file);
+                return Err(anyhow::anyhow!("File not found: {:?}", file));
+            }
+            let target_file = TargetFile {
+                file: file.clone(),
+                path: file.file_name().unwrap().to_str().unwrap().to_string(),
+            };
+            let path = match prefix.clone() {
+                Some(p) => p + target_file.clone().path.as_str(),
+                None => target_file.clone().path,
+            };
 
             if cfg!(windows) {
                 // replace \ with /
@@ -185,36 +231,223 @@ async fn main() -> anyhow::Result<()> {
                 });
             }
 
-            for file in &files {
-                if client.info(file.path.as_str()).await.is_ok() {
+            if !force {
+                println!("Checking file existence...");
+                println!("Checking: {:?}", path);
+                let res = client.info(path.as_str()).await;
+                if res.is_ok() {
                     println!("File already exists.");
                     return Err(anyhow::anyhow!("File already exists."));
+                } else {
+                    println!("{:?}", res.err().unwrap());
                 }
             }
 
-            for file in &files {
-                let file_size = file.file.metadata().unwrap().len();
-                let file_data = File::open(file.file.clone()).await?;
-                let mut file_reader = tokio::io::BufReader::new(file_data);
-                let mut file_data: Vec<u8> = Vec::with_capacity(file_size as usize);
-                file_reader.read_to_end(&mut file_data).await?;
+            let file_size = target_file.file.metadata().unwrap().len();
+            // let file_data = File::open(target_file.file.clone()).await.unwrap();
 
             let pb = ProgressBar::new(file_size);
             pb.set_style(ProgressStyle::with_template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({eta})")
                 .unwrap()
                 .with_key("eta", |state: &ProgressState, w: &mut dyn std::fmt::Write| write!(w, "{:.1}s", state.eta().as_secs_f64()).unwrap())
                 .progress_chars("#>-"));
 
-            client
-                .upload(
-                    &file.path,
-                    &file_data,
-                    prefix.as_deref(),
-                    *fake_size,
-                    Some(pb),
-                )
-                .await
-                .unwrap();
+            // let file_name = target_file.file.file_name().unwrap().to_str().unwrap();
+            // let file_dir = target_file.path.split('/').collect::<Vec<&str>>()
+            //     [..target_file.path.split('/').count() - 1]
+            //     .join("/")
+            //     + "/";
+
+            // println!("file.path: {:?}", file_name);
+            // println!("prefix: {:?}", file_dir);
+
+            client
+                .upload_from_path(
+                    target_file.clone(),
+                    None,
+                    prefix.as_deref(),
+                    fake_size,
+                    None,
+                    None,
+                    Some(pb),
+                )
+                .await
+                .unwrap();
+
+            for i in 0..5 {
+                println!("Checking: {:?}", path);
+                let res = client.info(path.as_str()).await;
+                if res.is_ok() {
+                    println!("File exists.");
+                    break;
+                } else {
+                    println!("{:?}", res.err().unwrap());
+                    if i > 5 {
+                        return Err(anyhow::anyhow!("File does not exist."));
+                    }
+                    tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
+                    continue;
+                }
+            }
+        }
+        Commands::UploadBig {
+            file,
+            prefix,
+            fake_size,
+            force,
+            length,
+        } => {
+            // is folder
+            if file.is_dir() {
+                println!("Can't upload a folder");
+                return Err(anyhow::anyhow!("Can't upload a folder"));
+            }
+            if let Some(prefix) = prefix.as_ref() {
+                if !prefix.ends_with('/') {
+                    println!("Prefix must end with /");
+                    return Err(anyhow::anyhow!("Prefix must end with /"));
+                }
+            }
+
+            let mut files = Vec::<TargetFile>::new();
+
+            // file check
+            if !file.exists() {
+                println!("File not found: {:?}", file);
+                return Err(anyhow::anyhow!("File not found: {:?}", file));
+            }
+            let target_file = TargetFile {
+                file: file.clone(),
+                path: file.file_name().unwrap().to_str().unwrap().to_string(),
+            };
+            let path = match prefix.clone() {
+                Some(p) => p + target_file.clone().path.as_str(),
+                None => target_file.clone().path,
+            };
+
+            if cfg!(windows) {
+                // replace \ with /
+                files.iter_mut().for_each(|f| {
+                    f.path = f.path.replace('\\', "/");
+                });
+            }
+
+            let is_blockdevice = std::fs::metadata(target_file.file.as_path())
+                .unwrap()
+                .file_type()
+                .is_block_device();
+
+            let file_size = if is_blockdevice {
+                let sectors: u64 = std::fs::read_to_string(format!(
+                    "/sys/block/{}/size",
+                    target_file.file.file_name().unwrap().to_str().unwrap()
+                ))
+                .unwrap()
+                .trim()
+                .parse()
+                .unwrap();
+                sectors.checked_mul(512).unwrap()
+            } else {
+                target_file.file.metadata().unwrap().len()
+            };
+
+            let file_chunk_size = length.unwrap_or(1 * 1024 * 1024 * 1024); // 1GB
+
+            let mut chunk_count = file_size.checked_div(file_chunk_size).unwrap();
+            let mut size_of_last_chunk = file_size % file_chunk_size;
+
+            if size_of_last_chunk == 0 {
+                size_of_last_chunk = file_chunk_size;
+                chunk_count -= 1;
+            }
+
+            for file_chunk_index in 0..chunk_count + 1 {
+                // if !force {
+                //     println!("Checking file existence...");
+                //     println!("Checking: {:?}", path);
+                //     let res = client.info(path.as_str()).await;
+                //     if res.is_ok() {
+                //         println!("File already exists.");
+                //         return Err(anyhow::anyhow!("File already exists."));
+                //     } else {
+                //         println!("{:?}", res.err().unwrap());
+                //     }
+                // }
+
+                let this_chunk_size = if chunk_count == file_chunk_index {
+                    size_of_last_chunk
+                } else {
+                    file_chunk_size
+                };
+
+                let offset = file_chunk_index * file_chunk_size;
+
+                let file_name = target_file
+                    .file
+                    .file_name()
+                    .unwrap()
+                    .to_str()
+                    .unwrap()
+                    .to_string()
+                    + "."
+                    + (file_chunk_index + 1).to_string().as_str();
+
+                let file_path = match prefix.clone() {
+                    Some(p) => p + file_name.as_str(),
+                    None => file_name.clone(),
+                };
+
+                // let file_data = File::open(target_file.file.clone()).await.unwrap();
+
+                loop {
+                    println!(
+                        "Uploading {}/{} {}...",
+                        file_chunk_index + 1,
+                        chunk_count + 1,
+                        file_name
+                    );
+                    let pb = ProgressBar::new(this_chunk_size);
+                    pb.set_style(ProgressStyle::with_template("{spinner:.green} [{elapsed_precise}] [{wide_bar:.cyan/blue}] {bytes}/{total_bytes} ({eta})")
+                        .unwrap()
+                        .with_key("eta", |state: &ProgressState, w: &mut dyn std::fmt::Write| write!(w, "{:.1}s", state.eta().as_secs_f64()).unwrap())
+                        .progress_chars("#>-"));
+
+                    match client
+                        .upload_from_path(
+                            target_file.clone(),
+                            Some(file_name.as_str()),
+                            prefix.as_deref(),
+                            fake_size,
+                            Some(offset),
+                            Some(this_chunk_size),
+                            Some(pb),
+                        )
+                        .await
+                    {
+                        Ok(_) => {}
+                        Err(e) => {
+                            println!("ERROR: {:#?}", e);
+                            continue;
+                        }
+                    };
+
+                    for i in 0..20 {
+                        println!("Checking: {:?}", file_path);
+                        let res = client.info(file_path.as_str()).await;
+                        if res.is_ok() {
+                            println!("File exists.");
+                            break;
+                        } else {
+                            println!("{:?}", res.err().unwrap());
+                            if i > 20 {
+                                return Err(anyhow::anyhow!("File does not exist."));
+                            }
+                            tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
+                            continue;
+                        }
+                    }
+                    break;
+                }
             }
         }
         Commands::Download { path, prefix } => {
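The `UploadBig` arm above slices the source into `<name>.1`, `<name>.2`, … pieces using plain division-with-remainder; the last piece absorbs the remainder unless the size divides evenly. The same bookkeeping in isolation (hypothetical `split` helper mirroring the diff's `chunk_count` / `size_of_last_chunk` variables):

```rust
/// Illustrative mirror of the UploadBig arithmetic: returns the number of
/// full chunks and the size of the final chunk; the upload loop then runs
/// for chunk_count + 1 iterations.
fn split(file_size: u64, chunk: u64) -> (u64, u64) {
    let mut chunk_count = file_size / chunk;
    let mut size_of_last_chunk = file_size % chunk;
    if size_of_last_chunk == 0 {
        size_of_last_chunk = chunk;
        chunk_count -= 1;
    }
    (chunk_count, size_of_last_chunk)
}

fn main() {
    let gib: u64 = 1024 * 1024 * 1024;
    // 2.5 GiB at the default 1 GiB division: parts .1 and .2 are full,
    // part .3 carries the remaining 0.5 GiB.
    assert_eq!(split(5 * gib / 2, gib), (2, gib / 2));
    // An exact multiple folds the remainder into the last part.
    assert_eq!(split(3 * gib, gib), (2, gib));
}
```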
@@ -224,22 +457,22 @@ async fn main() -> anyhow::Result<()> {
                 .unwrap();
         }
         Commands::Move { path, dest } => {
-            client.move_file(path, dest).await.unwrap();
+            client.move_file(&path, &dest).await.unwrap();
         }
         Commands::Delete { path, recursive } => {
-            client.delete(path, recursive).await.unwrap();
+            client.delete(&path, &recursive).await.unwrap();
         }
         Commands::Mkdir { name, path } => {
-            client.mkdir(name, path.as_deref()).await.unwrap();
+            client.mkdir(&name, path.as_deref()).await.unwrap();
         }
         Commands::Copy { src, dest } => {
-            client.copy(src, dest).await.unwrap();
+            client.copy(&src, &dest).await.unwrap();
         }
         Commands::Rename { path, name } => {
-            client.rename(path, name).await.unwrap();
+            client.rename(&path, &name).await.unwrap();
         }
         Commands::Info { path } => {
-            client.info(path).await.unwrap();
+            client.info(&path).await.unwrap();
         }
         Commands::Auth {} => {
             println!("Click the link below to authorize the app:\n");
@@ -76,7 +76,7 @@ pub struct CheckUploadRequest {
     pub file: Vec<CheckUploadRequestFile>,
     pub host_id: String,
     pub path: String,
-    pub upload_id: String,
+    // pub upload_id: String,
 }
 
 #[derive(Debug, Serialize, Deserialize)]
@@ -84,6 +84,9 @@ pub struct CheckUploadRequest {
 pub struct CheckUploadRequestFile {
     pub path: String,
     pub size: i64,
+    pub hash: Option<bool>,       // always null
+    pub version_id: Option<bool>, // always null
+    pub host_id: Option<bool>,    // always null
 }
 
 #[derive(Debug, Serialize, Deserialize)]
@@ -100,6 +103,9 @@ pub struct CompleteUploadRequest {
 pub struct CompleteUploadRequestFile {
     pub path: String,
     pub size: i64,
+    pub hash: Option<bool>,       // always null
+    pub version_id: Option<bool>, // always null
+    pub host_id: Option<bool>,    // always null
 }
 
 #[derive(Debug, Serialize, Deserialize)]
@@ -159,3 +165,4 @@ pub struct VerifyCustomTokenRequest {
     pub return_secure_token: bool,
     pub token: String,
 }
+
@@ -75,7 +75,7 @@ pub struct CheckActionResponse {
     pub message: Option<String>,
 }
 
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Serialize, Deserialize, Clone)]
 #[serde(rename_all = "snake_case")]
 pub struct CheckUploadResponse {
     pub bucket: String,
@@ -85,7 +85,7 @@ pub struct CheckUploadResponse {
     pub upload_id: String,
 }
 
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Serialize, Deserialize, Clone)]
 
 pub struct CheckUploadResponseFile {
     pub last_modified: String, // 1970-01-20T22:07:12.804Z
@@ -94,7 +94,7 @@ pub struct CheckUploadResponseFile {
     pub version_id: String,
 }
 
-#[derive(Debug, Serialize, Deserialize)]
+#[derive(Debug, Serialize, Deserialize, Clone)]
 #[serde(rename_all = "PascalCase")]
 pub struct GetFileLinkTokenResponse {
     pub access_key_id: String,
311 src/util.rs
@@ -10,7 +10,7 @@ use aws_sdk_s3::{
     config::Credentials, operation::upload_part, primitives::ByteStream, types::CompletedPart,
 };
 use aws_smithy_runtime::client::http::hyper_014::HyperClientBuilder;
-use aws_smithy_types::byte_stream::Length;
+use aws_smithy_types::byte_stream::{FsBuilder, Length};
 use clap::{Parser, Subcommand};
 use human_bytes::human_bytes;
 use indicatif::{ProgressBar, ProgressState, ProgressStyle};
@@ -19,13 +19,64 @@ use tokio::{fs::File, io::BufReader, sync::Mutex};
 use crate::{
     client::{self},
     types::response::ListFilesResponseFile,
+    TargetFile,
 };
 use crate::{constants::CHUNK_SIZE, types};
 
-#[derive(Debug, Clone)]
-pub struct TargetFile {
-    pub file: PathBuf,
-    pub path: String,
-}
+pub async fn single_upload(
+    token_res: &types::response::GetFileLinkTokenResponse,
+    bucket: &str,
+    target_file: &types::response::CheckUploadResponseFile,
+    prefix: &str,
+    region: &str,
+    upload_id: &str,
+    file: &[u8],
+) -> anyhow::Result<()> {
+    let _ = upload_id;
+    // if !file.file.exists() {
+    //     println!("File not found: {:?}", file.file);
+    //     return Err(anyhow::anyhow!("File not found: {:?}", file.file));
+    // }
+
+    let file_size = file.len() as u64;
+
+    let cledential = Credentials::new(
+        &token_res.access_key_id,
+        &token_res.secret_access_key,
+        Some(token_res.session_token.clone()),
+        // 2024-07-18T07:14:42Z
+        Some(
+            chrono::DateTime::parse_from_rfc3339(&token_res.expiration)
+                .unwrap()
+                .into(),
+        ),
+        "2021-06-01",
+    );
+
+    let config = aws_sdk_s3::Config::builder()
+        .behavior_version_latest()
+        .credentials_provider(cledential)
+        .region(Region::new(region.to_owned()))
+        // .endpoint_url("https://sendy-cloud.s3.ap-northeast-1.amazonaws.com")
+        .build();
+
+    let s3_client = aws_sdk_s3::Client::from_conf(config);
+
+    let key = prefix.to_owned() + target_file.path.as_str();
+
+    let stream = ByteStream::from(file.to_vec());
+
+    let _put_object_res = s3_client
+        .put_object()
+        .bucket(bucket)
+        .key(key)
+        .body(stream)
+        .send()
+        .await?;
+
+    // println!("Uploaded");
+
+    Ok(())
 }
 
 pub async fn multipart_upload(
@@ -185,6 +236,200 @@ pub async fn multipart_upload(
 
     if let Some(pb) = pb {
         pb.finish_with_message("Uploaded");
+    } else {
+        println!("Uploaded");
+    }
+
+    Ok(())
+}
+
+pub async fn multipart_upload_from_path(
+    token_res: &types::response::GetFileLinkTokenResponse,
+    bucket: &str,
+    target_file: &types::response::CheckUploadResponseFile,
+    prefix: &str,
+    region: &str,
+    upload_id: &str,
+    file: TargetFile,
+    offset: Option<u64>,
+    length: Option<u64>,
+    pb: Option<ProgressBar>,
+) -> anyhow::Result<()> {
+    let _ = upload_id;
+    if !file.file.exists() {
+        println!("File not found: {:?}", file.file);
+        return Err(anyhow::anyhow!("File not found: {:?}", file.file));
+    }
+
+    if offset.is_some() && length.is_none() || offset.is_none() && length.is_some() {
+        println!("offset and length must be set together");
+        return Err(anyhow::anyhow!("offset and length must be set together"));
+    }
+
+    let file_size = match length {
+        Some(length) => length,
+        None => file.file.metadata().unwrap().len() as u64,
+    };
+
+    println!("file_size: {}", file_size);
+
+    let cledential = Credentials::new(
+        &token_res.access_key_id,
+        &token_res.secret_access_key,
+        Some(token_res.session_token.clone()),
+        // 2024-07-18T07:14:42Z
+        Some(
+            chrono::DateTime::parse_from_rfc3339(&token_res.expiration)
+                .unwrap()
+                .into(),
+        ),
+        "2021-06-01",
+    );
+
+    let config = aws_sdk_s3::Config::builder()
+        .behavior_version_latest()
+        .credentials_provider(cledential)
+        .region(Region::new(region.to_owned()))
+        // .endpoint_url("https://sendy-cloud.s3.ap-northeast-1.amazonaws.com")
+        .build();
+
+    let s3_client = aws_sdk_s3::Client::from_conf(config);
+
+    let key = prefix.to_owned() + target_file.path.as_str();
+
+    let multipart_upload_res = s3_client
+        .create_multipart_upload()
+        .bucket(bucket)
+        .key(key.clone())
+        .send()
+        .await;
+
+    let multipart_upload_res = match multipart_upload_res {
+        Ok(res) => res,
+        Err(e) => return Err(anyhow::anyhow!("Can't create multipart upload: {:?}", e)),
+    };
+
+    let upload_id = multipart_upload_res.upload_id().unwrap().to_string();
+
+    let chunk_size = max(CHUNK_SIZE as u64, file_size / 10000);
+
+    let mut chunk_count = file_size / chunk_size;
+    let mut size_of_last_chunk = file_size % chunk_size;
+
+    if size_of_last_chunk == 0 {
+        size_of_last_chunk = chunk_size;
+        chunk_count -= 1;
+    }
+
+    let upload_parts = Arc::new(Mutex::new(Vec::<CompletedPart>::new()));
+
+    let semaphore = Arc::new(tokio::sync::Semaphore::new(10));
+    let mut handles = Vec::new();
+
+    for chunk_index in 0..chunk_count + 1 {
+        let bucket = bucket.to_owned();
+        let key = key.clone();
+        let upload_id = upload_id.clone();
+        let s3_client = s3_client.clone();
+        let pb = pb.clone();
+        let file = file.to_owned();
+        let upload_parts = upload_parts.clone();
+
+        let semaphore = semaphore.clone().acquire_owned().await.unwrap();
+
+        let handle = tokio::spawn(async move {
+            let _permit = semaphore;
+
+            let this_chunk = if chunk_count == chunk_index {
+                size_of_last_chunk
+            } else {
+                chunk_size
+            };
+            loop {
+                let stream = ByteStream::read_from()
+                    .path(file.file.clone())
+                    .offset(chunk_index * chunk_size + offset.unwrap_or(0))
+                    .length(Length::Exact(this_chunk))
+                    .build()
+                    .await;
+                let stream = match stream {
+                    Ok(stream) => stream,
+                    Err(e) => {
+                        eprintln!("Error: {:?}", e);
+                        continue;
+                    }
+                };
+                // let stream = match stream {
+                //     Ok(stream) => stream,
+                //     Err(e) => {
+                //         eprintln!("Error: {:?}", e);
+                //         continue;
+                //     }
+                // };
+                // Chunk index needs to start at 0, but part numbers start at 1.
+                let part_number = (chunk_index as i32) + 1;
+                let upload_part_res = s3_client
+                    .upload_part()
+                    .key(&key)
+                    .bucket(bucket.clone())
+                    .upload_id(upload_id.clone())
+                    .body(stream)
+                    .part_number(part_number)
+                    .send()
+                    .await;
+                let upload_part_res = match upload_part_res {
+                    Ok(upload_part_res) => upload_part_res,
+                    Err(e) => {
+                        eprintln!("Error: {:?}", e);
+                        continue;
+                    }
+                };
+                upload_parts.lock().await.push(
+                    CompletedPart::builder()
+                        .e_tag(upload_part_res.e_tag.unwrap_or_default())
+                        .part_number(part_number)
+                        .build(),
+                );
+                if let Some(pb) = &pb {
+                    pb.inc(this_chunk);
+                }
+                break;
+            }
+        });
+        handles.push(handle);
+    }
+
+    for handle in handles {
+        handle.await.unwrap();
+    }
+
+    upload_parts
+        .lock()
+        .await
+        .sort_by(|a, b| a.part_number.cmp(&b.part_number));
+
+    let completed_multipart_upload = aws_sdk_s3::types::CompletedMultipartUpload::builder()
+        .set_parts(Some(upload_parts.lock().await.clone()))
+        .build();
+
+    let _complete_multipart_upload_res = s3_client
+        .complete_multipart_upload()
+        .bucket(bucket)
+        .key(key)
+        .upload_id(upload_id)
+        .multipart_upload(completed_multipart_upload)
+        .send()
+        .await;
+
+    match _complete_multipart_upload_res {
+        Ok(_) => {}
+        Err(e) => return Err(anyhow::anyhow!("Can't complete multipart upload: {:?}", e)),
+    };
+
+    if let Some(pb) = pb {
+        pb.finish_with_message("Uploaded");
+    } else {
+        println!("Uploaded");
     }
 
     Ok(())
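`multipart_upload_from_path` picks its part size as `max(CHUNK_SIZE as u64, file_size / 10000)`, growing parts for very large files so the count stays near the 10,000-part ceiling. A worked check of that bound (note the floor division can still land one part over the cap; `file_size.div_ceil(10_000)` as the chunk would bound it exactly):

```rust
use std::cmp::max;

fn main() {
    const CHUNK_SIZE: u64 = 1024 * 1024 * 100; // 100 MiB, as in src/constants.rs

    // A 2 TiB file would need ~20,972 parts at 100 MiB, so the
    // file_size / 10000 term takes over (~210 MiB per part).
    let file_size: u64 = 2 * 1024 * 1024 * 1024 * 1024;
    let chunk_size = max(CHUNK_SIZE, file_size / 10_000);
    assert!(chunk_size > CHUNK_SIZE);

    // Ceiling division counts the actual parts: one over the cap here,
    // because the floor shaved a few bytes off every chunk.
    assert_eq!(file_size.div_ceil(chunk_size), 10_001);
}
```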
@@ -218,33 +463,34 @@ pub async fn list_files(
         thumbnail_size: 130,
         to: pagination_size,
     };
+    println!("{:#?}", req);
     let mut res = client.list_files(req).await?;
 
     files.append(&mut res.file);
 
-    if !res.last_page {
-        let mut cursor = res.file.len() as i64;
-        loop {
-            let req = types::request::ListFilesRequest {
-                from: cursor,
-                host_id: client.host_id.clone(),
-                path: prefix.unwrap_or("").to_string(),
-                sort_type: types::request::ListFilesRequestSortType::Path,
-                reverse: false,
-                thumbnail_size: 130,
-                to: pagination_size + cursor,
-            };
-
-            let mut next_res = client.list_files(req).await?;
-            files.append(&mut next_res.file);
-
-            if next_res.last_page {
-                break;
-            } else {
-                cursor += next_res.file.len() as i64;
-            }
-        }
-    }
+    // if !res.last_page {
+    //     let mut cursor = res.file.len() as i64;
+    //     loop {
+    //         let req = types::request::ListFilesRequest {
+    //             from: cursor,
+    //             host_id: client.host_id.clone(),
+    //             path: prefix.unwrap_or("").to_string(),
+    //             sort_type: types::request::ListFilesRequestSortType::Path,
+    //             reverse: false,
+    //             thumbnail_size: 130,
+    //             to: pagination_size + cursor,
+    //         };
+    //
+    //         let mut next_res = client.list_files(req).await?;
+    //         files.append(&mut next_res.file);
+    //
+    //         if next_res.last_page {
+    //             break;
+    //         } else {
+    //             cursor += next_res.file.len() as i64;
+    //         }
+    //     }
+    // }
     res.file = files;
 
     // files.iter().find(|f| f.path == "/").unwrap();
@@ -257,7 +503,12 @@ pub async fn check_job(key: &str, client: &client::Client) -> anyhow::Result<()>
     let req = types::request::CheckActionRequest {
         key: key.to_string(),
     };
-    let res = client.check_action(req).await.unwrap();
+    let res = client.check_action(req).await;
+
+    let res = match res {
+        Ok(res) => res,
+        Err(e) => return Err(anyhow::anyhow!("Error: {:?}", e)),
+    };
 
     if res.state == "complete" {
         return Ok(());
@@ -267,6 +518,6 @@ pub async fn check_job(key: &str, client: &client::Client) -> anyhow::Result<()>
             return Err(anyhow::anyhow!("Error: {:?}", res));
         }
 
-        std::thread::sleep(std::time::Duration::from_millis(200));
+        // std::thread::sleep(std::time::Duration::from_millis(100));
     }
 }