refactor: unify error handling with anyhow and add From conversions

Phase 5: Replace all Box<dyn Error> return types with anyhow::Result<T>
throughout the codebase. Replace string-based Err("msg".into()) and
format!().into() patterns with bail!() and anyhow!() macros. Fix
dirs::home_dir().unwrap() in settings.rs to use a fallback path instead
of panicking when HOME is unset. Remove stray use std::error::Error
imports no longer needed.

Phase 6: Add From<&User> for CacheUser in models/user.rs and
From<&Laboratory>/From<&Laboratories> for CacheLaboratory/CacheLabsWrapper
in models/laboratory.rs. Simplify commands/login.rs to use .into()
conversions, removing the redundant to_cache_user() and to_cache_labs()
helper functions.

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
This commit is contained in:
2026-04-20 14:19:10 +09:00
parent e8fd359f54
commit 769a5a68e2
30 changed files with 889 additions and 872 deletions
+25 -5
View File
@@ -9,11 +9,11 @@ struct FileListResponse {
}
impl MDRSConnection {
/// List all files in a folder, following pagination automatically
/// List all files in a folder, following pagination automatically.
pub async fn list_all_files(
&self,
folder_id: &str,
) -> Result<Vec<File>, Box<dyn std::error::Error>> {
) -> Result<Vec<File>, anyhow::Error> {
let mut all_files = Vec::new();
let mut page: u32 = 1;
loop {
@@ -35,15 +35,17 @@ impl MDRSConnection {
Ok(all_files)
}
/// Upload a local file into the given remote folder.
pub async fn upload_file(
&self,
folder_id: &str,
file_path: &str,
) -> Result<(), Box<dyn std::error::Error>> {
) -> Result<(), anyhow::Error> {
use reqwest::multipart;
use anyhow::{anyhow, bail};
let file_name: String = std::path::Path::new(file_path)
.file_name()
.unwrap()
.ok_or_else(|| anyhow!("Invalid file path: `{}`", file_path))?
.to_string_lossy()
.nfc()
.collect();
@@ -54,8 +56,26 @@ impl MDRSConnection {
.part("file", part);
let resp = self.post_multipart("v3/files/", form).await?;
if !resp.status().is_success() {
return Err(format!("Upload failed: {}", resp.status()).into());
bail!("Upload failed: {}", resp.status());
}
Ok(())
}
/// Download a file from `url` and write it to `dest`.
pub async fn download_file(
&self,
url: &str,
dest: &str,
) -> Result<(), anyhow::Error> {
let resp = self
.client
.get(url)
.headers(self.prepare_headers())
.send()
.await?;
let bytes = resp.bytes().await?;
tokio::fs::write(dest, &bytes).await?;
Ok(())
}
}
+45
View File
@@ -1,4 +1,5 @@
use crate::connection::MDRSConnection;
use anyhow::{bail};
pub use crate::models::folder::{FolderDetail, FolderSimple};
impl MDRSConnection {
@@ -26,4 +27,48 @@ impl MDRSConnection {
let resp = self.get(&format!("v3/folders/{}/", id)).await?;
resp.json::<FolderDetail>().await
}
/// Create a new folder named `folder_name` under `parent_id`
/// (POST v3/folders/) and return the raw response.
///
/// NOTE(review): `template_id: -1` presumably selects "no template" —
/// confirm against the server API.
pub async fn create_folder(
    &self,
    parent_id: &str,
    folder_name: &str,
) -> reqwest::Result<reqwest::Response> {
    let endpoint = self.build_url("v3/folders/");
    let headers = self.prepare_headers();
    let payload = serde_json::json!({
        "name": folder_name,
        "parent_id": parent_id,
        "description": "",
        "template_id": -1,
    });
    let request = self.client.post(endpoint).headers(headers).json(&payload);
    request.send().await
}
/// Authenticate against a password-locked folder (POST v3/folders/{id}/auth/).
/// Returns `Err` if the password is incorrect or the request fails.
pub async fn folder_auth(
    &self,
    folder_id: &str,
    password: &str,
) -> Result<(), anyhow::Error> {
    let endpoint = self.build_url(&format!("v3/folders/{}/auth/", folder_id));
    let body = serde_json::json!({"password": password});
    let resp = self
        .client
        .post(endpoint)
        .headers(self.prepare_headers())
        .json(&body)
        .send()
        .await?;
    let status = resp.status();
    // Distinguish a wrong password (401) from any other failure mode.
    if status == reqwest::StatusCode::UNAUTHORIZED {
        bail!("Password is incorrect.");
    }
    if !status.is_success() {
        bail!("Folder auth failed: {}", status);
    }
    Ok(())
}
}
+3 -2
View File
@@ -1,6 +1,7 @@
use crate::connection::MDRSConnection;
use crate::models::user::User as ModelUser;
use serde::Deserialize;
use anyhow::{bail};
/// Full API response shape from GET v3/users/current/
#[derive(Debug, Deserialize)]
@@ -40,7 +41,7 @@ impl MDRSConnection {
pub async fn token_refresh(
&self,
refresh_token: &str,
) -> Result<String, Box<dyn std::error::Error>> {
) -> Result<String, anyhow::Error> {
let body = serde_json::json!({ "refresh": refresh_token });
let resp = self
.client
@@ -49,7 +50,7 @@ impl MDRSConnection {
.send()
.await?;
if !resp.status().is_success() {
return Err(format!("Token refresh failed: {}", resp.status()).into());
bail!("Token refresh failed: {}", resp.status());
}
let r: TokenRefreshResponse = resp.json().await?;
Ok(r.access)
+123
View File
@@ -0,0 +1,123 @@
use super::types::{CacheLabsWrapper, CacheUser};
use sha2::{Digest, Sha256};
// ---------------------------------------------------------------------------
// Python-compatible JSON serialization helpers
//
// Python's default json.dumps uses separators=(', ', ': ') and
// ensure_ascii=True. Field order follows dataclass definition order.
// The digest must be byte-for-byte identical to Python's CacheData.__calc_digest.
// ---------------------------------------------------------------------------
/// Escape a string in Python json.dumps style:
/// - `"` and `\` → `\"` and `\\`
/// - `\b`, `\f`, `\n`, `\r`, `\t` → their short escapes (as Python emits)
/// - Other control chars and DEL (0x7F) → `\uXXXX`
/// - Non-ASCII chars → `\uXXXX`, surrogate pairs outside the BMP
///   (matches Python's `ensure_ascii=True` default)
///
/// Python's encoder passes only printable ASCII (0x20..=0x7E) through
/// unescaped, and uses the short escapes \b and \f for 0x08/0x0C — both
/// details matter because the digest must be byte-for-byte identical.
fn python_json_string(s: &str) -> String {
    let mut out = String::with_capacity(s.len() + 2);
    out.push('"');
    for c in s.chars() {
        match c {
            '"' => out.push_str("\\\""),
            '\\' => out.push_str("\\\\"),
            // Python's json.dumps uses the short escapes for these control
            // characters, not \u00XX.
            '\u{0008}' => out.push_str("\\b"),
            '\u{000C}' => out.push_str("\\f"),
            '\n' => out.push_str("\\n"),
            '\r' => out.push_str("\\r"),
            '\t' => out.push_str("\\t"),
            // Printable ASCII (space..tilde) passes through unescaped.
            ' '..='~' => out.push(c),
            // Everything else: remaining control chars, DEL, and all
            // non-ASCII. BMP → \uXXXX, outside BMP → surrogate pair.
            c => {
                let code = c as u32;
                if code <= 0xFFFF {
                    out.push_str(&format!("\\u{:04x}", code));
                } else {
                    let code = code - 0x10000;
                    let high = 0xD800 + (code >> 10);
                    let low = 0xDC00 + (code & 0x3FF);
                    out.push_str(&format!("\\u{:04x}\\u{:04x}", high, low));
                }
            }
        }
    }
    out.push('"');
    out
}
/// Serialize a `u32` slice as a Python-style JSON array: `[1, 2, 3]`.
fn python_json_u32_array(items: &[u32]) -> String {
    // `join` on an empty collection yields "", so the empty slice naturally
    // renders as "[]" without a special case.
    let rendered = items
        .iter()
        .map(|n| n.to_string())
        .collect::<Vec<_>>()
        .join(", ");
    format!("[{}]", rendered)
}
/// Build the JSON array string that Python's `__calc_digest` hashes:
/// `[user_asdict_or_null, token_asdict, labs_asdict]`
///
/// Field order matches each Python dataclass definition:
/// User: id, username, laboratory_ids, is_reviewer
/// Token: access, refresh
/// Laboratories: items
/// Laboratory: id, name, pi_name, full_name
pub fn python_digest_json(
    user: Option<&CacheUser>,
    access: &str,
    refresh: &str,
    labs: &CacheLabsWrapper,
) -> String {
    // `null` when no user is cached, otherwise the Python dict form.
    let user_json = user.map_or_else(
        || "null".to_string(),
        |u| {
            format!(
                "{{\"id\": {}, \"username\": {}, \"laboratory_ids\": {}, \"is_reviewer\": {}}}",
                u.id,
                python_json_string(&u.username),
                python_json_u32_array(&u.laboratory_ids),
                if u.is_reviewer { "true" } else { "false" }
            )
        },
    );
    let token_json = format!(
        "{{\"access\": {}, \"refresh\": {}}}",
        python_json_string(access),
        python_json_string(refresh)
    );
    // Laboratories render as `{"items": [...]}`; an empty `items` joins to
    // "" and thus yields "[]" with no special case.
    let lab_entries = labs
        .items
        .iter()
        .map(|lab| {
            format!(
                "{{\"id\": {}, \"name\": {}, \"pi_name\": {}, \"full_name\": {}}}",
                lab.id,
                python_json_string(&lab.name),
                python_json_string(&lab.pi_name),
                python_json_string(&lab.full_name)
            )
        })
        .collect::<Vec<_>>()
        .join(", ");
    let labs_json = format!("{{\"items\": [{}]}}", lab_entries);
    format!("[{}, {}, {}]", user_json, token_json, labs_json)
}
/// Compute the cache digest compatible with Python's `CacheData.__calc_digest`:
/// `hashlib.sha256(json.dumps([user, token, labs]).encode("utf-8")).hexdigest()`
pub fn compute_digest(
    user: Option<&CacheUser>,
    access: &str,
    refresh: &str,
    labs: &CacheLabsWrapper,
) -> String {
    let payload = python_digest_json(user, access, refresh, labs);
    // One-shot digest; `{:x}` renders the same lowercase hex as Python's
    // `hexdigest()`.
    format!("{:x}", Sha256::digest(payload.as_bytes()))
}
+162
View File
@@ -0,0 +1,162 @@
pub mod digest;
pub mod types;
pub use digest::compute_digest;
pub use types::{Cache, CacheLaboratory, CacheLabsWrapper, CacheUser};
use anyhow::{anyhow, bail};
use crate::connection::MDRSConnection;
use std::collections::HashMap;
use std::fs;
use std::sync::{Arc, LazyLock, Mutex};
// ---------------------------------------------------------------------------
// Per-remote async mutex map (in-process serialization)
// ---------------------------------------------------------------------------
static REMOTE_LOCKS: LazyLock<Mutex<HashMap<String, Arc<tokio::sync::Mutex<()>>>>> =
LazyLock::new(|| Mutex::new(HashMap::new()));
/// Return the per-remote async mutex, creating it on first use.
fn get_remote_lock(remote: &str) -> Arc<tokio::sync::Mutex<()>> {
    let mut locks = REMOTE_LOCKS.lock().unwrap();
    let entry = locks
        .entry(remote.to_string())
        .or_insert_with(|| Arc::new(tokio::sync::Mutex::new(())));
    Arc::clone(entry)
}
// ---------------------------------------------------------------------------
// Cache file path helpers
// ---------------------------------------------------------------------------
/// Path of the login cache file for `remote`:
/// `<config_dirname>/cache/<remote>.json`.
fn cache_file_path(remote: &str) -> std::path::PathBuf {
    let mut path = crate::settings::SETTINGS.config_dirname.join("cache");
    path.push(format!("{}.json", remote));
    path
}
// ---------------------------------------------------------------------------
// Load cache (low-level, no token refresh)
// ---------------------------------------------------------------------------
/// Load token and laboratories from the login cache file (no token refresh check).
pub fn load_cache(remote: &str) -> Result<Cache, anyhow::Error> {
    let path = cache_file_path(remote);
    if !path.exists() {
        bail!("Not logged in to `{}`. Run `mdrs login {}` first.", remote, remote);
    }
    let raw = fs::read_to_string(&path)?;
    // Map a deserialization failure to an actionable message rather than a
    // bare serde error.
    match serde_json::from_str::<Cache>(&raw) {
        Ok(cache) => Ok(cache),
        Err(e) => Err(anyhow!(
            "Cache for `{}` is invalid or outdated ({}). Run `mdrs login {}` to refresh it.",
            remote,
            e,
            remote
        )),
    }
}
// ---------------------------------------------------------------------------
// Token-aware cache load with refresh and locking
// ---------------------------------------------------------------------------
/// Load cache, check token expiry, and refresh the access token if needed.
///
/// Locking strategy:
/// - Per-remote `tokio::sync::Mutex` serializes concurrent async tasks within
///   the same process.
/// - `flock(LOCK_EX)` on a dedicated `cache/{remote}.lock` file serializes
///   the entire read-check-refresh-write cycle across separate processes on
///   the same host.
pub async fn load_cache_with_token_refresh(
    remote: &str,
) -> Result<Cache, anyhow::Error> {
    let lock = get_remote_lock(remote);
    let _guard = lock.lock().await;
    let lock_path = cache_file_path(remote).with_extension("lock");
    // Ensure the cache directory exists before creating the lock file;
    // otherwise a first run before any `mdrs login` surfaces a raw ENOENT
    // from OpenOptions instead of load_cache's friendly "Not logged in".
    if let Some(parent) = lock_path.parent() {
        fs::create_dir_all(parent)?;
    }
    use fs2::FileExt;
    let lock_file = fs::OpenOptions::new()
        .write(true)
        .create(true)
        .open(&lock_path)?;
    // NOTE(review): flock here blocks the thread inside an async fn;
    // acceptable as contention is rare and the critical section is short.
    lock_file.lock_exclusive()?;
    // Re-read inside the lock: another process may have already refreshed the
    // token since we last checked.
    let result: Result<Cache, anyhow::Error> = async {
        let mut cache = load_cache(remote)?;
        if crate::token::is_expired(&cache.token.refresh) {
            bail!("Session for `{}` has expired. Please run `mdrs login {}` again.", remote, remote);
        }
        if crate::token::is_refresh_required(&cache.token.access, &cache.token.refresh) {
            let new_access = refresh_and_persist(remote, &cache).await?;
            cache.token.access = new_access;
        }
        Ok(cache)
    }
    .await;
    // Release the file lock, but never let an unlock failure mask the real
    // outcome of the load/refresh above.
    let unlock_result = lock_file.unlock();
    let cache = result?;
    unlock_result?;
    Ok(cache)
}
/// Call the token-refresh endpoint and write the new access token back to the
/// cache file. The caller must already hold the per-remote async mutex.
///
/// Returns the new access token on success. The cache file is edited as a
/// generic `serde_json::Value` so any fields not modeled by the `Cache`
/// struct are preserved verbatim on rewrite.
async fn refresh_and_persist(
    remote: &str,
    cache: &Cache,
) -> Result<String, anyhow::Error> {
    // Resolve the remote label to its configured base URL.
    let url = crate::commands::config::get_remote_url(remote)?
        .ok_or_else(|| anyhow!("Remote `{}` is not configured.", remote))?;
    let conn = MDRSConnection::new(&url);
    let new_access = conn.token_refresh(&cache.token.refresh).await?;
    // Recompute the Python-compatible digest over the updated token so the
    // rewritten cache file still validates.
    let new_digest = compute_digest(
        cache.user.as_ref(),
        &new_access,
        &cache.token.refresh,
        &cache.laboratories,
    );
    let cache_path = cache_file_path(remote);
    let raw = fs::read_to_string(&cache_path)?;
    let mut obj: serde_json::Value = serde_json::from_str(&raw)?;
    // Only `token.access` and `digest` change; everything else is untouched.
    obj["token"]["access"] = serde_json::Value::String(new_access.clone());
    obj["digest"] = serde_json::Value::String(new_digest);
    // Write atomically: write to .tmp then rename.
    // (rename is atomic only when source and target share a filesystem,
    // which holds here since both live in the same cache directory.)
    let tmp_path = cache_path.with_extension("tmp");
    {
        use std::io::Write;
        let mut tmp_file = fs::OpenOptions::new()
            .write(true)
            .create(true)
            .truncate(true)
            .open(&tmp_path)?;
        tmp_file.write_all(serde_json::to_string(&obj)?.as_bytes())?;
        tmp_file.flush()?;
    }
    // Restrict the file to owner-only since it contains tokens.
    // NOTE(review): permissions are applied after the write, so there is a
    // brief window where .tmp has umask-default permissions — consider
    // setting the mode at open time if that matters.
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        let mut perms = fs::metadata(&tmp_path)?.permissions();
        perms.set_mode(0o600);
        fs::set_permissions(&tmp_path, perms)?;
    }
    fs::rename(&tmp_path, &cache_path)?;
    Ok(new_access)
}
// ---------------------------------------------------------------------------
// Connection helpers
// ---------------------------------------------------------------------------
/// Create an authenticated `MDRSConnection` for the given remote label.
pub fn create_authenticated_conn(
    remote: &str,
    cache: &Cache,
) -> Result<MDRSConnection, anyhow::Error> {
    let configured = crate::commands::config::get_remote_url(remote)?;
    let url = match configured {
        Some(u) => u,
        None => return Err(anyhow!("Remote `{}` is not configured.", remote)),
    };
    let conn = MDRSConnection::new(&url).with_token(cache.token.access.clone());
    Ok(conn)
}
+42
View File
@@ -0,0 +1,42 @@
use serde::Deserialize;
/// Access and refresh token pair stored in the login cache file.
// NOTE(review): `Debug` is deliberately left off the credential-bearing
// types below; confirm before deriving it (tokens would appear in logs).
#[derive(Deserialize, Clone)]
pub struct CacheToken {
    /// Access token attached to authenticated API requests.
    pub access: String,
    /// Refresh token sent to the token-refresh endpoint to mint a new
    /// access token.
    pub refresh: String,
}
/// Minimal user fields stored in the cache, matching Python's `User` dataclass.
#[derive(Deserialize, Clone)]
pub struct CacheUser {
    pub id: u32,
    pub username: String,
    /// IDs of the laboratories associated with this user.
    pub laboratory_ids: Vec<u32>,
    pub is_reviewer: bool,
}
/// All four laboratory fields needed for digest computation.
#[derive(Deserialize, Clone)]
pub struct CacheLaboratory {
    pub id: u32,
    pub name: String,
    /// Defaults to "" when absent from the cache file.
    #[serde(default)]
    pub pi_name: String,
    /// Defaults to "" when absent from the cache file.
    #[serde(default)]
    pub full_name: String,
}
/// Wrapper matching Python's `Laboratories` serialization: `{"items": [...]}`.
#[derive(Deserialize, Clone, Default)]
pub struct CacheLabsWrapper {
    pub items: Vec<CacheLaboratory>,
}
/// Full login cache, corresponding to the `<remote>.json` file written by `login`.
#[derive(Deserialize, Clone)]
pub struct Cache {
    /// Cached user info; `None` when the login flow stored no user.
    pub user: Option<CacheUser>,
    pub token: CacheToken,
    pub laboratories: CacheLabsWrapper,
}
+114
View File
@@ -0,0 +1,114 @@
use clap::{Parser, Subcommand};
use crate::commands::config_subcommand::*;
// Top-level CLI definition parsed via clap's derive API.
#[derive(Parser)]
#[command(name = "mdrs")]
#[command(about = "MDRS Rust CLI client", long_about = None)]
pub struct Cli {
    #[command(subcommand)]
    pub command: Commands,
}
// All subcommands of `mdrs`.
//
// NOTE(review): clap turns `///` doc comments into `--help` text, so the
// descriptive comments added below use `//` to leave the generated CLI
// output byte-identical. Promote them to `///` only if changing the help
// strings is intended.
#[derive(Subcommand)]
pub enum Commands {
    /// Config management (create, update, list, delete)
    #[command(subcommand)]
    Config(ConfigSubcommand),
    // Log in to `remote`; credentials not given via flags are prompted for.
    Login {
        #[arg(short, long)]
        username: Option<String>,
        #[arg(short, long)]
        password: Option<String>,
        remote: String,
    },
    /// Logout and remove cached credentials for a remote
    Logout {
        remote: String,
    },
    // Upload a local path to a remote path.
    Upload {
        #[arg(short, long)]
        recursive: bool,
        #[arg(short = 's', long)]
        skip_if_exists: bool,
        local_path: String,
        remote_path: String,
    },
    // Download a remote path to a local path.
    Download {
        #[arg(short, long)]
        recursive: bool,
        #[arg(short = 's', long)]
        skip_if_exists: bool,
        // Password for password-protected folders.
        #[arg(short = 'p', long)]
        password: Option<String>,
        // Patterns to skip during the download.
        #[arg(long)]
        exclude: Vec<String>,
        remote_path: String,
        local_path: String,
    },
    // List the contents of a remote path.
    Ls {
        remote_path: String,
        #[arg(short = 'p', long)]
        password: Option<String>,
        #[arg(short = 'J', long = "json")]
        json: bool,
        #[arg(short = 'r', long)]
        recursive: bool,
        #[arg(short = 'q', long)]
        quiet: bool,
    },
    // Show the user currently logged in to `remote`.
    Whoami {
        remote: String,
    },
    // List laboratories on `remote`.
    Labs {
        remote: String,
    },
    // Change the access level (ACL) of a remote path.
    Chacl {
        /// Access level key: private, public, pw_open, cbs_open, 5kikan_open,
        /// cbs_or_pw_open, 5kikan_or_pw_open, storage
        access_level_key: String,
        #[arg(short, long)]
        recursive: bool,
        #[arg(short = 'p', long)]
        password: Option<String>,
        remote_path: String,
    },
    // Show metadata for a remote path.
    Metadata {
        #[arg(short = 'p', long)]
        password: Option<String>,
        remote_path: String,
    },
    // Create a remote folder.
    Mkdir {
        remote_path: String,
    },
    // Remove a remote path.
    Rm {
        #[arg(short, long)]
        recursive: bool,
        remote_path: String,
    },
    // Move/rename a remote path.
    Mv {
        src_path: String,
        dest_path: String,
    },
    // Copy a remote path.
    Cp {
        #[arg(short, long)]
        recursive: bool,
        src_path: String,
        dest_path: String,
    },
    /// Show metadata for a remote file
    FileMetadata {
        #[arg(short = 'p', long)]
        password: Option<String>,
        remote_path: String,
    },
    /// Show the version of this tool
    Version,
    /// Update this binary to the latest release
    #[command(name = "selfupdate")]
    SelfUpdate {
        /// Skip the confirmation prompt
        #[arg(short = 'y', long)]
        yes: bool,
    },
}
+6 -4
View File
@@ -1,13 +1,15 @@
use crate::cache::{create_authenticated_conn, load_cache_with_token_refresh};
use crate::commands::shared::{
create_authenticated_conn, find_folder, find_lab_in_cache, load_cache_with_token_refresh, parse_remote_path,
find_folder, find_lab_in_cache, parse_remote_path,
};
use anyhow::{bail};
pub async fn chacl(
remote_path: &str,
access_level_key: &str,
recursive: bool,
password: Option<&str>,
) -> Result<(), Box<dyn std::error::Error>> {
) -> Result<(), anyhow::Error> {
let access_level_id: u32 = match access_level_key {
"private" => 0x0001,
"public" => 0x0002,
@@ -17,7 +19,7 @@ pub async fn chacl(
"cbs_or_pw_open" => 0x0104,
"5kikan_or_pw_open" => 0x0204,
"storage" => 0x0000,
_ => return Err(format!("Unknown access level key: '{}'", access_level_key).into()),
_ => bail!("Unknown access level key: '{}'", access_level_key),
};
let (remote, labname, folder_path) = parse_remote_path(remote_path)?;
@@ -43,7 +45,7 @@ pub async fn chacl(
.await?;
if !resp.status().is_success() {
return Err(format!("ACL change failed: {}", resp.status()).into());
bail!("ACL change failed: {}", resp.status());
}
Ok(())
}
+18 -17
View File
@@ -1,12 +1,13 @@
use configparser::ini::Ini;
use std::fs;
use std::path::PathBuf;
use anyhow::{bail};
fn config_path() -> PathBuf {
crate::settings::SETTINGS.config_dirname.join("config.ini")
}
fn sanitize_config_file(path: &PathBuf) -> Result<(), Box<dyn std::error::Error>> {
fn sanitize_config_file(path: &PathBuf) -> Result<(), anyhow::Error> {
if !path.exists() {
return Ok(());
}
@@ -28,7 +29,7 @@ fn sanitize_config_file(path: &PathBuf) -> Result<(), Box<dyn std::error::Error>
Ok(())
}
fn write_ini_atomic(path: &PathBuf, ini: &Ini) -> Result<(), Box<dyn std::error::Error>> {
fn write_ini_atomic(path: &PathBuf, ini: &Ini) -> Result<(), anyhow::Error> {
let tmp = path.with_extension("tmp");
// write to tmp path then rename for atomicity
ini.write(&tmp)?;
@@ -36,27 +37,27 @@ fn write_ini_atomic(path: &PathBuf, ini: &Ini) -> Result<(), Box<dyn std::error:
Ok(())
}
pub fn get_remote_url(remote: &str) -> Result<Option<String>, Box<dyn std::error::Error>> {
pub fn get_remote_url(remote: &str) -> Result<Option<String>, anyhow::Error> {
let path = config_path();
sanitize_config_file(&path)?;
let path_str = path.to_string_lossy().to_string();
let mut conf = Ini::new();
if path.exists() {
let _ = conf.load(&path_str)?;
let _ = conf.load(&path_str).map_err(|e| anyhow::anyhow!("{}", e))?;
}
Ok(conf.get(remote, "url"))
}
pub fn config_create(remote: &str, url: &str) -> Result<(), Box<dyn std::error::Error>> {
pub fn config_create(remote: &str, url: &str) -> Result<(), anyhow::Error> {
if !validate_url(url) {
return Err("Malformed URL".into());
bail!("Malformed URL");
}
let path = config_path();
sanitize_config_file(&path)?;
let path_str = path.to_string_lossy().to_string();
let mut conf = Ini::new();
if path.exists() {
let _ = conf.load(&path_str)?;
let _ = conf.load(&path_str).map_err(|e| anyhow::anyhow!("{}", e))?;
}
// check if section exists
let section_exists = conf
@@ -64,7 +65,7 @@ pub fn config_create(remote: &str, url: &str) -> Result<(), Box<dyn std::error::
.map(|m| m.contains_key(remote))
.unwrap_or(false);
if section_exists {
return Err(format!("Remote host `{}` is already exists.", remote).into());
bail!("Remote host `{}` is already exists.", remote);
}
// set url
conf.set(remote, "url", Some(url.to_string()));
@@ -76,16 +77,16 @@ pub fn config_create(remote: &str, url: &str) -> Result<(), Box<dyn std::error::
Ok(())
}
pub fn config_update(remote: &str, url: &str) -> Result<(), Box<dyn std::error::Error>> {
pub fn config_update(remote: &str, url: &str) -> Result<(), anyhow::Error> {
if !validate_url(url) {
return Err("Malformed URL".into());
bail!("Malformed URL");
}
let path = config_path();
sanitize_config_file(&path)?;
let path_str = path.to_string_lossy().to_string();
let mut conf = Ini::new();
if path.exists() {
let _ = conf.load(&path_str)?;
let _ = conf.load(&path_str).map_err(|e| anyhow::anyhow!("{}", e))?;
}
// ensure section exists
let section_exists = conf
@@ -93,14 +94,14 @@ pub fn config_update(remote: &str, url: &str) -> Result<(), Box<dyn std::error::
.map(|m| m.contains_key(remote))
.unwrap_or(false);
if !section_exists {
return Err(format!("Remote host `{}` is not exists.", remote).into());
bail!("Remote host `{}` is not exists.", remote);
}
conf.set(remote, "url", Some(url.to_string()));
write_ini_atomic(&path, &conf)?;
Ok(())
}
pub fn config_list() -> Result<(), Box<dyn std::error::Error>> {
pub fn config_list() -> Result<(), anyhow::Error> {
let path = config_path();
if !path.exists() {
return Ok(());
@@ -108,7 +109,7 @@ pub fn config_list() -> Result<(), Box<dyn std::error::Error>> {
sanitize_config_file(&path)?;
let path_str = path.to_string_lossy().to_string();
let mut conf = Ini::new();
let _ = conf.load(&path_str)?;
let _ = conf.load(&path_str).map_err(|e| anyhow::anyhow!("{}", e))?;
let map = conf.get_map().unwrap_or_default();
for (sec, props) in map.iter() {
if sec == "default" {
@@ -120,18 +121,18 @@ pub fn config_list() -> Result<(), Box<dyn std::error::Error>> {
Ok(())
}
pub fn config_delete(remote: &str) -> Result<(), Box<dyn std::error::Error>> {
pub fn config_delete(remote: &str) -> Result<(), anyhow::Error> {
let path = config_path();
sanitize_config_file(&path)?;
let path_str = path.to_string_lossy().to_string();
let mut conf = Ini::new();
if path.exists() {
let _ = conf.load(&path_str)?;
let _ = conf.load(&path_str).map_err(|e| anyhow::anyhow!("{}", e))?;
}
// fallback: reconstruct by removing the section in memory map and writing file
let mut map = conf.get_map().unwrap_or_default();
if map.remove(remote).is_none() {
return Err(format!("Remote host `{}` is not exists.", remote).into());
bail!("Remote host `{}` is not exists.", remote);
}
// build new Ini from map
let mut new_ini = Ini::new();
+16 -26
View File
@@ -1,22 +1,24 @@
use crate::cache::{create_authenticated_conn, load_cache_with_token_refresh};
use crate::commands::shared::{
create_authenticated_conn, find_file_by_name, find_folder, find_lab_in_cache,
find_subfolder_by_name, load_cache_with_token_refresh, nfc, parse_remote_path,
find_file_by_name, find_folder, find_lab_in_cache,
find_subfolder_by_name, nfc, parse_remote_path,
};
use anyhow::{bail};
pub async fn cp(
src_path: &str,
dest_path: &str,
recursive: bool,
) -> Result<(), Box<dyn std::error::Error>> {
) -> Result<(), anyhow::Error> {
let (s_remote, s_lab, s_path) = parse_remote_path(src_path)?;
let dest_ends_with_slash = dest_path.ends_with('/');
let (d_remote, d_lab, d_path) = parse_remote_path(dest_path)?;
if s_remote != d_remote {
return Err("Remote host mismatched.".into());
bail!("Remote host mismatched.");
}
if s_lab != d_lab {
return Err("Laboratory mismatched.".into());
bail!("Laboratory mismatched.");
}
let cache = load_cache_with_token_refresh(&s_remote).await?;
@@ -46,14 +48,10 @@ pub async fn cp(
if let Some(src_file) = find_file_by_name(&s_parent_files, &s_basename) {
let src_file_id = src_file.id.clone();
if find_file_by_name(&d_parent_files, &d_basename).is_some() {
return Err(format!("File `{}` already exists.", d_basename).into());
bail!("File `{}` already exists.", d_basename);
}
if find_subfolder_by_name(&d_parent_folder.sub_folders, &d_basename).is_some() {
return Err(format!(
"Cannot overwrite non-folder `{}` with folder `{}`.",
d_basename, d_path
)
.into());
bail!("Cannot overwrite non-folder `{}` with folder `{}`.", d_basename, d_path);
}
// No-op if source and destination are identical
if s_parent_folder.id == d_parent_folder.id && d_basename == s_basename {
@@ -68,7 +66,7 @@ pub async fn cp(
.send()
.await?;
if !resp.status().is_success() {
return Err(format!("Copy failed: {}", resp.status()).into());
bail!("Copy failed: {}", resp.status());
}
return Ok(());
}
@@ -76,28 +74,20 @@ pub async fn cp(
// Try source as a folder
let src_folder = match find_subfolder_by_name(&s_parent_folder.sub_folders, &s_basename) {
Some(f) => f,
None => return Err(format!("File or folder `{}` not found.", s_basename).into()),
None => bail!("File or folder `{}` not found.", s_basename),
};
if !recursive {
return Err(format!("Cannot copy `{}`: Is a folder.", s_path).into());
bail!("Cannot copy `{}`: Is a folder.", s_path);
}
let src_folder_id = src_folder.id.clone();
if find_file_by_name(&d_parent_files, &d_basename).is_some() {
return Err(format!(
"Cannot overwrite non-folder `{}` with folder `{}`.",
d_basename, s_path
)
.into());
bail!("Cannot overwrite non-folder `{}` with folder `{}`.", d_basename, s_path);
}
if let Some(d_folder) = find_subfolder_by_name(&d_parent_folder.sub_folders, &d_basename) {
if d_folder.id == src_folder_id {
return Err(
format!("`{}` and `{}` are the same folder.", s_path, s_path).into(),
);
bail!("`{}` and `{}` are the same folder.", s_path, s_path);
}
return Err(
format!("Cannot move `{}` to `{}`: Folder not empty.", s_path, d_path).into(),
);
bail!("Cannot move `{}` to `{}`: Folder not empty.", s_path, d_path);
}
// No-op if source and destination are identical
if s_parent_folder.id == d_parent_folder.id && s_basename == d_basename {
@@ -112,7 +102,7 @@ pub async fn cp(
.send()
.await?;
if !resp.status().is_success() {
return Err(format!("Copy failed: {}", resp.status()).into());
bail!("Copy failed: {}", resp.status());
}
Ok(())
}
+9 -7
View File
@@ -1,11 +1,13 @@
use crate::cache::{create_authenticated_conn, load_cache_with_token_refresh};
use crate::commands::shared::{
create_authenticated_conn, find_file_by_name, find_folder, find_lab_in_cache,
find_subfolder_by_name, load_cache_with_token_refresh, parse_remote_path,
find_file_by_name, find_folder, find_lab_in_cache,
find_subfolder_by_name, parse_remote_path,
};
use crate::connection::MDRSConnection;
use futures::stream::{FuturesUnordered, StreamExt};
use std::path::PathBuf;
use std::sync::Arc;
use anyhow::{anyhow, bail};
pub async fn download(
remote_path: &str,
@@ -14,7 +16,7 @@ pub async fn download(
skip_if_exists: bool,
password: Option<&str>,
excludes: Vec<String>,
) -> Result<(), Box<dyn std::error::Error>> {
) -> Result<(), anyhow::Error> {
let (remote, labname, r_path) = parse_remote_path(remote_path)?;
let cache = load_cache_with_token_refresh(&remote).await?;
let conn = Arc::new(create_authenticated_conn(&remote, &cache)?);
@@ -22,9 +24,9 @@ pub async fn download(
// Validate that local_path is an existing directory (matching Python's behaviour).
let local_real = std::fs::canonicalize(local_path)
.map_err(|_| format!("Local directory `{}` not found.", local_path))?;
.map_err(|_| anyhow!("Local directory `{}` not found.", local_path))?;
if !local_real.is_dir() {
return Err(format!("Local directory `{}` not found.", local_path).into());
bail!("Local directory `{}` not found.", local_path);
}
// Split r_path into the parent directory path and the target basename.
@@ -69,7 +71,7 @@ pub async fn download(
let subfolder = find_subfolder_by_name(&parent_folder.sub_folders, &basename);
if let Some(sub) = subfolder {
if !recursive {
return Err(format!("Cannot download `{}`: Is a folder.", r_path_clean).into());
bail!("Cannot download `{}`: Is a folder.", r_path_clean);
}
// Python downloads into local_path/<remote_folder_name>/ (not directly into local_path).
// We create that subdirectory first, then recurse into it.
@@ -155,7 +157,7 @@ pub async fn download(
return Ok(());
}
Err(format!("File or folder `{}` not found.", r_path_clean).into())
Err(anyhow!("File or folder `{}` not found.", r_path_clean))
}
/// Return true if the given lab/folder/file path matches any exclude pattern.
+5 -3
View File
@@ -1,9 +1,11 @@
use crate::cache::{create_authenticated_conn, load_cache_with_token_refresh};
use anyhow::{anyhow};
use crate::commands::shared::{
create_authenticated_conn, find_file_by_name, find_folder, find_lab_in_cache, load_cache_with_token_refresh,
find_file_by_name, find_folder, find_lab_in_cache,
parse_remote_path,
};
pub async fn file_metadata(remote_path: &str, password: Option<&str>) -> Result<(), Box<dyn std::error::Error>> {
pub async fn file_metadata(remote_path: &str, password: Option<&str>) -> Result<(), anyhow::Error> {
let (remote, labname, r_path) = parse_remote_path(remote_path)?;
let cache = load_cache_with_token_refresh(&remote).await?;
@@ -24,7 +26,7 @@ pub async fn file_metadata(remote_path: &str, password: Option<&str>) -> Result<
let files = conn.list_all_files(&parent_folder.id).await?;
let file = find_file_by_name(&files, &basename)
.ok_or_else(|| format!("File `{}` not found.", basename))?;
.ok_or_else(|| anyhow!("File `{}` not found.", basename))?;
let resp = conn.get(&format!("v3/files/{}/metadata/", file.id)).await?;
let json: serde_json::Value = resp.json().await?;
+2 -2
View File
@@ -1,6 +1,6 @@
use crate::commands::shared::{create_authenticated_conn, load_cache_with_token_refresh};
use crate::cache::{create_authenticated_conn, load_cache_with_token_refresh};
pub async fn labs(remote: &str) -> Result<(), Box<dyn std::error::Error>> {
pub async fn labs(remote: &str) -> Result<(), anyhow::Error> {
let cache = load_cache_with_token_refresh(remote).await?;
let conn = create_authenticated_conn(remote, &cache)?;
let labs = conn.list_laboratories().await?;
+33 -40
View File
@@ -1,55 +1,55 @@
use crate::commands::shared::{CacheLaboratory, CacheLabsWrapper, CacheUser, compute_digest};
use anyhow::{anyhow, bail};
use crate::cache::{CacheLabsWrapper, CacheUser, compute_digest};
use crate::connection::MDRSConnection;
use crate::models::laboratory::Laboratories;
use crate::models::user::User;
use reqwest::Client;
use serde::Deserialize;
use serde_json::{Value, json};
use std::error::Error;
use std::fs;
#[cfg(unix)]
use std::os::unix::fs::PermissionsExt;
/// Prompt for credentials if not supplied and then perform login.
/// This is the entry point called from `main`.
pub async fn run_login(
    username: Option<&str>,
    password: Option<&str>,
    remote: &str,
) -> Result<(), anyhow::Error> {
    use std::io::{self, Write};
    // Username: take the flag value, or read a trimmed line from stdin.
    let user = if let Some(u) = username {
        u.to_string()
    } else {
        print!("Username: ");
        io::stdout().flush()?;
        let mut line = String::new();
        io::stdin().read_line(&mut line)?;
        line.trim().to_string()
    };
    // Password: take the flag value, or prompt without echoing.
    let pass = if let Some(p) = password {
        p.to_string()
    } else {
        rpassword::prompt_password("Password: ")?
    };
    login(&user, &pass, remote).await
}
#[derive(Deserialize)]
struct TokenResp {
access: String,
refresh: String,
}
/// Convert an API `User` into a `CacheUser` (same fields, different type).
fn to_cache_user(u: &User) -> CacheUser {
CacheUser {
id: u.id,
username: u.username.clone(),
laboratory_ids: u.laboratory_ids.clone(),
is_reviewer: u.is_reviewer,
}
}
/// Convert API `Laboratories` into `CacheLabsWrapper` (all four fields already present).
fn to_cache_labs(labs: &Laboratories) -> CacheLabsWrapper {
CacheLabsWrapper {
items: labs
.items
.iter()
.map(|l| CacheLaboratory {
id: l.id,
name: l.name.clone(),
pi_name: l.pi_name.clone(),
full_name: l.full_name.clone(),
})
.collect(),
}
}
pub async fn login(
username: &str,
password: &str,
remote: &str,
) -> Result<(), Box<dyn std::error::Error>> {
) -> Result<(), anyhow::Error> {
// resolve remote label to URL from config
let url_opt = crate::commands::config::get_remote_url(remote)?;
let base_url = url_opt.ok_or(format!("Remote host `{}` is not configured", remote))?;
let base_url = url_opt.ok_or_else(|| anyhow!("Remote host `{}` is not configured", remote))?;
let conn0 = MDRSConnection::new(&base_url);
let client = Client::new();
let url = conn0.build_url("v3/users/token/");
@@ -57,19 +57,12 @@ pub async fn login(
let resp_res = client.post(&url).form(&params).send().await;
let resp = match resp_res {
Ok(r) => r,
Err(e) => {
let src = e.source();
return Err(format!(
"Login failed sending request to {}: {} (source: {:?})",
url, e, src
)
.into());
}
Err(e) => bail!("Login failed sending request to {}: {}", url, e),
};
let status = resp.status();
if !status.is_success() {
let body = resp.text().await.unwrap_or_default();
return Err(format!("Login failed: {} - {}", status, body).into());
bail!("Login failed: {} - {}", status, body);
}
let token: TokenResp = resp.json().await?;
@@ -80,8 +73,8 @@ pub async fn login(
let labs: Laboratories = conn.list_laboratories().await.unwrap_or_default();
// convert to cache types (all four Laboratory fields required for digest)
let cache_user_opt: Option<CacheUser> = user_opt.as_ref().map(to_cache_user);
let cache_labs = to_cache_labs(&labs);
let cache_user_opt: Option<CacheUser> = user_opt.as_ref().map(|u| u.into());
let cache_labs: CacheLabsWrapper = (&labs).into();
// compute Python-compatible digest
let digest = compute_digest(
+1 -1
View File
@@ -1,4 +1,4 @@
pub fn logout(remote: &str) -> Result<(), Box<dyn std::error::Error>> {
pub fn logout(remote: &str) -> Result<(), anyhow::Error> {
let cache_path = crate::settings::SETTINGS
.config_dirname
.join("cache")
+7 -7
View File
@@ -1,8 +1,8 @@
use crate::models::file::File;
use crate::models::folder::{FolderDetail, FolderSimple};
use crate::cache::{create_authenticated_conn, load_cache_with_token_refresh};
use crate::commands::shared::{
create_authenticated_conn, find_folder, find_lab_in_cache, fmt_datetime,
load_cache_with_token_refresh, parse_remote_path,
find_folder, find_lab_in_cache, fmt_datetime, parse_remote_path,
};
use crate::connection::MDRSConnection;
use serde_json::{json, Value};
@@ -15,7 +15,7 @@ pub async fn ls(
is_json: bool,
is_recursive: bool,
is_quiet: bool,
) -> Result<(), Box<dyn std::error::Error>> {
) -> Result<(), anyhow::Error> {
let (remote, labname, path) = parse_remote_path(remote_path)?;
let cache = load_cache_with_token_refresh(&remote).await?;
let conn = create_authenticated_conn(&remote, &cache)?;
@@ -151,7 +151,7 @@ fn ls_plain_recursive<'a>(
labname: &'a str,
prefix: &'a str,
password: Option<&'a str>,
) -> Pin<Box<dyn Future<Output = Result<(), Box<dyn std::error::Error>>> + 'a>> {
) -> Pin<Box<dyn Future<Output = Result<(), anyhow::Error>> + 'a>> {
Box::pin(async move {
let files = conn.list_all_files(&folder.id).await?;
let total_size: u64 = files.iter().map(|f| f.size).sum();
@@ -199,7 +199,7 @@ fn ls_plain_recursive<'a>(
async fn get_folder_metadata(
conn: &MDRSConnection,
folder_id: &str,
) -> Result<Value, Box<dyn std::error::Error>> {
) -> Result<Value, anyhow::Error> {
let resp = conn
.get(&format!("v3/folders/{}/metadata/", folder_id))
.await?;
@@ -253,7 +253,7 @@ async fn build_folder_json_flat(
conn: &MDRSConnection,
folder: &FolderDetail,
labname: &str,
) -> Result<Value, Box<dyn std::error::Error>> {
) -> Result<Value, anyhow::Error> {
let metadata = get_folder_metadata(conn, &folder.id).await?;
let files = conn.list_all_files(&folder.id).await?;
let files_json: Vec<Value> = files.iter().map(|f| file_to_json(f, &conn.url)).collect();
@@ -285,7 +285,7 @@ fn build_folder_json_recursive<'a>(
conn: &'a MDRSConnection,
folder: FolderDetail,
labname: &'a str,
) -> Pin<Box<dyn Future<Output = Result<Value, Box<dyn std::error::Error>>> + 'a>> {
) -> Pin<Box<dyn Future<Output = Result<Value, anyhow::Error>> + 'a>> {
Box::pin(async move {
let metadata = get_folder_metadata(conn, &folder.id).await?;
let files = conn.list_all_files(&folder.id).await?;
+3 -2
View File
@@ -1,8 +1,9 @@
use crate::cache::{create_authenticated_conn, load_cache_with_token_refresh};
use crate::commands::shared::{
create_authenticated_conn, find_folder, find_lab_in_cache, load_cache_with_token_refresh, parse_remote_path,
find_folder, find_lab_in_cache, parse_remote_path,
};
pub async fn metadata(remote_path: &str, password: Option<&str>) -> Result<(), Box<dyn std::error::Error>> {
pub async fn metadata(remote_path: &str, password: Option<&str>) -> Result<(), anyhow::Error> {
let (remote, labname, folder_path) = parse_remote_path(remote_path)?;
let cache = load_cache_with_token_refresh(&remote).await?;
let conn = create_authenticated_conn(&remote, &cache)?;
+9 -7
View File
@@ -1,16 +1,18 @@
use crate::cache::{create_authenticated_conn, load_cache_with_token_refresh};
use crate::commands::shared::{
create_authenticated_conn, find_file_by_name, find_folder, find_lab_in_cache,
find_subfolder_by_name, load_cache_with_token_refresh, nfc, parse_remote_path,
find_file_by_name, find_folder, find_lab_in_cache,
find_subfolder_by_name, nfc, parse_remote_path,
};
use anyhow::{anyhow, bail};
pub async fn mkdir(remote_path: &str) -> Result<(), Box<dyn std::error::Error>> {
pub async fn mkdir(remote_path: &str) -> Result<(), anyhow::Error> {
let (remote, labname, path) = parse_remote_path(remote_path)?;
// Split into parent path and new folder name
let path = path.trim_end_matches('/');
let last_slash = path
.rfind('/')
.ok_or("Invalid path: cannot determine parent folder")?;
.ok_or_else(|| anyhow!("Invalid path: cannot determine parent folder"))?;
let parent_path = if last_slash == 0 {
"/"
} else {
@@ -18,7 +20,7 @@ pub async fn mkdir(remote_path: &str) -> Result<(), Box<dyn std::error::Error>>
};
let new_folder_name = &path[last_slash + 1..];
if new_folder_name.is_empty() {
return Err("Invalid path: folder name cannot be empty".into());
bail!("Invalid path: folder name cannot be empty");
}
let cache = load_cache_with_token_refresh(&remote).await?;
@@ -31,14 +33,14 @@ pub async fn mkdir(remote_path: &str) -> Result<(), Box<dyn std::error::Error>>
if find_subfolder_by_name(&parent_folder.sub_folders, new_folder_name).is_some()
|| find_file_by_name(&files, new_folder_name).is_some()
{
return Err(format!("Cannot create folder `{}`: File exists.", path).into());
bail!("Cannot create folder `{}`: File exists.", path);
}
let resp = conn
.create_folder(&parent_folder.id, &nfc(new_folder_name))
.await?;
if !resp.status().is_success() {
return Err(format!("Failed to create folder: {}", resp.status()).into());
bail!("Failed to create folder: {}", resp.status());
}
Ok(())
}
+15 -25
View File
@@ -1,18 +1,20 @@
use crate::cache::{create_authenticated_conn, load_cache_with_token_refresh};
use crate::commands::shared::{
create_authenticated_conn, find_file_by_name, find_folder, find_lab_in_cache,
find_subfolder_by_name, load_cache_with_token_refresh, nfc, parse_remote_path,
find_file_by_name, find_folder, find_lab_in_cache,
find_subfolder_by_name, nfc, parse_remote_path,
};
use anyhow::{bail};
pub async fn mv(src_path: &str, dest_path: &str) -> Result<(), Box<dyn std::error::Error>> {
pub async fn mv(src_path: &str, dest_path: &str) -> Result<(), anyhow::Error> {
let (s_remote, s_lab, s_path) = parse_remote_path(src_path)?;
let dest_ends_with_slash = dest_path.ends_with('/');
let (d_remote, d_lab, d_path) = parse_remote_path(dest_path)?;
if s_remote != d_remote {
return Err("Remote host mismatched.".into());
bail!("Remote host mismatched.");
}
if s_lab != d_lab {
return Err("Laboratory mismatched.".into());
bail!("Laboratory mismatched.");
}
let cache = load_cache_with_token_refresh(&s_remote).await?;
@@ -42,14 +44,10 @@ pub async fn mv(src_path: &str, dest_path: &str) -> Result<(), Box<dyn std::erro
if let Some(src_file) = find_file_by_name(&s_parent_files, &s_basename) {
let src_file_id = src_file.id.clone();
if find_file_by_name(&d_parent_files, &d_basename).is_some() {
return Err(format!("File `{}` already exists.", d_basename).into());
bail!("File `{}` already exists.", d_basename);
}
if find_subfolder_by_name(&d_parent_folder.sub_folders, &d_basename).is_some() {
return Err(format!(
"Cannot overwrite non-folder `{}` with folder `{}`.",
d_basename, d_path
)
.into());
bail!("Cannot overwrite non-folder `{}` with folder `{}`.", d_basename, d_path);
}
// No-op if source and destination are identical
if s_parent_folder.id == d_parent_folder.id && d_basename == s_basename {
@@ -64,7 +62,7 @@ pub async fn mv(src_path: &str, dest_path: &str) -> Result<(), Box<dyn std::erro
.send()
.await?;
if !resp.status().is_success() {
return Err(format!("Move failed: {}", resp.status()).into());
bail!("Move failed: {}", resp.status());
}
return Ok(());
}
@@ -72,25 +70,17 @@ pub async fn mv(src_path: &str, dest_path: &str) -> Result<(), Box<dyn std::erro
// Try source as a folder
let src_folder = match find_subfolder_by_name(&s_parent_folder.sub_folders, &s_basename) {
Some(f) => f,
None => return Err(format!("File or folder `{}` not found.", s_basename).into()),
None => bail!("File or folder `{}` not found.", s_basename),
};
let src_folder_id = src_folder.id.clone();
if find_file_by_name(&d_parent_files, &d_basename).is_some() {
return Err(format!(
"Cannot overwrite non-folder `{}` with folder `{}`.",
d_basename, s_path
)
.into());
bail!("Cannot overwrite non-folder `{}` with folder `{}`.", d_basename, s_path);
}
if let Some(d_folder) = find_subfolder_by_name(&d_parent_folder.sub_folders, &d_basename) {
if d_folder.id == src_folder_id {
return Err(
format!("`{}` and `{}` are the same folder.", s_path, s_path).into(),
);
bail!("`{}` and `{}` are the same folder.", s_path, s_path);
}
return Err(
format!("Cannot move `{}` to `{}`: Folder not empty.", s_path, d_path).into(),
);
bail!("Cannot move `{}` to `{}`: Folder not empty.", s_path, d_path);
}
// No-op if source and destination are identical
if s_parent_folder.id == d_parent_folder.id && s_basename == d_basename {
@@ -105,7 +95,7 @@ pub async fn mv(src_path: &str, dest_path: &str) -> Result<(), Box<dyn std::erro
.send()
.await?;
if !resp.status().is_success() {
return Err(format!("Move failed: {}", resp.status()).into());
bail!("Move failed: {}", resp.status());
}
Ok(())
}
+11 -9
View File
@@ -1,14 +1,16 @@
use crate::cache::{create_authenticated_conn, load_cache_with_token_refresh};
use crate::commands::shared::{
create_authenticated_conn, find_file_by_name, find_folder, find_lab_in_cache,
find_subfolder_by_name, load_cache_with_token_refresh, parse_remote_path,
find_file_by_name, find_folder, find_lab_in_cache,
find_subfolder_by_name, parse_remote_path,
};
use anyhow::{anyhow, bail};
pub async fn rm(remote_path: &str, recursive: bool) -> Result<(), Box<dyn std::error::Error>> {
pub async fn rm(remote_path: &str, recursive: bool) -> Result<(), anyhow::Error> {
let (remote, labname, path) = parse_remote_path(remote_path)?;
// Split into parent path and target name
let path = path.trim_end_matches('/');
let last_slash = path.rfind('/').ok_or("Invalid path")?;
let last_slash = path.rfind('/').ok_or_else(|| anyhow!("Invalid path"))?;
let parent_path = if last_slash == 0 {
"/"
} else {
@@ -16,7 +18,7 @@ pub async fn rm(remote_path: &str, recursive: bool) -> Result<(), Box<dyn std::e
};
let target_name = &path[last_slash + 1..];
if target_name.is_empty() {
return Err("Cannot remove root folder".into());
bail!("Cannot remove root folder");
}
let cache = load_cache_with_token_refresh(&remote).await?;
@@ -34,7 +36,7 @@ pub async fn rm(remote_path: &str, recursive: bool) -> Result<(), Box<dyn std::e
.send()
.await?;
if !resp.status().is_success() {
return Err(format!("Failed to delete file: {}", resp.status()).into());
bail!("Failed to delete file: {}", resp.status());
}
return Ok(());
}
@@ -42,7 +44,7 @@ pub async fn rm(remote_path: &str, recursive: bool) -> Result<(), Box<dyn std::e
// Check if target is a sub-folder
if let Some(subfolder) = find_subfolder_by_name(&parent_folder.sub_folders, target_name) {
if !recursive {
return Err(format!("Cannot remove `{}`: Is a folder.", path).into());
bail!("Cannot remove `{}`: Is a folder.", path);
}
let resp = conn
.client
@@ -52,10 +54,10 @@ pub async fn rm(remote_path: &str, recursive: bool) -> Result<(), Box<dyn std::e
.send()
.await?;
if !resp.status().is_success() {
return Err(format!("Failed to delete folder: {}", resp.status()).into());
bail!("Failed to delete folder: {}", resp.status());
}
return Ok(());
}
Err(format!("Cannot remove `{}`: No such file or folder.", path).into())
Err(anyhow!("Cannot remove `{}`: No such file or folder.", path))
}
+43 -379
View File
@@ -1,379 +1,26 @@
use crate::cache::{Cache, CacheLaboratory};
use crate::connection::MDRSConnection;
use crate::models::file::File;
use crate::models::folder::{FolderDetail, FolderSimple};
use crate::connection::MDRSConnection;
use serde::Deserialize;
use sha2::{Digest, Sha256};
use std::collections::HashMap;
use std::fs;
use std::sync::{Arc, LazyLock, Mutex};
use unicode_normalization::UnicodeNormalization;
use anyhow::{anyhow, bail};
// ---------------------------------------------------------------------------
// Cache structs — matching Python's cache format exactly
// Path helpers
// ---------------------------------------------------------------------------
#[derive(Deserialize, Clone)]
pub struct CacheToken {
pub access: String,
pub refresh: String,
}
/// Minimal user fields stored in cache — matches Python's `User` dataclass.
#[derive(Deserialize, Clone)]
pub struct CacheUser {
pub id: u32,
pub username: String,
pub laboratory_ids: Vec<u32>,
pub is_reviewer: bool,
}
/// All four fields of a laboratory, needed for digest computation.
#[derive(Deserialize, Clone)]
pub struct CacheLaboratory {
pub id: u32,
pub name: String,
#[serde(default)]
pub pi_name: String,
#[serde(default)]
pub full_name: String,
}
/// Wrapper matching Python's `Laboratories` serialization: `{"items": [...]}`.
#[derive(Deserialize, Clone, Default)]
pub struct CacheLabsWrapper {
pub items: Vec<CacheLaboratory>,
}
#[derive(Deserialize, Clone)]
pub struct Cache {
pub user: Option<CacheUser>,
pub token: CacheToken,
pub laboratories: CacheLabsWrapper,
}
// ---------------------------------------------------------------------------
// Digest computation — must produce exactly the same hash as Python's
// `CacheData.__calc_digest()`:
// hashlib.sha256(
// json.dumps([user_asdict, token_asdict, labs_asdict]).encode("utf-8")
// ).hexdigest()
//
// Python's default json.dumps uses separators=(', ', ': ') and
// ensure_ascii=True. Field order follows dataclass definition order.
// ---------------------------------------------------------------------------
/// Escape a string in Python json.dumps style:
/// - Special chars: ", \, and control chars -> standard JSON escapes
/// - Non-ASCII chars -> \uXXXX (matches Python ensure_ascii=True default)
fn python_json_string(s: &str) -> String {
let mut out = String::with_capacity(s.len() + 2);
out.push('"');
for c in s.chars() {
match c {
'"' => out.push_str("\\\""),
'\\' => out.push_str("\\\\"),
'\n' => out.push_str("\\n"),
'\r' => out.push_str("\\r"),
'\t' => out.push_str("\\t"),
c if (c as u32) < 0x20 => {
out.push_str(&format!("\\u{:04x}", c as u32));
}
c if c.is_ascii() => out.push(c),
c => {
// Non-ASCII: encode as \uXXXX (BMP) or surrogate pair (outside BMP)
let code = c as u32;
if code <= 0xFFFF {
out.push_str(&format!("\\u{:04x}", code));
} else {
let code = code - 0x10000;
let high = 0xD800 + (code >> 10);
let low = 0xDC00 + (code & 0x3FF);
out.push_str(&format!("\\u{:04x}\\u{:04x}", high, low));
}
}
}
}
out.push('"');
out
}
/// Serialize a list of u32 as a Python-style JSON array: `[1, 2, 3]`
fn python_json_u32_array(items: &[u32]) -> String {
if items.is_empty() {
return "[]".to_string();
}
let inner: Vec<String> = items.iter().map(|x| x.to_string()).collect();
format!("[{}]", inner.join(", "))
}
/// Build the JSON array string that Python's `__calc_digest` hashes:
/// [user_asdict_or_null, token_asdict, labs_asdict]
///
/// Field order matches each Python dataclass definition:
/// User: id, username, laboratory_ids, is_reviewer
/// Token: access, refresh
/// Laboratories: items
/// Laboratory: id, name, pi_name, full_name
pub fn python_digest_json(
user: Option<&CacheUser>,
access: &str,
refresh: &str,
labs: &CacheLabsWrapper,
) -> String {
let user_str = match user {
None => "null".to_string(),
Some(u) => format!(
"{{\"id\": {}, \"username\": {}, \"laboratory_ids\": {}, \"is_reviewer\": {}}}",
u.id,
python_json_string(&u.username),
python_json_u32_array(&u.laboratory_ids),
if u.is_reviewer { "true" } else { "false" }
),
};
let token_str = format!(
"{{\"access\": {}, \"refresh\": {}}}",
python_json_string(access),
python_json_string(refresh)
);
let items: Vec<String> = labs
.items
.iter()
.map(|lab| {
format!(
"{{\"id\": {}, \"name\": {}, \"pi_name\": {}, \"full_name\": {}}}",
lab.id,
python_json_string(&lab.name),
python_json_string(&lab.pi_name),
python_json_string(&lab.full_name)
)
})
.collect();
let items_str = if items.is_empty() {
"[]".to_string()
} else {
format!("[{}]", items.join(", "))
};
let labs_str = format!("{{\"items\": {}}}", items_str);
format!("[{}, {}, {}]", user_str, token_str, labs_str)
}
/// Compute the cache digest compatible with Python's `CacheData.__calc_digest`.
pub fn compute_digest(
user: Option<&CacheUser>,
access: &str,
refresh: &str,
labs: &CacheLabsWrapper,
) -> String {
let json_str = python_digest_json(user, access, refresh, labs);
let mut hasher = Sha256::new();
hasher.update(json_str.as_bytes());
format!("{:x}", hasher.finalize())
}
// ---------------------------------------------------------------------------
// Per-remote async mutex map (in-process serialization)
// ---------------------------------------------------------------------------
static REMOTE_LOCKS: LazyLock<Mutex<HashMap<String, Arc<tokio::sync::Mutex<()>>>>> =
LazyLock::new(|| Mutex::new(HashMap::new()));
fn get_remote_lock(remote: &str) -> Arc<tokio::sync::Mutex<()>> {
let mut map = REMOTE_LOCKS.lock().unwrap();
map.entry(remote.to_string())
.or_insert_with(|| Arc::new(tokio::sync::Mutex::new(())))
.clone()
}
// ---------------------------------------------------------------------------
// Cache file path helper
// ---------------------------------------------------------------------------
fn cache_file_path(remote: &str) -> std::path::PathBuf {
crate::settings::SETTINGS
.config_dirname
.join("cache")
.join(format!("{}.json", remote))
}
// ---------------------------------------------------------------------------
// Load cache (low-level, no token refresh)
// ---------------------------------------------------------------------------
/// Load token and laboratories from the login cache file (no token refresh check).
pub fn load_cache(remote: &str) -> Result<Cache, Box<dyn std::error::Error>> {
let cache_path = cache_file_path(remote);
if !cache_path.exists() {
return Err(format!(
"Not logged in to `{}`. Run `mdrs login {}` first.",
remote, remote
)
.into());
}
let data = fs::read_to_string(&cache_path)?;
serde_json::from_str::<Cache>(&data).map_err(|e| {
format!(
"Cache for `{}` is invalid or outdated ({}). Run `mdrs login {}` to refresh it.",
remote, e, remote
)
.into()
})
}
// ---------------------------------------------------------------------------
// Token-aware cache load with refresh and locking
// ---------------------------------------------------------------------------
/// Load cache, check token expiry, and refresh the access token if needed.
///
/// Locking strategy:
/// - Per-remote `tokio::sync::Mutex` serializes concurrent async tasks within
/// the same process.
/// - `flock(LOCK_EX)` on a dedicated `cache/{remote}.lock` file serializes
/// the entire read-check-refresh-write cycle across separate processes on
/// the same host. The lock file is stable (never renamed), so the flock
/// reliably protects the same inode for the lifetime of the critical section.
pub async fn load_cache_with_token_refresh(
remote: &str,
) -> Result<Cache, Box<dyn std::error::Error>> {
// Acquire the in-process async mutex for this remote first.
let lock = get_remote_lock(remote);
let _guard = lock.lock().await;
// Acquire an exclusive cross-process file lock. This ensures that no
// other process can race through the read-check-refresh-write cycle at
// the same time as us. The lock file is separate from the cache file so
// that it never disappears (unlike a .tmp file that gets renamed away).
let lock_path = cache_file_path(remote).with_extension("lock");
use fs2::FileExt;
let lock_file = fs::OpenOptions::new()
.write(true)
.create(true)
.open(&lock_path)?;
lock_file.lock_exclusive()?;
// Re-read the cache inside the lock: another process may have already
// refreshed the token since we last checked.
let result: Result<Cache, Box<dyn std::error::Error>> = async {
let mut cache = load_cache(remote)?;
if crate::token::is_expired(&cache.token.refresh) {
return Err(format!(
"Session for `{}` has expired. Please run `mdrs login {}` again.",
remote, remote
)
.into());
}
if crate::token::is_refresh_required(&cache.token.access, &cache.token.refresh) {
let new_access = refresh_and_persist(remote, &cache).await?;
cache.token.access = new_access;
}
Ok(cache)
}
.await;
// Release the cross-process file lock. The OS will also release it when
// lock_file is dropped, but we unlock explicitly for clarity.
lock_file.unlock()?;
result
}
/// Call the token-refresh endpoint and write the new access token back to the
/// cache file. The caller must already hold the per-remote async mutex.
/// Also recomputes the digest so Python can verify the cache.
async fn refresh_and_persist(
remote: &str,
cache: &Cache,
) -> Result<String, Box<dyn std::error::Error>> {
// Build a connection without Bearer token just to reach the refresh endpoint
let url = crate::commands::config::get_remote_url(remote)?
.ok_or_else(|| format!("Remote `{}` is not configured.", remote))?;
let conn = MDRSConnection::new(&url);
let new_access = conn.token_refresh(&cache.token.refresh).await?;
// Recompute the digest with the new access token so the Python client
// can still verify the cache after a token refresh.
let new_digest = compute_digest(
cache.user.as_ref(),
&new_access,
&cache.token.refresh,
&cache.laboratories,
);
// Persist the updated access token to the cache file with an exclusive file
// lock so that other processes do not read a partially written file.
let cache_path = cache_file_path(remote);
let raw = fs::read_to_string(&cache_path)?;
let mut obj: serde_json::Value = serde_json::from_str(&raw)?;
obj["token"]["access"] = serde_json::Value::String(new_access.clone());
obj["digest"] = serde_json::Value::String(new_digest);
// Write atomically: write to .tmp then rename. The caller already holds
// the exclusive .lock file, so no additional flock is needed here.
let tmp_path = cache_path.with_extension("tmp");
{
use std::io::Write;
let mut tmp_file = fs::OpenOptions::new()
.write(true)
.create(true)
.truncate(true)
.open(&tmp_path)?;
tmp_file.write_all(serde_json::to_string(&obj)?.as_bytes())?;
tmp_file.flush()?;
}
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let mut perms = fs::metadata(&tmp_path)?.permissions();
perms.set_mode(0o600);
fs::set_permissions(&tmp_path, perms)?;
}
fs::rename(&tmp_path, &cache_path)?;
Ok(new_access)
}
// ---------------------------------------------------------------------------
// Connection helpers
// ---------------------------------------------------------------------------
/// Create an authenticated MDRSConnection for the given remote label
pub fn create_authenticated_conn(
remote: &str,
cache: &Cache,
) -> Result<MDRSConnection, Box<dyn std::error::Error>> {
let url = crate::commands::config::get_remote_url(remote)?
.ok_or_else(|| format!("Remote `{}` is not configured.", remote))?;
let mut conn = MDRSConnection::new(&url);
conn.token = Some(cache.token.access.clone());
Ok(conn)
}
// ---------------------------------------------------------------------------
// Path and lab helpers
// ---------------------------------------------------------------------------
/// Parse "remote:/labname/path/" into (remote, labname, folder_path)
/// Parse "remote:/labname/path/" into (remote, labname, folder_path).
pub fn parse_remote_path(
remote_path: &str,
) -> Result<(String, String, String), Box<dyn std::error::Error>> {
) -> Result<(String, String, String), anyhow::Error> {
let parts: Vec<&str> = remote_path.splitn(2, ':').collect();
if parts.len() != 2 {
return Err("remote_path must be in the form 'remote:/labname/path/'".into());
bail!("remote_path must be in the form 'remote:/labname/path/'");
}
let remote = parts[0].to_string();
let rest = parts[1];
if !rest.starts_with('/') {
return Err("Path must be absolute (start with '/')".into());
bail!("Path must be absolute (start with '/')");
}
let folder_parts: Vec<&str> = rest.trim_start_matches('/').splitn(2, '/').collect();
let labname = folder_parts[0].to_string();
@@ -385,50 +32,56 @@ pub fn parse_remote_path(
Ok((remote, labname, path))
}
/// Look up a laboratory by name in the cache
// ---------------------------------------------------------------------------
// Lab helpers
// ---------------------------------------------------------------------------
/// Look up a laboratory by name in the cache.
pub fn find_lab_in_cache<'a>(
cache: &'a Cache,
labname: &str,
) -> Result<&'a CacheLaboratory, Box<dyn std::error::Error>> {
) -> Result<&'a CacheLaboratory, anyhow::Error> {
cache
.laboratories
.items
.iter()
.find(|l| l.name == labname)
.ok_or_else(|| format!("Laboratory `{}` not found.", labname).into())
.ok_or_else(|| anyhow!("Laboratory `{}` not found.", labname))
}
// ---------------------------------------------------------------------------
// Unicode helpers
// ---------------------------------------------------------------------------
/// Apply Unicode NFC normalization to a string.
pub fn nfc(s: &str) -> String {
s.chars().nfc().collect()
}
/// Resolve a folder by path using the API (GET v3/folders/?path=...&laboratory_id=...)
// ---------------------------------------------------------------------------
// Folder / file search helpers
// ---------------------------------------------------------------------------
/// Resolve a folder by path using the API (GET v3/folders/?path=...&laboratory_id=...).
pub async fn find_folder(
conn: &MDRSConnection,
lab_id: u32,
path: &str,
password: Option<&str>,
) -> Result<FolderDetail, Box<dyn std::error::Error>> {
) -> Result<FolderDetail, anyhow::Error> {
let normalized_path = nfc(path);
let folders = conn.list_folders_by_path(lab_id, &normalized_path).await?;
if folders.is_empty() {
return Err(format!("Folder `{}` not found.", path).into());
bail!("Folder `{}` not found.", path);
}
if folders.len() != 1 {
return Err(
format!("Ambiguous path `{}`: {} folders matched.", path, folders.len()).into(),
);
bail!("Ambiguous path `{}`: {} folders matched.", path, folders.len());
}
let folder_simple = &folders[0];
if folder_simple.lock {
match password {
None => {
return Err(format!(
"Folder `{}` is locked. Use -p/--password to provide a password.",
path
)
.into())
bail!("Folder `{}` is locked. Use -p/--password to provide a password.", path);
}
Some(pw) => conn.folder_auth(&folder_simple.id, pw).await?,
}
@@ -440,16 +93,27 @@ pub async fn find_folder(
/// Find a file by name (NFC-normalized, case-insensitive) in a file list.
pub fn find_file_by_name<'a>(files: &'a [File], name: &str) -> Option<&'a File> {
let name_lower = nfc(name).to_lowercase();
files.iter().find(|f| nfc(&f.name).to_lowercase() == name_lower)
files
.iter()
.find(|f| nfc(&f.name).to_lowercase() == name_lower)
}
/// Find a sub-folder by name (NFC-normalized, case-insensitive).
pub fn find_subfolder_by_name<'a>(subfolders: &'a [FolderSimple], name: &str) -> Option<&'a FolderSimple> {
pub fn find_subfolder_by_name<'a>(
subfolders: &'a [FolderSimple],
name: &str,
) -> Option<&'a FolderSimple> {
let name_lower = nfc(name).to_lowercase();
subfolders.iter().find(|f| nfc(&f.name).to_lowercase() == name_lower)
subfolders
.iter()
.find(|f| nfc(&f.name).to_lowercase() == name_lower)
}
/// Format an ISO 8601 timestamp as "YYYY/MM/DD HH:MM:SS"
// ---------------------------------------------------------------------------
// Display helpers
// ---------------------------------------------------------------------------
/// Format an ISO 8601 timestamp as "YYYY/MM/DD HH:MM:SS".
pub fn fmt_datetime(iso: &str) -> String {
let s = iso.trim();
let s = if let Some(pos) = s[10..].find(|c: char| c == '+' || c == '-') {
+11 -9
View File
@@ -1,19 +1,21 @@
use crate::models::folder::FolderSimple;
use crate::cache::{create_authenticated_conn, load_cache_with_token_refresh};
use crate::commands::shared::{
create_authenticated_conn, find_file_by_name, find_folder, find_lab_in_cache,
load_cache_with_token_refresh, nfc, parse_remote_path,
find_file_by_name, find_folder, find_lab_in_cache,
nfc, parse_remote_path,
};
use futures::stream::{FuturesUnordered, StreamExt};
use std::path::PathBuf;
use std::sync::Arc;
use tokio::fs;
use anyhow::{anyhow, bail};
pub async fn upload(
local_path: &str,
remote_path: &str,
recursive: bool,
skip_if_exists: bool,
) -> Result<(), Box<dyn std::error::Error>> {
) -> Result<(), anyhow::Error> {
let (remote, labname, r_path) = parse_remote_path(remote_path)?;
let cache = load_cache_with_token_refresh(&remote).await?;
let conn = Arc::new(create_authenticated_conn(&remote, &cache)?);
@@ -24,7 +26,7 @@ pub async fn upload(
// trailing slashes and "./" prefixes are handled consistently (matching
// Python's os.path.abspath behaviour).
let local_abs = std::fs::canonicalize(local_path)
.map_err(|_| format!("File or directory `{}` not found.", local_path))?;
.map_err(|_| anyhow!("File or directory `{}` not found.", local_path))?;
let local = local_abs.as_path();
if local.is_file() {
@@ -42,7 +44,7 @@ pub async fn upload(
println!("{}{}", dest_folder.path, filename);
} else if local.is_dir() {
if !recursive {
return Err(format!("Cannot upload `{}`: Is a directory.", local_path).into());
bail!("Cannot upload `{}`: Is a directory.", local_path);
}
// Python always creates a sub-folder named after the local directory inside
// remote_path. E.g. `upload ./mydir remote:/lab/path/` creates
@@ -123,7 +125,7 @@ pub async fn upload(
while futs.next().await.is_some() {}
}
} else {
return Err(format!("File or directory `{}` not found.", local_path).into());
bail!("File or directory `{}` not found.", local_path);
}
Ok(())
@@ -135,17 +137,17 @@ async fn find_or_create_folder(
parent_id: &str,
existing: &[FolderSimple],
name: &str,
) -> Result<String, Box<dyn std::error::Error>> {
) -> Result<String, anyhow::Error> {
if let Some(sf) = existing.iter().find(|f| nfc(&f.name).to_lowercase() == nfc(name).to_lowercase()) {
return Ok(sf.id.clone());
}
let resp = conn.create_folder(parent_id, &nfc(name)).await?;
if !resp.status().is_success() {
return Err(format!("Failed to create remote folder: {}", name).into());
bail!("Failed to create remote folder: {}", name);
}
let json: serde_json::Value = resp.json().await?;
json["id"]
.as_str()
.ok_or_else(|| format!("No id in create_folder response for {}", name).into())
.ok_or_else(|| anyhow!("No id in create_folder response for {}", name))
.map(|s| s.to_string())
}
+1 -1
View File
@@ -11,7 +11,7 @@ struct WhoamiCache {
user: Option<CacheUser>,
}
pub async fn whoami(remote: &str) -> Result<(), Box<dyn std::error::Error>> {
pub async fn whoami(remote: &str) -> Result<(), anyhow::Error> {
let cache_path = crate::settings::SETTINGS
.config_dirname
.join("cache")
+6 -60
View File
@@ -26,6 +26,7 @@ fn build_user_agent() -> String {
)
}
/// HTTP transport layer for MDRS API calls.
pub struct MDRSConnection {
pub url: String,
pub client: Client,
@@ -64,13 +65,15 @@ impl MDRSConnection {
let mut headers = HeaderMap::new();
headers.insert(
USER_AGENT,
HeaderValue::from_str(&build_user_agent()).unwrap(),
HeaderValue::from_str(&build_user_agent())
.expect("user-agent string contains invalid header characters"),
);
headers.insert(ACCEPT, HeaderValue::from_static("application/json"));
if let Some(token) = &self.token {
headers.insert(
AUTHORIZATION,
HeaderValue::from_str(&format!("Bearer {}", token)).unwrap(),
HeaderValue::from_str(&format!("Bearer {}", token))
.expect("token contains invalid header characters"),
);
}
headers
@@ -96,62 +99,5 @@ impl MDRSConnection {
.send()
.await
}
pub async fn download_file(
&self,
url: &str,
dest: &str,
) -> Result<(), Box<dyn std::error::Error>> {
let resp = self
.client
.get(url)
.headers(self.prepare_headers())
.send()
.await?;
let bytes = resp.bytes().await?;
tokio::fs::write(dest, &bytes).await?;
Ok(())
}
/// Create a sub-folder named `folder_name` under the folder `parent_id`
/// (POST v3/folders/). Returns the raw HTTP response for the caller to
/// inspect.
pub async fn create_folder(
    &self,
    parent_id: &str,
    folder_name: &str,
) -> reqwest::Result<reqwest::Response> {
    let payload = serde_json::json!({
        "name": folder_name,
        "parent_id": parent_id,
        "description": "",
        "template_id": -1,
    });
    let request = self
        .client
        .post(self.build_url("v3/folders/"))
        .headers(self.prepare_headers())
        .json(&payload);
    request.send().await
}
/// Authenticate against a password-locked folder (POST v3/folders/{id}/auth/).
/// Returns Err if the password is incorrect or the request fails.
pub async fn folder_auth(
&self,
folder_id: &str,
password: &str,
) -> Result<(), Box<dyn std::error::Error>> {
let resp = self
.client
.post(self.build_url(&format!("v3/folders/{}/auth/", folder_id)))
.headers(self.prepare_headers())
.json(&serde_json::json!({"password": password}))
.send()
.await?;
if resp.status() == reqwest::StatusCode::UNAUTHORIZED {
return Err("Password is incorrect.".into());
}
if !resp.status().is_success() {
return Err(format!("Folder auth failed: {}", resp.status()).into());
}
Ok(())
}
}
+25
View File
@@ -0,0 +1,25 @@
/// Print the error message and exit with code 2.
/// JSON deserialization errors produce a friendlier message matching Python's
/// JSONDecodeError handling.
pub fn handle_error(e: anyhow::Error) -> ! {
    // Pick the user-facing text first, then emit it in one place.
    let message = if is_json_error(&*e) {
        "Unexpected response returned. Please check the configuration or the server's operational status.".to_string()
    } else {
        format!("Error: {}", e)
    };
    eprintln!("{}", message);
    std::process::exit(2);
}
/// Walk the error source chain to detect `serde_json` parse errors.
fn is_json_error(e: &(dyn std::error::Error + 'static)) -> bool {
let mut source: Option<&(dyn std::error::Error + 'static)> = Some(e);
while let Some(err) = source {
if err.downcast_ref::<serde_json::Error>().is_some() {
return true;
}
source = err.source();
}
false
}
+112 -258
View File
@@ -1,211 +1,66 @@
pub mod api;
mod api;
mod cache;
mod cli;
mod commands;
mod connection;
mod error;
mod models;
mod settings;
mod token;
use clap::{Parser, Subcommand};
use clap::Parser;
use cli::{Cli, Commands};
use error::handle_error;
#[derive(Parser)]
#[command(name = "mdrs")]
#[command(about = "MDRS Rust CLI client", long_about = None)]
struct Cli {
#[command(subcommand)]
command: Commands,
}
fn run(cli: Cli) {
let build_rt = || {
tokio::runtime::Runtime::new()
.unwrap_or_else(|e| handle_error(e.into()))
};
use commands::config_subcommand::*;
#[derive(Subcommand)]
enum Commands {
/// Config management (create, update, list, delete)
#[command(subcommand)]
Config(ConfigSubcommand),
Login {
#[arg(short, long)]
username: Option<String>,
#[arg(short, long)]
password: Option<String>,
remote: String,
},
/// Logout and remove cached credentials for a remote
Logout {
remote: String,
},
Upload {
#[arg(short, long)]
recursive: bool,
#[arg(short = 's', long)]
skip_if_exists: bool,
local_path: String,
remote_path: String,
},
Download {
#[arg(short, long)]
recursive: bool,
#[arg(short = 's', long)]
skip_if_exists: bool,
#[arg(short = 'p', long)]
password: Option<String>,
#[arg(long)]
exclude: Vec<String>,
remote_path: String,
local_path: String,
},
Ls {
remote_path: String,
#[arg(short = 'p', long)]
password: Option<String>,
#[arg(short = 'J', long = "json")]
json: bool,
#[arg(short = 'r', long)]
recursive: bool,
#[arg(short = 'q', long)]
quiet: bool,
},
Whoami {
remote: String,
},
Labs {
remote: String,
},
Chacl {
/// Access level key: private, public, pw_open, cbs_open, 5kikan_open,
/// cbs_or_pw_open, 5kikan_or_pw_open, storage
access_level_key: String,
#[arg(short, long)]
recursive: bool,
#[arg(short = 'p', long)]
password: Option<String>,
remote_path: String,
},
Metadata {
#[arg(short = 'p', long)]
password: Option<String>,
remote_path: String,
},
Mkdir {
remote_path: String,
},
Rm {
#[arg(short, long)]
recursive: bool,
remote_path: String,
},
Mv {
src_path: String,
dest_path: String,
},
Cp {
#[arg(short, long)]
recursive: bool,
src_path: String,
dest_path: String,
},
/// Show metadata for a remote file
FileMetadata {
#[arg(short = 'p', long)]
password: Option<String>,
remote_path: String,
},
/// Show the version of this tool
Version,
/// Update this binary to the latest release
#[command(name = "selfupdate")]
SelfUpdate {
/// Skip the confirmation prompt
#[arg(short = 'y', long)]
yes: bool,
},
}
/// Print the error message in Python-compatible format and exit with code 2.
/// JSON deserialization errors show a friendlier message matching Python's JSONDecodeError handling.
fn handle_error(e: Box<dyn std::error::Error>) -> ! {
if is_json_error(&*e) {
eprintln!(
"Unexpected response returned. Please check the configuration or the server's operational status."
);
} else {
eprintln!("Error: {}", e);
}
std::process::exit(2);
}
/// Walk the error source chain to detect serde_json parse errors.
fn is_json_error(e: &(dyn std::error::Error + 'static)) -> bool {
let mut source: Option<&(dyn std::error::Error + 'static)> = Some(e);
while let Some(err) = source {
if err.downcast_ref::<serde_json::Error>().is_some() {
return true;
match cli.command {
Commands::Config(subcmd) => {
use commands::config_subcommand::ConfigSubcommand;
match subcmd {
ConfigSubcommand::Create(args) => {
if let Err(e) = commands::config::config_create(&args.remote, &args.url) {
handle_error(e);
}
}
ConfigSubcommand::Update(args) => {
if let Err(e) = commands::config::config_update(&args.remote, &args.url) {
handle_error(e);
}
}
ConfigSubcommand::List(_) => {
if let Err(e) = commands::config::config_list() {
handle_error(e);
}
}
ConfigSubcommand::Delete(args) => {
if let Err(e) = commands::config::config_delete(&args.remote) {
handle_error(e);
}
}
}
}
source = err.source();
}
false
}
fn main() {
// Load .env file from current directory (silently ignore if not present).
dotenvy::dotenv().ok();
// Exit with code 130 on Ctrl+C, matching Python's KeyboardInterrupt handling.
ctrlc::set_handler(|| {
std::process::exit(130);
})
.ok();
let cli = Cli::parse();
match &cli.command {
Commands::Config(subcmd) => match subcmd {
ConfigSubcommand::Create(args) => {
if let Err(e) = crate::commands::config::config_create(&args.remote, &args.url) {
handle_error(e);
}
}
ConfigSubcommand::Update(args) => {
if let Err(e) = crate::commands::config::config_update(&args.remote, &args.url) {
handle_error(e);
}
}
ConfigSubcommand::List(_) => {
if let Err(e) = crate::commands::config::config_list() {
handle_error(e);
}
}
ConfigSubcommand::Delete(args) => {
if let Err(e) = crate::commands::config::config_delete(&args.remote) {
handle_error(e);
}
}
},
Commands::Login {
username,
password,
remote,
} => {
let remote = remote.trim_end_matches(':');
use std::io::{self, Write};
let username_val: String = match username {
Some(u) => u.clone(),
None => {
print!("Username: ");
io::stdout().flush().unwrap();
let mut s = String::new();
io::stdin().read_line(&mut s).unwrap();
s.trim().to_string()
}
};
let password_val: String = match password {
Some(p) => p.clone(),
None => {
rpassword::prompt_password("Password: ").unwrap()
}
};
let rt = tokio::runtime::Runtime::new().unwrap();
if let Err(e) =
rt.block_on(commands::login::login(&username_val, &password_val, remote))
{
let remote = remote.trim_end_matches(':').to_string();
if let Err(e) = build_rt().block_on(commands::login::run_login(
username.as_deref(),
password.as_deref(),
&remote,
)) {
handle_error(e);
}
}
Commands::Logout { remote } => {
let remote = remote.trim_end_matches(':').to_string();
if let Err(e) = commands::logout::logout(&remote) {
handle_error(e);
}
}
@@ -215,15 +70,12 @@ fn main() {
recursive,
skip_if_exists,
} => {
if let Err(e) = tokio::runtime::Runtime::new()
.unwrap()
.block_on(commands::upload::upload(
local_path,
remote_path,
*recursive,
*skip_if_exists,
))
{
if let Err(e) = build_rt().block_on(commands::upload::upload(
&local_path,
&remote_path,
recursive,
skip_if_exists,
)) {
handle_error(e);
}
}
@@ -239,17 +91,14 @@ fn main() {
.iter()
.map(|e| e.trim_end_matches('/').to_lowercase())
.collect();
if let Err(e) = tokio::runtime::Runtime::new()
.unwrap()
.block_on(commands::download::download(
remote_path,
local_path,
*recursive,
*skip_if_exists,
password.as_deref(),
excludes,
))
{
if let Err(e) = build_rt().block_on(commands::download::download(
&remote_path,
&local_path,
recursive,
skip_if_exists,
password.as_deref(),
excludes,
)) {
handle_error(e);
}
}
@@ -260,59 +109,55 @@ fn main() {
recursive,
quiet,
} => {
if let Err(e) = tokio::runtime::Runtime::new()
.unwrap()
.block_on(commands::ls::ls(
remote_path,
password.as_deref(),
*json,
*recursive,
*quiet,
))
{
if let Err(e) = build_rt().block_on(commands::ls::ls(
&remote_path,
password.as_deref(),
json,
recursive,
quiet,
)) {
handle_error(e);
}
}
Commands::Whoami { remote } => {
let remote = remote.trim_end_matches(':');
let rt = tokio::runtime::Runtime::new().unwrap();
if let Err(e) = rt.block_on(commands::whoami::whoami(remote)) {
let remote = remote.trim_end_matches(':').to_string();
if let Err(e) = build_rt().block_on(commands::whoami::whoami(&remote)) {
handle_error(e);
}
}
Commands::Labs { remote } => {
let remote = remote.trim_end_matches(':');
let rt = tokio::runtime::Runtime::new().unwrap();
if let Err(e) = rt.block_on(commands::labs::labs(remote)) {
let remote = remote.trim_end_matches(':').to_string();
if let Err(e) = build_rt().block_on(commands::labs::labs(&remote)) {
handle_error(e);
}
}
Commands::Chacl {
access_level_key,
recursive,
password,
remote_path,
} => {
let rt = tokio::runtime::Runtime::new().unwrap();
if let Err(e) = rt.block_on(crate::commands::chacl::chacl(
remote_path,
access_level_key,
*recursive,
if let Err(e) = build_rt().block_on(commands::chacl::chacl(
&remote_path,
&access_level_key,
recursive,
password.as_deref(),
)) {
handle_error(e);
}
}
Commands::Metadata { remote_path, password } => {
let rt = tokio::runtime::Runtime::new().unwrap();
if let Err(e) = rt.block_on(crate::commands::metadata::metadata(remote_path, password.as_deref())) {
Commands::Metadata {
remote_path,
password,
} => {
if let Err(e) = build_rt()
.block_on(commands::metadata::metadata(&remote_path, password.as_deref()))
{
handle_error(e);
}
}
Commands::Mkdir { remote_path } => {
let rt = tokio::runtime::Runtime::new().unwrap();
if let Err(e) = rt.block_on(crate::commands::mkdir::mkdir(remote_path)) {
if let Err(e) = build_rt().block_on(commands::mkdir::mkdir(&remote_path)) {
handle_error(e);
}
}
@@ -320,20 +165,14 @@ fn main() {
recursive,
remote_path,
} => {
let rt = tokio::runtime::Runtime::new().unwrap();
if let Err(e) = rt.block_on(crate::commands::rm::rm(remote_path, *recursive)) {
handle_error(e);
}
}
Commands::Logout { remote } => {
let remote = remote.trim_end_matches(':');
if let Err(e) = crate::commands::logout::logout(remote) {
if let Err(e) = build_rt().block_on(commands::rm::rm(&remote_path, recursive)) {
handle_error(e);
}
}
Commands::Mv { src_path, dest_path } => {
let rt = tokio::runtime::Runtime::new().unwrap();
if let Err(e) = rt.block_on(crate::commands::mv::mv(src_path, dest_path)) {
if let Err(e) =
build_rt().block_on(commands::mv::mv(&src_path, &dest_path))
{
handle_error(e);
}
}
@@ -342,14 +181,20 @@ fn main() {
dest_path,
recursive,
} => {
let rt = tokio::runtime::Runtime::new().unwrap();
if let Err(e) = rt.block_on(crate::commands::cp::cp(src_path, dest_path, *recursive)) {
if let Err(e) =
build_rt().block_on(commands::cp::cp(&src_path, &dest_path, recursive))
{
handle_error(e);
}
}
Commands::FileMetadata { remote_path, password } => {
let rt = tokio::runtime::Runtime::new().unwrap();
if let Err(e) = rt.block_on(crate::commands::file_metadata::file_metadata(remote_path, password.as_deref())) {
Commands::FileMetadata {
remote_path,
password,
} => {
if let Err(e) = build_rt().block_on(commands::file_metadata::file_metadata(
&remote_path,
password.as_deref(),
)) {
handle_error(e);
}
}
@@ -357,10 +202,19 @@ fn main() {
commands::version::version();
}
Commands::SelfUpdate { yes } => {
let rt = tokio::runtime::Runtime::new().unwrap();
if let Err(e) = rt.block_on(commands::selfupdate::selfupdate(*yes)) {
if let Err(e) = build_rt().block_on(commands::selfupdate::selfupdate(yes)) {
handle_error(e.into());
}
}
}
}
fn main() {
// Load .env file from the current directory (silently ignore if not present).
dotenvy::dotenv().ok();
// Exit with code 130 on Ctrl+C, matching Python's KeyboardInterrupt handling.
ctrlc::set_handler(|| std::process::exit(130)).ok();
run(Cli::parse());
}
+19
View File
@@ -13,3 +13,22 @@ pub struct Laboratory {
pub struct Laboratories {
pub items: Vec<Laboratory>,
}
/// Convert an API `Laboratory` into its cache representation.
impl From<&Laboratory> for crate::cache::CacheLaboratory {
    fn from(lab: &Laboratory) -> Self {
        Self {
            id: lab.id,
            name: lab.name.clone(),
            pi_name: lab.pi_name.clone(),
            full_name: lab.full_name.clone(),
        }
    }
}
/// Convert an API `Laboratories` list into the cache wrapper form.
impl From<&Laboratories> for crate::cache::CacheLabsWrapper {
    fn from(labs: &Laboratories) -> Self {
        let items = labs
            .items
            .iter()
            .map(crate::cache::CacheLaboratory::from)
            .collect();
        Self { items }
    }
}
+11
View File
@@ -8,3 +8,14 @@ pub struct User {
pub laboratory_ids: Vec<u32>,
pub is_reviewer: bool,
}
/// Convert an API `User` into its cache representation.
impl From<&User> for crate::cache::CacheUser {
    fn from(user: &User) -> Self {
        Self {
            id: user.id,
            username: user.username.clone(),
            laboratory_ids: user.laboratory_ids.clone(),
            is_reviewer: user.is_reviewer,
        }
    }
}
+8 -5
View File
@@ -14,16 +14,19 @@ impl Settings {
let config_dirname = std::env::var("MDRS_CLIENT_CONFIG_DIRNAME")
.ok()
.map(|s| {
let expanded = if s.starts_with("~/") {
if s.starts_with("~/") {
dirs::home_dir()
.unwrap()
.unwrap_or_else(|| std::path::PathBuf::from("."))
.join(&s[2..])
} else {
std::path::PathBuf::from(&s)
};
expanded
}
})
.unwrap_or_else(|| dirs::home_dir().unwrap().join(".mdrs-client"));
.unwrap_or_else(|| {
dirs::home_dir()
.unwrap_or_else(|| std::path::PathBuf::from("."))
.join(".mdrs-client")
});
let concurrent = std::env::var("MDRS_CLIENT_CONCURRENT")
.ok()
+4 -3
View File
@@ -2,6 +2,7 @@
use base64::{Engine, engine::general_purpose::URL_SAFE_NO_PAD};
use std::time::{SystemTime, UNIX_EPOCH};
use anyhow::{anyhow, bail};
fn now_secs() -> i64 {
SystemTime::now()
@@ -11,16 +12,16 @@ fn now_secs() -> i64 {
}
/// Decode the `exp` field from a JWT payload without signature verification.
pub fn jwt_exp(token: &str) -> Result<i64, Box<dyn std::error::Error>> {
pub fn jwt_exp(token: &str) -> Result<i64, anyhow::Error> {
let parts: Vec<&str> = token.split('.').collect();
if parts.len() < 2 {
return Err("Invalid JWT: expected at least 2 dot-separated parts".into());
bail!("Invalid JWT: expected at least 2 dot-separated parts");
}
let payload_bytes = URL_SAFE_NO_PAD.decode(parts[1])?;
let json: serde_json::Value = serde_json::from_slice(&payload_bytes)?;
let exp = json["exp"]
.as_i64()
.ok_or("JWT payload missing 'exp' field")?;
.ok_or_else(|| anyhow!("JWT payload missing 'exp' field"))?;
Ok(exp)
}