Compare commits
23 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
beee9b4f41
|
|||
|
7db5c4d53f
|
|||
|
e3cd864a0c
|
|||
|
32149109b4
|
|||
|
3f2ca938bd
|
|||
|
4e73766732
|
|||
|
a67f9a72a6
|
|||
|
d05bd8a08d
|
|||
|
723017a11c
|
|||
| 769a5a68e2 | |||
|
e8fd359f54
|
|||
|
ecd244491b
|
|||
|
d58acd70e5
|
|||
|
0da6a10898
|
|||
|
1d81216c97
|
|||
|
f0671c06ad
|
|||
|
e7692c109d
|
|||
|
3a3186b24a
|
|||
|
032f066307
|
|||
|
3578a39d27
|
|||
|
7947c3bae9
|
|||
|
0d474e7913
|
|||
|
65c0626910
|
@@ -0,0 +1,63 @@
|
||||
name: Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- 'v*'
|
||||
|
||||
jobs:
|
||||
build-linux-x86_64:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install Rust stable
|
||||
run: |
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable
|
||||
echo "$HOME/.cargo/bin" >> "$GITHUB_PATH"
|
||||
- name: Add musl target
|
||||
run: rustup target add x86_64-unknown-linux-musl
|
||||
- name: Install musl tools
|
||||
run: sudo apt-get update && sudo apt-get install -y musl-tools
|
||||
- name: Build
|
||||
run: cargo build --release --target x86_64-unknown-linux-musl
|
||||
- name: Create archive
|
||||
run: |
|
||||
VERSION=${GITHUB_REF_NAME#v}
|
||||
TARGET=x86_64-unknown-linux-musl
|
||||
ARCHIVE="mdrs-${VERSION}-${TARGET}.tar.gz"
|
||||
tar -czf "${ARCHIVE}" -C target/${TARGET}/release mdrs
|
||||
echo "ARCHIVE=${ARCHIVE}" >> "$GITHUB_ENV"
|
||||
- name: Create release and upload asset
|
||||
uses: akkuman/gitea-release-action@v1
|
||||
with:
|
||||
token: ${{ github.token }}
|
||||
files: ${{ env.ARCHIVE }}
|
||||
|
||||
build-linux-aarch64:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Install Rust stable
|
||||
run: |
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain stable
|
||||
echo "$HOME/.cargo/bin" >> "$GITHUB_PATH"
|
||||
- name: Install cargo-zigbuild
|
||||
run: |
|
||||
pip3 install ziglang --break-system-packages
|
||||
cargo install cargo-zigbuild --locked
|
||||
- name: Add aarch64 musl target
|
||||
run: rustup target add aarch64-unknown-linux-musl
|
||||
- name: Build
|
||||
run: cargo zigbuild --release --target aarch64-unknown-linux-musl
|
||||
- name: Create archive
|
||||
run: |
|
||||
VERSION=${GITHUB_REF_NAME#v}
|
||||
TARGET=aarch64-unknown-linux-musl
|
||||
ARCHIVE="mdrs-${VERSION}-${TARGET}.tar.gz"
|
||||
tar -czf "${ARCHIVE}" -C target/${TARGET}/release mdrs
|
||||
echo "ARCHIVE=${ARCHIVE}" >> "$GITHUB_ENV"
|
||||
- name: Create release and upload asset
|
||||
uses: akkuman/gitea-release-action@v1
|
||||
with:
|
||||
token: ${{ github.token }}
|
||||
files: ${{ env.ARCHIVE }}
|
||||
@@ -36,3 +36,7 @@ rust-toolchain.toml
|
||||
perf.data
|
||||
perf.data.old
|
||||
flamegraph.svg
|
||||
|
||||
# Build Packages
|
||||
mdrs-*.tar.gz
|
||||
mdrs-*.zip
|
||||
|
||||
@@ -0,0 +1,113 @@
|
||||
# Agents Guidelines
|
||||
|
||||
## Project Overview
|
||||
|
||||
This project is a Rust command-line client for MDRS-based repositories.
|
||||
|
||||
- The binary entry point is `src/main.rs`
|
||||
- CLI argument parsing is defined in `src/cli.rs`
|
||||
- Command implementations live under `src/commands/`
|
||||
- API access code lives under `src/api/`
|
||||
- Shared domain types live under `src/models/`
|
||||
- Login cache and runtime settings live under `src/cache/` and `src/settings.rs`
|
||||
|
||||
## Development Workflow
|
||||
|
||||
### Cargo-first workflow
|
||||
|
||||
Prefer standard Cargo commands and existing project tooling over custom scripts
|
||||
or ad hoc edits.
|
||||
|
||||
Common commands:
|
||||
|
||||
- `cargo fmt`
|
||||
- `cargo test`
|
||||
- `cargo build`
|
||||
- `cargo run -- <args>`
|
||||
|
||||
### Formatting requirement
|
||||
|
||||
When you modify Rust code or any code that is formatted by the Rust toolchain,
|
||||
you must run `cargo fmt` before finishing the task.
|
||||
|
||||
### Validation
|
||||
|
||||
Use the narrowest relevant validation for the change:
|
||||
|
||||
- documentation-only changes do not require Cargo validation
|
||||
- for behavior changes, run the relevant Rust tests if they already exist
|
||||
- if no targeted test exists, use the smallest existing Cargo command that
|
||||
meaningfully validates the modified area
|
||||
|
||||
## Code Guidelines
|
||||
|
||||
### Reuse project patterns
|
||||
|
||||
Before adding new code, inspect the existing command, API, and model modules
|
||||
and follow their established patterns.
|
||||
|
||||
In particular:
|
||||
|
||||
- keep CLI behavior consistent with the existing `clap` command structure
|
||||
- prefer existing error handling patterns based on `anyhow`
|
||||
- keep async behavior aligned with the current `tokio`-based implementation
|
||||
- avoid duplicating logic that already exists in sibling command modules
|
||||
|
||||
### Comments and documentation
|
||||
|
||||
All comments written inside source code must be in English.
|
||||
|
||||
This includes:
|
||||
|
||||
- line comments
|
||||
- block comments
|
||||
- doc comments
|
||||
|
||||
Documentation file language may follow the needs of the project, but source
|
||||
code comments must stay in English.
|
||||
|
||||
## Planning and Review
|
||||
|
||||
### Plan mode
|
||||
|
||||
When working in plan mode:
|
||||
|
||||
1. analyze the current codebase state first
|
||||
2. write or update the implementation plan in the session plan file
|
||||
3. require user review of the plan before starting implementation
|
||||
|
||||
Do not begin editing project files until the user has reviewed and approved the
|
||||
plan.
|
||||
|
||||
### Git commits
|
||||
|
||||
Never run `git commit` without explicit user confirmation.
|
||||
|
||||
This requirement still applies after a plan has been reviewed or approved. Plan
|
||||
approval is not commit approval.
|
||||
|
||||
## Git Commit Messages
|
||||
|
||||
Commit messages must always be written in English.
|
||||
|
||||
Follow the Conventional Commits format:
|
||||
|
||||
```text
|
||||
<type>(<scope>): <short description>
|
||||
```
|
||||
|
||||
Use clear, imperative subjects without a trailing period.
|
||||
|
||||
Examples:
|
||||
|
||||
- `feat(upload): add recursive directory support`
|
||||
- `fix(login): handle expired cache gracefully`
|
||||
- `docs(agents): add Rust project guidance`
|
||||
|
||||
## Practical Expectations for Agents
|
||||
|
||||
- make precise, minimal changes that fully solve the task
|
||||
- read nearby modules before changing structure or introducing helpers
|
||||
- prefer updating existing code paths over adding parallel implementations
|
||||
- preserve current CLI semantics unless the task explicitly changes them
|
||||
- surface uncertainties to the user instead of guessing on destructive actions
|
||||
Generated
+661
-15
File diff suppressed because it is too large
Load Diff
+7
-1
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "mdrs-client-rust"
|
||||
version = "0.1.0"
|
||||
version = "2.0.0"
|
||||
edition = "2024"
|
||||
license = "MIT"
|
||||
authors = ["Neuroinformatics Unit, RIKEN CBS"]
|
||||
@@ -28,3 +28,9 @@ fs2 = "0.4"
|
||||
ctrlc = "3"
|
||||
os_info = "3"
|
||||
dotenvy = "0.15"
|
||||
unicode-normalization = "0.1"
|
||||
self-replace = "1"
|
||||
tar = "0.4"
|
||||
flate2 = "1"
|
||||
zip = "2"
|
||||
tempfile = "3"
|
||||
|
||||
@@ -53,7 +53,7 @@ List registered remote hosts.
|
||||
|
||||
```shell
|
||||
mdrs config list
|
||||
mdrs config list -l
|
||||
mdrs config ls
|
||||
```
|
||||
|
||||
### config delete
|
||||
@@ -62,6 +62,7 @@ Remove a registered remote host.
|
||||
|
||||
```shell
|
||||
mdrs config delete neurodata
|
||||
mdrs config rm neurodata
|
||||
```
|
||||
|
||||
### login
|
||||
@@ -199,6 +200,24 @@ mdrs file-metadata neurodata:/NIU/Repository/TEST/dataset/sample.dat
|
||||
mdrs file-metadata -p SHARING_PASSWORD neurodata:/NIU/Repository/PW_Open/Readme.txt
|
||||
```
|
||||
|
||||
### version
|
||||
|
||||
Show the tool name and version number.
|
||||
|
||||
```shell
|
||||
mdrs version
|
||||
```
|
||||
|
||||
### selfupdate
|
||||
|
||||
Update the current `mdrs` binary to the latest published release for
|
||||
the same build target.
|
||||
|
||||
```shell
|
||||
mdrs selfupdate
|
||||
mdrs selfupdate -y
|
||||
```
|
||||
|
||||
### help
|
||||
|
||||
Show help for a command.
|
||||
|
||||
@@ -13,5 +13,10 @@ fn main() {
|
||||
.unwrap_or("unknown")
|
||||
.to_string();
|
||||
println!("cargo:rustc-env=RUSTC_VERSION={}", version);
|
||||
// Expose the build target triple so selfupdate can match release assets.
|
||||
println!(
|
||||
"cargo:rustc-env=BUILD_TARGET={}",
|
||||
std::env::var("TARGET").unwrap_or_default()
|
||||
);
|
||||
println!("cargo:rerun-if-changed=build.rs");
|
||||
}
|
||||
|
||||
@@ -0,0 +1,7 @@
|
||||
# Copy this file to .env and fill in your values.
|
||||
# .env is loaded automatically by the build-release scripts.
|
||||
# Environment variables already set in your shell take precedence.
|
||||
|
||||
GITEA_SERVER_URL=https://git.example.com
|
||||
GITEA_REPOSITORY=owner/mdrs-client-rust
|
||||
GITEA_TOKEN=your_token_here
|
||||
Executable
+104
@@ -0,0 +1,104 @@
|
||||
#!/usr/bin/env bash
|
||||
# Build release archives for Linux (x86_64 and aarch64, musl static linking).
|
||||
#
|
||||
# Usage:
|
||||
# ./scripts/build-release-linux.sh [VERSION]
|
||||
#
|
||||
# If VERSION is not provided, it is read from Cargo.toml.
|
||||
# Set GITEA_TOKEN (and optionally GITEA_SERVER_URL / GITEA_REPOSITORY)
|
||||
# to upload the archives to a Gitea release automatically.
|
||||
# These can be provided via scripts/.env.
|
||||
#
|
||||
# Prerequisites:
|
||||
# - cross (https://github.com/cross-rs/cross): cargo install cross
|
||||
# - Docker or Podman (required by cross)
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
|
||||
|
||||
# Load .env if present (existing env vars take precedence)
|
||||
if [[ -f "${SCRIPT_DIR}/.env" ]]; then
|
||||
while IFS='=' read -r key value; do
|
||||
[[ "${key}" =~ ^#.*$ || -z "${key}" ]] && continue
|
||||
key="${key%%[[:space:]]}"
|
||||
value="${value##[[:space:]]}"
|
||||
[[ -z "${!key+x}" ]] && export "${key}=${value}"
|
||||
done < "${SCRIPT_DIR}/.env"
|
||||
fi
|
||||
|
||||
# Determine version
|
||||
if [[ $# -ge 1 ]]; then
|
||||
VERSION="$1"
|
||||
else
|
||||
VERSION="$(grep -m1 '^version' "${REPO_ROOT}/Cargo.toml" | sed 's/.*= *"\(.*\)"/\1/')"
|
||||
fi
|
||||
|
||||
TAG="v${VERSION}"
|
||||
TARGETS=(x86_64-unknown-linux-musl aarch64-unknown-linux-musl)
|
||||
|
||||
cd "${REPO_ROOT}"
|
||||
|
||||
# Verify that cross is available
|
||||
if ! command -v cross &> /dev/null; then
|
||||
echo "Error: 'cross' is not installed." >&2
|
||||
echo "Install it with: cargo install cross" >&2
|
||||
echo "Docker or Podman is also required." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "==> Building mdrs ${VERSION} for Linux"
|
||||
|
||||
ARCHIVES=()
|
||||
for TARGET in "${TARGETS[@]}"; do
|
||||
echo "--- Target: ${TARGET}"
|
||||
rustup target add "${TARGET}"
|
||||
cross build --release --target "${TARGET}"
|
||||
|
||||
ARCHIVE="mdrs-${VERSION}-${TARGET}.tar.gz"
|
||||
tar -czf "${ARCHIVE}" -C "target/${TARGET}/release" mdrs
|
||||
ARCHIVES+=("${ARCHIVE}")
|
||||
echo " Created: ${ARCHIVE}"
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "==> Archives ready:"
|
||||
for A in "${ARCHIVES[@]}"; do echo " ${A}"; done
|
||||
|
||||
# Upload to Gitea if token is provided
|
||||
if [[ -z "${GITEA_TOKEN:-}" ]]; then
|
||||
echo ""
|
||||
echo "GITEA_TOKEN not set — skipping upload."
|
||||
echo "Set GITEA_TOKEN (and GITEA_SERVER_URL, GITEA_REPOSITORY) in scripts/.env to enable upload."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
: "${GITEA_SERVER_URL:?GITEA_SERVER_URL is required for upload}"
|
||||
: "${GITEA_REPOSITORY:?GITEA_REPOSITORY is required for upload}"
|
||||
|
||||
echo ""
|
||||
echo "==> Creating Gitea release ${TAG} ..."
|
||||
curl -sf -X POST \
|
||||
-H "Authorization: Bearer ${GITEA_TOKEN}" \
|
||||
-H "Content-Type: application/json" \
|
||||
"${GITEA_SERVER_URL}/api/v1/repos/${GITEA_REPOSITORY}/releases" \
|
||||
-d "{\"tag_name\": \"${TAG}\", \"name\": \"${TAG}\"}" > /dev/null || true
|
||||
|
||||
RELEASE_ID="$(curl -sf \
|
||||
-H "Authorization: Bearer ${GITEA_TOKEN}" \
|
||||
"${GITEA_SERVER_URL}/api/v1/repos/${GITEA_REPOSITORY}/releases/tags/${TAG}" \
|
||||
| python3 -c 'import sys,json; print(json.load(sys.stdin)["id"])')"
|
||||
|
||||
echo "==> Uploading assets (release id: ${RELEASE_ID}) ..."
|
||||
for ARCHIVE in "${ARCHIVES[@]}"; do
|
||||
echo " Uploading ${ARCHIVE} ..."
|
||||
curl -sf -X POST \
|
||||
-H "Authorization: Bearer ${GITEA_TOKEN}" \
|
||||
-F "attachment=@${ARCHIVE}" \
|
||||
"${GITEA_SERVER_URL}/api/v1/repos/${GITEA_REPOSITORY}/releases/${RELEASE_ID}/assets" > /dev/null
|
||||
echo " Done."
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "==> Upload complete: ${GITEA_SERVER_URL}/${GITEA_REPOSITORY}/releases/tag/${TAG}"
|
||||
Executable
+92
@@ -0,0 +1,92 @@
|
||||
#!/usr/bin/env bash
|
||||
# Build release archives for macOS (x86_64 and aarch64).
|
||||
#
|
||||
# Usage:
|
||||
# ./scripts/build-release-macos.sh [VERSION]
|
||||
#
|
||||
# If VERSION is not provided, it is read from Cargo.toml.
|
||||
# Set GITEA_TOKEN (and optionally GITEA_SERVER_URL / GITEA_REPOSITORY)
|
||||
# to upload the archives to a Gitea release automatically.
|
||||
# These can be provided via scripts/.env.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
REPO_ROOT="$(cd "${SCRIPT_DIR}/.." && pwd)"
|
||||
|
||||
# Load .env if present (existing env vars take precedence)
|
||||
if [[ -f "${SCRIPT_DIR}/.env" ]]; then
|
||||
while IFS='=' read -r key value; do
|
||||
[[ "${key}" =~ ^#.*$ || -z "${key}" ]] && continue
|
||||
key="${key%%[[:space:]]}"
|
||||
value="${value##[[:space:]]}"
|
||||
[[ -z "${!key+x}" ]] && export "${key}=${value}"
|
||||
done < "${SCRIPT_DIR}/.env"
|
||||
fi
|
||||
|
||||
# Determine version
|
||||
if [[ $# -ge 1 ]]; then
|
||||
VERSION="$1"
|
||||
else
|
||||
VERSION="$(grep -m1 '^version' "${REPO_ROOT}/Cargo.toml" | sed 's/.*= *"\(.*\)"/\1/')"
|
||||
fi
|
||||
|
||||
TAG="v${VERSION}"
|
||||
TARGETS=(x86_64-apple-darwin aarch64-apple-darwin)
|
||||
|
||||
cd "${REPO_ROOT}"
|
||||
|
||||
echo "==> Building mdrs ${VERSION} for macOS"
|
||||
|
||||
ARCHIVES=()
|
||||
for TARGET in "${TARGETS[@]}"; do
|
||||
echo "--- Target: ${TARGET}"
|
||||
rustup target add "${TARGET}"
|
||||
cargo build --release --target "${TARGET}"
|
||||
|
||||
ARCHIVE="mdrs-${VERSION}-${TARGET}.tar.gz"
|
||||
tar -czf "${ARCHIVE}" -C "target/${TARGET}/release" mdrs
|
||||
ARCHIVES+=("${ARCHIVE}")
|
||||
echo " Created: ${ARCHIVE}"
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "==> Archives ready:"
|
||||
for A in "${ARCHIVES[@]}"; do echo " ${A}"; done
|
||||
|
||||
# Upload to Gitea if token is provided
|
||||
if [[ -z "${GITEA_TOKEN:-}" ]]; then
|
||||
echo ""
|
||||
echo "GITEA_TOKEN not set — skipping upload."
|
||||
echo "Set GITEA_TOKEN (and GITEA_SERVER_URL, GITEA_REPOSITORY) in scripts/.env to enable upload."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
: "${GITEA_SERVER_URL:?GITEA_SERVER_URL is required for upload}"
|
||||
: "${GITEA_REPOSITORY:?GITEA_REPOSITORY is required for upload}"
|
||||
|
||||
echo ""
|
||||
echo "==> Creating Gitea release ${TAG} ..."
|
||||
curl -sf -X POST \
|
||||
-H "Authorization: Bearer ${GITEA_TOKEN}" \
|
||||
-H "Content-Type: application/json" \
|
||||
"${GITEA_SERVER_URL}/api/v1/repos/${GITEA_REPOSITORY}/releases" \
|
||||
-d "{\"tag_name\": \"${TAG}\", \"name\": \"${TAG}\"}" > /dev/null || true
|
||||
|
||||
RELEASE_ID="$(curl -sf \
|
||||
-H "Authorization: Bearer ${GITEA_TOKEN}" \
|
||||
"${GITEA_SERVER_URL}/api/v1/repos/${GITEA_REPOSITORY}/releases/tags/${TAG}" \
|
||||
| python3 -c 'import sys,json; print(json.load(sys.stdin)["id"])')"
|
||||
|
||||
echo "==> Uploading assets (release id: ${RELEASE_ID}) ..."
|
||||
for ARCHIVE in "${ARCHIVES[@]}"; do
|
||||
echo " Uploading ${ARCHIVE} ..."
|
||||
curl -sf -X POST \
|
||||
-H "Authorization: Bearer ${GITEA_TOKEN}" \
|
||||
-F "attachment=@${ARCHIVE}" \
|
||||
"${GITEA_SERVER_URL}/api/v1/repos/${GITEA_REPOSITORY}/releases/${RELEASE_ID}/assets" > /dev/null
|
||||
echo " Done."
|
||||
done
|
||||
|
||||
echo ""
|
||||
echo "==> Upload complete: ${GITEA_SERVER_URL}/${GITEA_REPOSITORY}/releases/tag/${TAG}"
|
||||
@@ -0,0 +1,100 @@
|
||||
# Build release archive for Windows (x86_64-pc-windows-msvc).
|
||||
#
|
||||
# Usage:
|
||||
# .\scripts\build-release-windows.ps1 [-Version <VERSION>]
|
||||
#
|
||||
# If -Version is not provided, it is read from Cargo.toml.
|
||||
# Set GITEA_TOKEN (and optionally GITEA_SERVER_URL / GITEA_REPOSITORY)
|
||||
# to upload the archive to a Gitea release automatically.
|
||||
# These can be provided via scripts\.env.
|
||||
|
||||
param(
|
||||
[string]$Version = ""
|
||||
)
|
||||
|
||||
Set-StrictMode -Version Latest
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
$ScriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
|
||||
$RepoRoot = Split-Path -Parent $ScriptDir
|
||||
|
||||
# Load .env if present (existing env vars take precedence)
|
||||
$EnvFile = Join-Path $ScriptDir ".env"
|
||||
if (Test-Path $EnvFile) {
|
||||
Get-Content $EnvFile | ForEach-Object {
|
||||
if ($_ -match '^\s*#' -or $_ -match '^\s*$') { return }
|
||||
$parts = $_ -split '=', 2
|
||||
$key = $parts[0].Trim()
|
||||
$value = $parts[1].Trim()
|
||||
if (-not [System.Environment]::GetEnvironmentVariable($key)) {
|
||||
[System.Environment]::SetEnvironmentVariable($key, $value, "Process")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Determine version
|
||||
if (-not $Version) {
|
||||
$cargoToml = Get-Content (Join-Path $RepoRoot "Cargo.toml") -Raw
|
||||
if ($cargoToml -match 'version\s*=\s*"([^"]+)"') {
|
||||
$Version = $Matches[1]
|
||||
} else {
|
||||
Write-Error "Could not determine version from Cargo.toml"
|
||||
exit 1
|
||||
}
|
||||
}
|
||||
|
||||
$Tag = "v$Version"
|
||||
$Target = "x86_64-pc-windows-msvc"
|
||||
|
||||
Set-Location $RepoRoot
|
||||
|
||||
Write-Host "==> Building mdrs $Version for Windows ($Target)"
|
||||
|
||||
rustup target add $Target
|
||||
cargo build --release --target $Target
|
||||
|
||||
$Archive = "mdrs-$Version-$Target.zip"
|
||||
Compress-Archive -Force -Path "target\$Target\release\mdrs.exe" -DestinationPath $Archive
|
||||
Write-Host " Created: $Archive"
|
||||
|
||||
# Upload to Gitea if token is provided
|
||||
$GiteaToken = [System.Environment]::GetEnvironmentVariable("GITEA_TOKEN")
|
||||
if (-not $GiteaToken) {
|
||||
Write-Host ""
|
||||
Write-Host "GITEA_TOKEN not set -- skipping upload."
|
||||
Write-Host "Set GITEA_TOKEN (and GITEA_SERVER_URL, GITEA_REPOSITORY) in scripts\.env to enable upload."
|
||||
exit 0
|
||||
}
|
||||
|
||||
$ServerUrl = [System.Environment]::GetEnvironmentVariable("GITEA_SERVER_URL")
|
||||
$Repository = [System.Environment]::GetEnvironmentVariable("GITEA_REPOSITORY")
|
||||
if (-not $ServerUrl) { Write-Error "GITEA_SERVER_URL is required for upload"; exit 1 }
|
||||
if (-not $Repository) { Write-Error "GITEA_REPOSITORY is required for upload"; exit 1 }
|
||||
|
||||
$Headers = @{ Authorization = "Bearer $GiteaToken"; "Content-Type" = "application/json" }
|
||||
|
||||
Write-Host ""
|
||||
Write-Host "==> Creating Gitea release $Tag ..."
|
||||
try {
|
||||
Invoke-RestMethod -Method Post -Uri "$ServerUrl/api/v1/repos/$Repository/releases" `
|
||||
-Headers $Headers `
|
||||
-Body (ConvertTo-Json @{ tag_name = $Tag; name = $Tag }) | Out-Null
|
||||
} catch {
|
||||
# Release may already exist; continue
|
||||
}
|
||||
|
||||
$Release = Invoke-RestMethod -Method Get -Uri "$ServerUrl/api/v1/repos/$Repository/releases/tags/$Tag" `
|
||||
-Headers @{ Authorization = "Bearer $GiteaToken" }
|
||||
$ReleaseId = $Release.id
|
||||
|
||||
Write-Host "==> Uploading $Archive (release id: $ReleaseId) ..."
|
||||
$ArchivePath = Join-Path $RepoRoot $Archive
|
||||
& curl.exe -sf -X POST `
|
||||
-H "Authorization: Bearer $GiteaToken" `
|
||||
-F "attachment=@$ArchivePath" `
|
||||
"$ServerUrl/api/v1/repos/$Repository/releases/$ReleaseId/assets" | Out-Null
|
||||
if ($LASTEXITCODE -ne 0) { Write-Error "Upload failed (exit code $LASTEXITCODE)"; exit 1 }
|
||||
Write-Host " Done."
|
||||
|
||||
Write-Host ""
|
||||
Write-Host "==> Upload complete: $ServerUrl/$Repository/releases/tag/$Tag"
|
||||
+72
-19
@@ -1,5 +1,7 @@
|
||||
use crate::connection::MDRSConnection;
|
||||
use crate::connection::{ApiRequestLimiter, MDRSConnection};
|
||||
pub use crate::models::file::File;
|
||||
use anyhow::bail;
|
||||
use unicode_normalization::UnicodeNormalization;
|
||||
|
||||
#[derive(serde::Deserialize)]
|
||||
struct FileListResponse {
|
||||
@@ -8,21 +10,19 @@ struct FileListResponse {
|
||||
}
|
||||
|
||||
impl MDRSConnection {
|
||||
/// List all files in a folder, following pagination automatically
|
||||
pub async fn list_all_files(
|
||||
&self,
|
||||
folder_id: &str,
|
||||
) -> Result<Vec<File>, Box<dyn std::error::Error>> {
|
||||
/// List all files in a folder, following pagination automatically.
|
||||
pub async fn list_all_files(&self, folder_id: &str) -> Result<Vec<File>, anyhow::Error> {
|
||||
let mut all_files = Vec::new();
|
||||
let mut page: u32 = 1;
|
||||
loop {
|
||||
let resp = self
|
||||
.client
|
||||
.get(self.build_url("v3/files/"))
|
||||
.headers(self.prepare_headers())
|
||||
.query(&[("folder_id", folder_id), ("page", &page.to_string())])
|
||||
.send()
|
||||
.await?;
|
||||
let params = [
|
||||
("folder_id", folder_id.to_string()),
|
||||
("page", page.to_string()),
|
||||
];
|
||||
let resp = self.get_with_query("v3/files/", ¶ms).await?;
|
||||
if !resp.status().is_success() {
|
||||
anyhow::bail!("List files failed: {}", resp.status());
|
||||
}
|
||||
let list: FileListResponse = resp.json().await?;
|
||||
let has_next = list.next.is_some();
|
||||
all_files.extend(list.results);
|
||||
@@ -34,26 +34,79 @@ impl MDRSConnection {
|
||||
Ok(all_files)
|
||||
}
|
||||
|
||||
pub async fn upload_file(
|
||||
/// List all files in a folder while consuming the shared API concurrency budget.
|
||||
pub async fn list_all_files_limited(
|
||||
&self,
|
||||
folder_id: &str,
|
||||
limiter: &ApiRequestLimiter,
|
||||
) -> Result<Vec<File>, anyhow::Error> {
|
||||
let mut all_files = Vec::new();
|
||||
let mut page: u32 = 1;
|
||||
loop {
|
||||
let params = [
|
||||
("folder_id", folder_id.to_string()),
|
||||
("page", page.to_string()),
|
||||
];
|
||||
let _permit = limiter.acquire().await?;
|
||||
let resp = self.get_with_query("v3/files/", ¶ms).await?;
|
||||
if !resp.status().is_success() {
|
||||
anyhow::bail!("List files failed: {}", resp.status());
|
||||
}
|
||||
let list: FileListResponse = resp.json().await?;
|
||||
let has_next = list.next.is_some();
|
||||
all_files.extend(list.results);
|
||||
if !has_next {
|
||||
break;
|
||||
}
|
||||
page += 1;
|
||||
}
|
||||
Ok(all_files)
|
||||
}
|
||||
|
||||
/// Upload a local file into the given remote folder while consuming the
|
||||
/// shared API concurrency budget.
|
||||
pub async fn upload_file_limited(
|
||||
&self,
|
||||
folder_id: &str,
|
||||
file_path: &str,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
limiter: &ApiRequestLimiter,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
use anyhow::{anyhow, bail};
|
||||
use reqwest::multipart;
|
||||
let file_name = std::path::Path::new(file_path)
|
||||
let file_name: String = std::path::Path::new(file_path)
|
||||
.file_name()
|
||||
.unwrap()
|
||||
.ok_or_else(|| anyhow!("Invalid file path: `{}`", file_path))?
|
||||
.to_string_lossy()
|
||||
.to_string();
|
||||
.nfc()
|
||||
.collect();
|
||||
let file_bytes = tokio::fs::read(file_path).await?;
|
||||
let part = multipart::Part::bytes(file_bytes).file_name(file_name.clone());
|
||||
let form = multipart::Form::new()
|
||||
.text("folder_id", folder_id.to_string())
|
||||
.part("file", part);
|
||||
let _permit = limiter.acquire().await?;
|
||||
let resp = self.post_multipart("v3/files/", form).await?;
|
||||
if !resp.status().is_success() {
|
||||
return Err(format!("Upload failed: {}", resp.status()).into());
|
||||
bail!("Upload failed: {}", resp.status());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Download a file while consuming the shared API concurrency budget.
|
||||
pub async fn download_file_limited(
|
||||
&self,
|
||||
url: &str,
|
||||
dest: &str,
|
||||
limiter: &ApiRequestLimiter,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
let _permit = limiter.acquire().await?;
|
||||
let resp = self.get_url(url).await?;
|
||||
if !resp.status().is_success() {
|
||||
bail!("Download failed: {}", resp.status());
|
||||
}
|
||||
let bytes = resp.bytes().await?;
|
||||
drop(_permit);
|
||||
tokio::fs::write(dest, &bytes).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
+133
-15
@@ -1,5 +1,6 @@
|
||||
use crate::connection::MDRSConnection;
|
||||
use crate::connection::{ApiRequestLimiter, MDRSConnection};
|
||||
pub use crate::models::folder::{FolderDetail, FolderSimple};
|
||||
use anyhow::{anyhow, bail};
|
||||
|
||||
impl MDRSConnection {
|
||||
/// List folders matching the given path under a laboratory (GET v3/folders/?path=...&laboratory_id=...)
|
||||
@@ -7,23 +8,140 @@ impl MDRSConnection {
|
||||
&self,
|
||||
lab_id: u32,
|
||||
path: &str,
|
||||
) -> Result<Vec<FolderSimple>, reqwest::Error> {
|
||||
let resp = self
|
||||
.client
|
||||
.get(self.build_url("v3/folders/"))
|
||||
.headers(self.prepare_headers())
|
||||
.query(&[
|
||||
("laboratory_id", lab_id.to_string()),
|
||||
("path", path.to_string()),
|
||||
])
|
||||
.send()
|
||||
.await?;
|
||||
resp.json::<Vec<FolderSimple>>().await
|
||||
) -> Result<Vec<FolderSimple>, anyhow::Error> {
|
||||
let params = [
|
||||
("laboratory_id", lab_id.to_string()),
|
||||
("path", path.to_string()),
|
||||
];
|
||||
let resp = self.get_with_query("v3/folders/", ¶ms).await?;
|
||||
if !resp.status().is_success() {
|
||||
bail!("List folders failed: {}", resp.status());
|
||||
}
|
||||
Ok(resp.json::<Vec<FolderSimple>>().await?)
|
||||
}
|
||||
|
||||
/// List folders by path while consuming the shared API concurrency budget.
|
||||
pub async fn list_folders_by_path_limited(
|
||||
&self,
|
||||
lab_id: u32,
|
||||
path: &str,
|
||||
limiter: &ApiRequestLimiter,
|
||||
) -> Result<Vec<FolderSimple>, anyhow::Error> {
|
||||
let params = [
|
||||
("laboratory_id", lab_id.to_string()),
|
||||
("path", path.to_string()),
|
||||
];
|
||||
let _permit = limiter.acquire().await?;
|
||||
let resp = self.get_with_query("v3/folders/", ¶ms).await?;
|
||||
if !resp.status().is_success() {
|
||||
bail!("List folders failed: {}", resp.status());
|
||||
}
|
||||
Ok(resp.json::<Vec<FolderSimple>>().await?)
|
||||
}
|
||||
|
||||
/// Retrieve full folder details including sub_folders (GET v3/folders/{id}/)
|
||||
pub async fn retrieve_folder(&self, id: &str) -> Result<FolderDetail, reqwest::Error> {
|
||||
pub async fn retrieve_folder(&self, id: &str) -> Result<FolderDetail, anyhow::Error> {
|
||||
let resp = self.get(&format!("v3/folders/{}/", id)).await?;
|
||||
resp.json::<FolderDetail>().await
|
||||
if !resp.status().is_success() {
|
||||
bail!("Retrieve folder failed: {}", resp.status());
|
||||
}
|
||||
Ok(resp.json::<FolderDetail>().await?)
|
||||
}
|
||||
|
||||
/// Retrieve folder details while consuming the shared API concurrency budget.
|
||||
pub async fn retrieve_folder_limited(
|
||||
&self,
|
||||
id: &str,
|
||||
limiter: &ApiRequestLimiter,
|
||||
) -> Result<FolderDetail, anyhow::Error> {
|
||||
let _permit = limiter.acquire().await?;
|
||||
let resp = self.get(&format!("v3/folders/{}/", id)).await?;
|
||||
if !resp.status().is_success() {
|
||||
bail!("Retrieve folder failed: {}", resp.status());
|
||||
}
|
||||
Ok(resp.json::<FolderDetail>().await?)
|
||||
}
|
||||
|
||||
/// Create a new folder under `parent_id` (POST v3/folders/).
|
||||
pub async fn create_folder(
|
||||
&self,
|
||||
parent_id: &str,
|
||||
folder_name: &str,
|
||||
) -> Result<reqwest::Response, anyhow::Error> {
|
||||
let body = serde_json::json!({
|
||||
"name": folder_name,
|
||||
"parent_id": parent_id,
|
||||
"description": "",
|
||||
"template_id": -1,
|
||||
});
|
||||
self.post_json("v3/folders/", &body).await
|
||||
}
|
||||
|
||||
/// Create a new folder under `parent_id` and return its ID while consuming
|
||||
/// the shared API concurrency budget.
|
||||
pub async fn create_folder_id_limited(
|
||||
&self,
|
||||
parent_id: &str,
|
||||
folder_name: &str,
|
||||
limiter: &ApiRequestLimiter,
|
||||
) -> Result<String, anyhow::Error> {
|
||||
let body = serde_json::json!({
|
||||
"name": folder_name,
|
||||
"parent_id": parent_id,
|
||||
"description": "",
|
||||
"template_id": -1,
|
||||
});
|
||||
let _permit = limiter.acquire().await?;
|
||||
let resp = self.post_json("v3/folders/", &body).await?;
|
||||
if !resp.status().is_success() {
|
||||
bail!("Failed to create remote folder: {}", folder_name);
|
||||
}
|
||||
let json: serde_json::Value = resp.json().await?;
|
||||
json["id"]
|
||||
.as_str()
|
||||
.ok_or_else(|| anyhow!("No id in create_folder response for {}", folder_name))
|
||||
.map(|s| s.to_string())
|
||||
}
|
||||
|
||||
/// Authenticate against a password-locked folder (POST v3/folders/{id}/auth/).
|
||||
/// Returns `Err` if the password is incorrect or the request fails.
|
||||
pub async fn folder_auth(&self, folder_id: &str, password: &str) -> Result<(), anyhow::Error> {
|
||||
let resp = self
|
||||
.post_json(
|
||||
&format!("v3/folders/{}/auth/", folder_id),
|
||||
&serde_json::json!({"password": password}),
|
||||
)
|
||||
.await?;
|
||||
if resp.status() == reqwest::StatusCode::UNAUTHORIZED {
|
||||
bail!("Password is incorrect.");
|
||||
}
|
||||
if !resp.status().is_success() {
|
||||
bail!("Folder auth failed: {}", resp.status());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Authenticate against a locked folder while consuming the shared API
|
||||
/// concurrency budget.
|
||||
pub async fn folder_auth_limited(
|
||||
&self,
|
||||
folder_id: &str,
|
||||
password: &str,
|
||||
limiter: &ApiRequestLimiter,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
let _permit = limiter.acquire().await?;
|
||||
let resp = self
|
||||
.post_json(
|
||||
&format!("v3/folders/{}/auth/", folder_id),
|
||||
&serde_json::json!({"password": password}),
|
||||
)
|
||||
.await?;
|
||||
if resp.status() == reqwest::StatusCode::UNAUTHORIZED {
|
||||
bail!("Password is incorrect.");
|
||||
}
|
||||
if !resp.status().is_success() {
|
||||
bail!("Folder auth failed: {}", resp.status());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
+12
-8
@@ -9,17 +9,21 @@ struct LabListResponse {
|
||||
}
|
||||
|
||||
impl MDRSConnection {
|
||||
pub async fn list_laboratories(&self) -> Result<Laboratories, reqwest::Error> {
|
||||
pub async fn list_laboratories(&self) -> Result<Laboratories, anyhow::Error> {
|
||||
let resp = self.get("v3/laboratories/").await?;
|
||||
if !resp.status().is_success() {
|
||||
anyhow::bail!("List laboratories failed: {}", resp.status());
|
||||
}
|
||||
// The API may return a paginated object or a direct array
|
||||
let text = resp.text().await?;
|
||||
let items: Vec<Laboratory> = if let Ok(list) = serde_json::from_str::<Vec<Laboratory>>(&text) {
|
||||
list
|
||||
} else if let Ok(paged) = serde_json::from_str::<LabListResponse>(&text) {
|
||||
paged.results.unwrap_or_default()
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
let items: Vec<Laboratory> =
|
||||
if let Ok(list) = serde_json::from_str::<Vec<Laboratory>>(&text) {
|
||||
list
|
||||
} else if let Ok(paged) = serde_json::from_str::<LabListResponse>(&text) {
|
||||
paged.results.unwrap_or_default()
|
||||
} else {
|
||||
vec![]
|
||||
};
|
||||
Ok(Laboratories { items })
|
||||
}
|
||||
}
|
||||
|
||||
+7
-6
@@ -1,5 +1,6 @@
|
||||
use crate::connection::MDRSConnection;
|
||||
use crate::models::user::User as ModelUser;
|
||||
use anyhow::bail;
|
||||
use serde::Deserialize;
|
||||
|
||||
/// Full API response shape from GET v3/users/current/
|
||||
@@ -23,8 +24,11 @@ struct TokenRefreshResponse {
|
||||
|
||||
impl MDRSConnection {
|
||||
/// Fetch current user and return the slim 4-field model matching the Python cache format.
|
||||
pub async fn get_current_user(&self) -> Result<ModelUser, reqwest::Error> {
|
||||
pub async fn get_current_user(&self) -> Result<ModelUser, anyhow::Error> {
|
||||
let resp = self.get("v3/users/current/").await?;
|
||||
if !resp.status().is_success() {
|
||||
bail!("Get current user failed: {}", resp.status());
|
||||
}
|
||||
let obj = resp.json::<UsersApiCurrentResponse>().await?;
|
||||
let laboratory_ids = obj.laboratories.into_iter().map(|l| l.id).collect();
|
||||
Ok(ModelUser {
|
||||
@@ -37,10 +41,7 @@ impl MDRSConnection {
|
||||
|
||||
/// Refresh the access token using the refresh token.
|
||||
/// POST v3/users/token/refresh/ {refresh: ...} -> {access: new_access}
|
||||
pub async fn token_refresh(
|
||||
&self,
|
||||
refresh_token: &str,
|
||||
) -> Result<String, Box<dyn std::error::Error>> {
|
||||
pub async fn token_refresh(&self, refresh_token: &str) -> Result<String, anyhow::Error> {
|
||||
let body = serde_json::json!({ "refresh": refresh_token });
|
||||
let resp = self
|
||||
.client
|
||||
@@ -49,7 +50,7 @@ impl MDRSConnection {
|
||||
.send()
|
||||
.await?;
|
||||
if !resp.status().is_success() {
|
||||
return Err(format!("Token refresh failed: {}", resp.status()).into());
|
||||
bail!("Token refresh failed: {}", resp.status());
|
||||
}
|
||||
let r: TokenRefreshResponse = resp.json().await?;
|
||||
Ok(r.access)
|
||||
|
||||
Vendored
+123
@@ -0,0 +1,123 @@
|
||||
use super::types::{CacheLabsWrapper, CacheUser};
|
||||
use sha2::{Digest, Sha256};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Python-compatible JSON serialization helpers
|
||||
//
|
||||
// Python's default json.dumps uses separators=(', ', ': ') and
|
||||
// ensure_ascii=True. Field order follows dataclass definition order.
|
||||
// The digest must be byte-for-byte identical to Python's CacheData.__calc_digest.
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Escape a string exactly as Python's `json.dumps` does with its default
/// `ensure_ascii=True`:
/// - `"` and `\` -> `\"` / `\\`
/// - `\b`, `\t`, `\n`, `\f`, `\r` -> their two-character short escapes
/// - any other control char (< 0x20) -> `\u00XX`
/// - non-ASCII: BMP chars -> `\uXXXX`, astral chars -> a UTF-16 surrogate pair
fn python_json_string(s: &str) -> String {
    let mut out = String::with_capacity(s.len() + 2);
    out.push('"');
    for c in s.chars() {
        match c {
            '"' => out.push_str("\\\""),
            '\\' => out.push_str("\\\\"),
            // Python's json encoder emits short escapes for all five of
            // these, including \b and \f; routing backspace/form-feed through
            // the generic \u00XX branch would break the byte-for-byte digest
            // compatibility this module exists to provide.
            '\u{8}' => out.push_str("\\b"),
            '\t' => out.push_str("\\t"),
            '\n' => out.push_str("\\n"),
            '\u{c}' => out.push_str("\\f"),
            '\r' => out.push_str("\\r"),
            c if (c as u32) < 0x20 => {
                out.push_str(&format!("\\u{:04x}", c as u32));
            }
            c if c.is_ascii() => out.push(c),
            c => {
                // Non-ASCII: BMP -> \uXXXX, outside BMP -> surrogate pair,
                // matching Python's ensure_ascii output.
                let code = c as u32;
                if code <= 0xFFFF {
                    out.push_str(&format!("\\u{:04x}", code));
                } else {
                    let code = code - 0x10000;
                    let high = 0xD800 + (code >> 10);
                    let low = 0xDC00 + (code & 0x3FF);
                    out.push_str(&format!("\\u{:04x}\\u{:04x}", high, low));
                }
            }
        }
    }
    out.push('"');
    out
}
|
||||
|
||||
/// Render a `u32` slice the way Python's `json.dumps` renders a list of
/// ints: `[]` when empty, otherwise `[1, 2, 3]` with `", "` separators.
fn python_json_u32_array(items: &[u32]) -> String {
    let inner = items
        .iter()
        .map(u32::to_string)
        .collect::<Vec<_>>()
        .join(", ");
    format!("[{}]", inner)
}
|
||||
|
||||
/// Build the JSON array string that Python's `__calc_digest` hashes:
|
||||
/// `[user_asdict_or_null, token_asdict, labs_asdict]`
|
||||
///
|
||||
/// Field order matches each Python dataclass definition:
|
||||
/// User: id, username, laboratory_ids, is_reviewer
|
||||
/// Token: access, refresh
|
||||
/// Laboratories: items
|
||||
/// Laboratory: id, name, pi_name, full_name
|
||||
pub fn python_digest_json(
|
||||
user: Option<&CacheUser>,
|
||||
access: &str,
|
||||
refresh: &str,
|
||||
labs: &CacheLabsWrapper,
|
||||
) -> String {
|
||||
let user_str = match user {
|
||||
None => "null".to_string(),
|
||||
Some(u) => format!(
|
||||
"{{\"id\": {}, \"username\": {}, \"laboratory_ids\": {}, \"is_reviewer\": {}}}",
|
||||
u.id,
|
||||
python_json_string(&u.username),
|
||||
python_json_u32_array(&u.laboratory_ids),
|
||||
if u.is_reviewer { "true" } else { "false" }
|
||||
),
|
||||
};
|
||||
|
||||
let token_str = format!(
|
||||
"{{\"access\": {}, \"refresh\": {}}}",
|
||||
python_json_string(access),
|
||||
python_json_string(refresh)
|
||||
);
|
||||
|
||||
let items: Vec<String> = labs
|
||||
.items
|
||||
.iter()
|
||||
.map(|lab| {
|
||||
format!(
|
||||
"{{\"id\": {}, \"name\": {}, \"pi_name\": {}, \"full_name\": {}}}",
|
||||
lab.id,
|
||||
python_json_string(&lab.name),
|
||||
python_json_string(&lab.pi_name),
|
||||
python_json_string(&lab.full_name)
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let items_str = if items.is_empty() {
|
||||
"[]".to_string()
|
||||
} else {
|
||||
format!("[{}]", items.join(", "))
|
||||
};
|
||||
let labs_str = format!("{{\"items\": {}}}", items_str);
|
||||
|
||||
format!("[{}, {}, {}]", user_str, token_str, labs_str)
|
||||
}
|
||||
|
||||
/// Compute the cache digest compatible with Python's `CacheData.__calc_digest`:
|
||||
/// `hashlib.sha256(json.dumps([user, token, labs]).encode("utf-8")).hexdigest()`
|
||||
pub fn compute_digest(
|
||||
user: Option<&CacheUser>,
|
||||
access: &str,
|
||||
refresh: &str,
|
||||
labs: &CacheLabsWrapper,
|
||||
) -> String {
|
||||
let json_str = python_digest_json(user, access, refresh, labs);
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(json_str.as_bytes());
|
||||
format!("{:x}", hasher.finalize())
|
||||
}
|
||||
Vendored
+606
@@ -0,0 +1,606 @@
|
||||
pub mod digest;
|
||||
pub mod types;
|
||||
|
||||
pub use digest::compute_digest;
|
||||
pub use types::{Cache, CacheLaboratory, CacheLabsWrapper, CacheToken, CacheUser};
|
||||
|
||||
use crate::connection::MDRSConnection;
|
||||
use anyhow::{anyhow, bail};
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
#[cfg(unix)]
|
||||
use std::os::unix::fs::{MetadataExt, PermissionsExt};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, LazyLock, Mutex};
|
||||
use std::time::UNIX_EPOCH;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Per-remote async mutex map (in-process serialization)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Maps remote label -> per-remote async mutex. Entries are created lazily in
// get_remote_lock() and never removed; the set of remotes is small.
static REMOTE_LOCKS: LazyLock<Mutex<HashMap<String, Arc<tokio::sync::Mutex<()>>>>> =
    LazyLock::new(|| Mutex::new(HashMap::new()));
|
||||
|
||||
/// Key of the in-memory cache map: one entry per (config dir, remote) pair.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
struct CacheStoreKey {
    config_dir: PathBuf,
    remote: String,
}
|
||||
|
||||
/// Change-detection fingerprint of an on-disk cache file: size + mtime
/// (+ inode on Unix). A mismatch against the stored snapshot forces a
/// re-read from disk.
#[derive(Clone, Debug, Eq, PartialEq)]
struct CacheFileSnapshot {
    len: u64,
    modified_nanos: u128,
    #[cfg(unix)]
    inode: u64,
}
|
||||
|
||||
/// Parsed cache plus the file snapshot it was read from; served only while
/// the snapshot still matches the file on disk.
#[derive(Clone)]
struct MemoryCacheEntry {
    snapshot: CacheFileSnapshot,
    cache: Cache,
}
|
||||
|
||||
// Process-wide fast path: avoids re-reading and re-parsing the cache file
// when its snapshot is unchanged.
static MEMORY_CACHE: LazyLock<Mutex<HashMap<CacheStoreKey, MemoryCacheEntry>>> =
    LazyLock::new(|| Mutex::new(HashMap::new()));
|
||||
|
||||
fn get_remote_lock(remote: &str) -> Arc<tokio::sync::Mutex<()>> {
|
||||
let mut map = REMOTE_LOCKS.lock().unwrap();
|
||||
map.entry(remote.to_string())
|
||||
.or_insert_with(|| Arc::new(tokio::sync::Mutex::new(())))
|
||||
.clone()
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Cache file path helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
fn cache_store_key(config_dir: &Path, remote: &str) -> CacheStoreKey {
|
||||
CacheStoreKey {
|
||||
config_dir: config_dir.to_path_buf(),
|
||||
remote: remote.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// `<config_dir>/cache` — the directory holding per-remote login caches.
fn cache_dir_path(config_dir: &Path) -> PathBuf {
    let mut dir = config_dir.to_path_buf();
    dir.push("cache");
    dir
}
|
||||
|
||||
fn cache_file_path_in(config_dir: &Path, remote: &str) -> PathBuf {
|
||||
cache_dir_path(config_dir).join(format!("{}.json", remote))
|
||||
}
|
||||
|
||||
/// Cache file path for `remote` under the globally configured config dir.
// Thin wrapper over cache_file_path_in() bound to SETTINGS.config_dirname.
fn cache_file_path(remote: &str) -> PathBuf {
    cache_file_path_in(&crate::settings::SETTINGS.config_dirname, remote)
}
|
||||
|
||||
fn cache_snapshot(metadata: &fs::Metadata) -> CacheFileSnapshot {
|
||||
let modified_nanos = metadata
|
||||
.modified()
|
||||
.ok()
|
||||
.and_then(|time| time.duration_since(UNIX_EPOCH).ok())
|
||||
.map(|duration| duration.as_nanos())
|
||||
.unwrap_or_default();
|
||||
|
||||
CacheFileSnapshot {
|
||||
len: metadata.len(),
|
||||
modified_nanos,
|
||||
#[cfg(unix)]
|
||||
inode: metadata.ino(),
|
||||
}
|
||||
}
|
||||
|
||||
fn read_cache_snapshot(cache_path: &Path) -> Result<CacheFileSnapshot, std::io::Error> {
|
||||
fs::metadata(cache_path).map(|metadata| cache_snapshot(&metadata))
|
||||
}
|
||||
|
||||
fn cached_entry(config_dir: &Path, remote: &str, snapshot: &CacheFileSnapshot) -> Option<Cache> {
|
||||
let key = cache_store_key(config_dir, remote);
|
||||
let map = MEMORY_CACHE.lock().unwrap();
|
||||
map.get(&key)
|
||||
.filter(|entry| entry.snapshot == *snapshot)
|
||||
.map(|entry| entry.cache.clone())
|
||||
}
|
||||
|
||||
fn update_cached_entry(config_dir: &Path, remote: &str, snapshot: CacheFileSnapshot, cache: Cache) {
|
||||
let key = cache_store_key(config_dir, remote);
|
||||
let mut map = MEMORY_CACHE.lock().unwrap();
|
||||
map.insert(key, MemoryCacheEntry { snapshot, cache });
|
||||
}
|
||||
|
||||
fn invalidate_cached_entry(config_dir: &Path, remote: &str) {
|
||||
let key = cache_store_key(config_dir, remote);
|
||||
let mut map = MEMORY_CACHE.lock().unwrap();
|
||||
map.remove(&key);
|
||||
}
|
||||
|
||||
fn ensure_cache_dir(cache_dir: &Path) -> Result<(), anyhow::Error> {
|
||||
fs::create_dir_all(cache_dir)?;
|
||||
#[cfg(unix)]
|
||||
{
|
||||
let mut perms = fs::metadata(cache_dir)?.permissions();
|
||||
perms.set_mode(0o700);
|
||||
fs::set_permissions(cache_dir, perms)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn write_cache_file(cache_path: &Path, cache: &Cache) -> Result<(), anyhow::Error> {
|
||||
let tmp_path = cache_path.with_extension("tmp");
|
||||
fs::write(&tmp_path, serde_json::to_vec_pretty(cache)?)?;
|
||||
#[cfg(unix)]
|
||||
{
|
||||
let mut perms = fs::metadata(&tmp_path)?.permissions();
|
||||
perms.set_mode(0o600);
|
||||
fs::set_permissions(&tmp_path, perms)?;
|
||||
}
|
||||
fs::rename(&tmp_path, cache_path)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn parse_cache(remote: &str, data: &str) -> Result<Cache, anyhow::Error> {
|
||||
serde_json::from_str::<Cache>(data).map_err(|e| {
|
||||
anyhow!(
|
||||
"Cache for `{}` is invalid or outdated ({}). Run `mdrs login {}` to refresh it.",
|
||||
remote,
|
||||
e,
|
||||
remote
|
||||
)
|
||||
})
|
||||
}
|
||||
|
||||
/// Load and parse `<config_dir>/cache/<remote>.json`, serving the in-memory
/// copy when the on-disk snapshot (size/mtime/inode) is unchanged.
///
/// A missing file maps to a "Not logged in" error and also evicts any stale
/// in-memory entry, so later calls cannot serve deleted credentials. Other
/// I/O or parse failures are propagated.
fn load_cache_from_dir(remote: &str, config_dir: &Path) -> Result<Cache, anyhow::Error> {
    let cache_path = cache_file_path_in(config_dir, remote);
    // Stat first: the snapshot both keys the memory fast path and detects
    // external writers (e.g. another process re-logging in).
    let snapshot = match read_cache_snapshot(&cache_path) {
        Ok(snapshot) => snapshot,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
            invalidate_cached_entry(config_dir, remote);
            bail!(
                "Not logged in to `{}`. Run `mdrs login {}` first.",
                remote,
                remote
            );
        }
        Err(e) => return Err(e.into()),
    };

    // Fast path: snapshot matches what a previous load recorded.
    if let Some(cache) = cached_entry(config_dir, remote, &snapshot) {
        return Ok(cache);
    }

    // Slow path: read from disk. The file can disappear between the stat
    // above and this read (concurrent logout), so NotFound is handled again.
    let data = match fs::read_to_string(&cache_path) {
        Ok(data) => data,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
            invalidate_cached_entry(config_dir, remote);
            bail!(
                "Not logged in to `{}`. Run `mdrs login {}` first.",
                remote,
                remote
            );
        }
        Err(e) => return Err(e.into()),
    };
    let cache = parse_cache(remote, &data)?;
    update_cached_entry(config_dir, remote, snapshot, cache.clone());
    Ok(cache)
}
|
||||
|
||||
/// Like `load_cache_from_dir`, but anonymous-friendly: a missing cache file
/// yields `Ok(None)`, and an unparsable cache is deleted (self-heal) and also
/// reported as `Ok(None)` instead of an error.
fn load_cache_if_present_from_dir(
    remote: &str,
    config_dir: &Path,
) -> Result<Option<Cache>, anyhow::Error> {
    let cache_path = cache_file_path_in(config_dir, remote);
    let snapshot = match read_cache_snapshot(&cache_path) {
        Ok(snapshot) => snapshot,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
            // Missing file: also evict the stale in-memory entry.
            invalidate_cached_entry(config_dir, remote);
            return Ok(None);
        }
        Err(e) => return Err(e.into()),
    };

    // In-memory fast path, valid only while the file snapshot matches.
    if let Some(cache) = cached_entry(config_dir, remote, &snapshot) {
        return Ok(Some(cache));
    }

    // NotFound is handled again: the file may vanish between stat and read.
    let data = match fs::read_to_string(&cache_path) {
        Ok(data) => data,
        Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
            invalidate_cached_entry(config_dir, remote);
            return Ok(None);
        }
        Err(e) => return Err(e.into()),
    };
    let cache = match parse_cache(remote, &data) {
        Ok(cache) => cache,
        Err(_) => {
            // Invalid cache: remove it so the next login starts clean.
            remove_cache_in_dir(remote, config_dir)?;
            return Ok(None);
        }
    };
    update_cached_entry(config_dir, remote, snapshot, cache.clone());
    Ok(Some(cache))
}
|
||||
|
||||
fn persist_cache_in_dir(
|
||||
remote: &str,
|
||||
config_dir: &Path,
|
||||
cache: &Cache,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
let cache_dir = cache_dir_path(config_dir);
|
||||
ensure_cache_dir(&cache_dir)?;
|
||||
|
||||
let cache_path = cache_file_path_in(config_dir, remote);
|
||||
write_cache_file(&cache_path, cache)?;
|
||||
|
||||
let snapshot = read_cache_snapshot(&cache_path)?;
|
||||
update_cached_entry(config_dir, remote, snapshot, cache.clone());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn remove_cache_in_dir(remote: &str, config_dir: &Path) -> Result<(), anyhow::Error> {
|
||||
let cache_path = cache_file_path_in(config_dir, remote);
|
||||
match fs::remove_file(&cache_path) {
|
||||
Ok(()) => {}
|
||||
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {}
|
||||
Err(e) => return Err(e.into()),
|
||||
}
|
||||
invalidate_cached_entry(config_dir, remote);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Load cache (low-level, no token refresh)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Load token and laboratories from the login cache file (no token refresh check).
// Binds load_cache_from_dir() to the globally configured config dir.
pub fn load_cache(remote: &str) -> Result<Cache, anyhow::Error> {
    load_cache_from_dir(remote, &crate::settings::SETTINGS.config_dirname)
}
|
||||
|
||||
/// Persist a cache entry and refresh the in-memory fast path.
// Binds persist_cache_in_dir() to the globally configured config dir.
pub fn persist_cache(remote: &str, cache: &Cache) -> Result<(), anyhow::Error> {
    persist_cache_in_dir(remote, &crate::settings::SETTINGS.config_dirname, cache)
}
|
||||
|
||||
/// Remove a cache entry from disk and memory.
// Binds remove_cache_in_dir() to the globally configured config dir.
pub fn remove_cache(remote: &str) -> Result<(), anyhow::Error> {
    remove_cache_in_dir(remote, &crate::settings::SETTINGS.config_dirname)
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Token-aware cache load with refresh and locking
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Load cache, check token expiry, and refresh the access token if needed.
///
/// Locking strategy:
/// - Per-remote `tokio::sync::Mutex` serializes concurrent async tasks within
///   the same process.
/// - `flock(LOCK_EX)` on a dedicated `cache/{remote}.lock` file serializes
///   the entire read-check-refresh-write cycle across separate processes on
///   the same host.
pub async fn load_cache_with_token_refresh(remote: &str) -> Result<Cache, anyhow::Error> {
    // In-process lock first, then the cross-process file lock.
    let lock = get_remote_lock(remote);
    let _guard = lock.lock().await;

    ensure_cache_dir(&cache_dir_path(&crate::settings::SETTINGS.config_dirname))?;
    let lock_path = cache_file_path(remote).with_extension("lock");
    use fs2::FileExt;
    let lock_file = fs::OpenOptions::new()
        .write(true)
        .create(true)
        .open(&lock_path)?;
    // NOTE(review): lock_exclusive() is a blocking flock inside an async fn;
    // under cross-process contention it stalls the executor thread — confirm
    // acceptable or wrap in spawn_blocking.
    lock_file.lock_exclusive()?;

    // Re-read inside the lock: another process may have already refreshed the
    // token since we last checked.
    let result: Result<Cache, anyhow::Error> = async {
        let mut cache = load_cache(remote)?;

        // Refresh token expired -> the session cannot be recovered here.
        if crate::token::is_expired(&cache.token.refresh) {
            bail!(
                "Session for `{}` has expired. Please run `mdrs login {}` again.",
                remote,
                remote
            );
        }

        // Access token stale but refresh still valid -> refresh and persist.
        if crate::token::is_refresh_required(&cache.token.access, &cache.token.refresh) {
            cache = refresh_and_persist(remote, &cache).await?;
        }

        Ok(cache)
    }
    .await;

    // Unlock before returning either outcome of the inner block.
    lock_file.unlock()?;
    result
}
|
||||
|
||||
/// Anonymous-tolerant variant of the token-refreshing load for a specific
/// config dir: `Ok(None)` when no usable session exists. Uses the same
/// two-level locking (per-remote async mutex + flock on `{remote}.lock`).
async fn load_cache_with_token_refresh_optional_from_dir(
    remote: &str,
    config_dir: &Path,
) -> Result<Option<Cache>, anyhow::Error> {
    let lock = get_remote_lock(remote);
    let _guard = lock.lock().await;

    ensure_cache_dir(&cache_dir_path(config_dir))?;
    let lock_path = cache_file_path_in(config_dir, remote).with_extension("lock");
    use fs2::FileExt;
    let lock_file = fs::OpenOptions::new()
        .write(true)
        .create(true)
        .open(&lock_path)?;
    // NOTE(review): blocking flock inside an async fn — same caveat as
    // load_cache_with_token_refresh.
    lock_file.lock_exclusive()?;

    let result: Result<Option<Cache>, anyhow::Error> = async {
        // No cache (or invalid, self-healed) -> anonymous.
        let Some(mut cache) = load_cache_if_present_from_dir(remote, config_dir)? else {
            return Ok(None);
        };

        // Expired refresh token: delete the dead session, report anonymous.
        if crate::token::is_expired(&cache.token.refresh) {
            remove_cache_in_dir(remote, config_dir)?;
            return Ok(None);
        }

        // Stale access token with a live refresh token: refresh and persist.
        if crate::token::is_refresh_required(&cache.token.access, &cache.token.refresh) {
            cache = refresh_and_persist_in_dir(remote, config_dir, &cache).await?;
        }

        Ok(Some(cache))
    }
    .await;

    lock_file.unlock()?;
    result
}
|
||||
|
||||
/// Load cache when present and refresh its token if needed.
///
/// Unlike `load_cache_with_token_refresh`, this returns `Ok(None)` when the user
/// is effectively anonymous: no cache file exists, the cache is invalid, or the
/// refresh token has already expired. This mirrors the Python client behavior
/// used by read-only commands.
// Binds the `_from_dir` variant to the globally configured config dir.
pub async fn load_cache_with_token_refresh_optional(
    remote: &str,
) -> Result<Option<Cache>, anyhow::Error> {
    load_cache_with_token_refresh_optional_from_dir(
        remote,
        &crate::settings::SETTINGS.config_dirname,
    )
    .await
}
|
||||
|
||||
/// Call the token-refresh endpoint and write the new access token back to the
/// cache file. The caller must already hold the per-remote async mutex.
// Binds refresh_and_persist_in_dir() to the globally configured config dir.
async fn refresh_and_persist(remote: &str, cache: &Cache) -> Result<Cache, anyhow::Error> {
    refresh_and_persist_in_dir(remote, &crate::settings::SETTINGS.config_dirname, cache).await
}
|
||||
|
||||
async fn refresh_and_persist_in_dir(
|
||||
remote: &str,
|
||||
config_dir: &Path,
|
||||
cache: &Cache,
|
||||
) -> Result<Cache, anyhow::Error> {
|
||||
let url = crate::commands::config::get_remote_url(remote)?
|
||||
.ok_or_else(|| anyhow!("Remote `{}` is not configured.", remote))?;
|
||||
let conn = MDRSConnection::new(&url);
|
||||
|
||||
let new_access = conn.token_refresh(&cache.token.refresh).await?;
|
||||
|
||||
let mut updated_cache = cache.clone();
|
||||
updated_cache.token.access = new_access;
|
||||
updated_cache.digest = compute_digest(
|
||||
updated_cache.user.as_ref(),
|
||||
&updated_cache.token.access,
|
||||
&updated_cache.token.refresh,
|
||||
&updated_cache.laboratories,
|
||||
);
|
||||
|
||||
persist_cache_in_dir(remote, config_dir, &updated_cache)?;
|
||||
|
||||
Ok(updated_cache)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Connection helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Create an authenticated `MDRSConnection` for the given remote label.
|
||||
pub fn create_authenticated_conn(
|
||||
remote: &str,
|
||||
cache: &Cache,
|
||||
) -> Result<MDRSConnection, anyhow::Error> {
|
||||
Ok(create_remote_conn(remote)?.with_token(cache.token.access.clone()))
|
||||
}
|
||||
|
||||
/// Create an unauthenticated `MDRSConnection` for the given remote label.
|
||||
pub fn create_remote_conn(remote: &str) -> Result<MDRSConnection, anyhow::Error> {
|
||||
let url = crate::commands::config::get_remote_url(remote)?
|
||||
.ok_or_else(|| anyhow!("Remote `{}` is not configured.", remote))?;
|
||||
Ok(MDRSConnection::new(&url).with_remote(remote))
|
||||
}
|
||||
|
||||
/// Create a connection for read-only commands, attaching a bearer token only
|
||||
/// when a valid login cache is available.
|
||||
pub async fn create_readonly_conn(
|
||||
remote: &str,
|
||||
) -> Result<(MDRSConnection, Option<Cache>), anyhow::Error> {
|
||||
let conn = create_remote_conn(remote)?;
|
||||
match load_cache_with_token_refresh_optional(remote).await? {
|
||||
Some(cache) => Ok((conn.with_token(cache.token.access.clone()), Some(cache))),
|
||||
None => Ok((conn, None)),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    // MEMORY_CACHE and REMOTE_LOCKS are process-global statics, so each test
    // derives a unique remote name from its own tempdir to stay isolated.

    // Fully-populated cache whose string fields embed `username`.
    fn sample_cache(username: &str) -> Cache {
        Cache {
            user: Some(CacheUser {
                id: 1,
                username: username.to_string(),
                laboratory_ids: vec![10, 20],
                is_reviewer: false,
            }),
            token: types::CacheToken {
                access: format!("access-{username}"),
                refresh: format!("refresh-{username}"),
            },
            laboratories: CacheLabsWrapper {
                items: vec![CacheLaboratory {
                    id: 10,
                    name: "lab".to_string(),
                    pi_name: String::new(),
                    full_name: "Laboratory".to_string(),
                }],
            },
            digest: format!("digest-{username}"),
        }
    }

    // Unique per-test remote label derived from the temp config dir name.
    fn remote_name(prefix: &str, config_dir: &Path) -> String {
        format!(
            "{prefix}-{}",
            config_dir
                .file_name()
                .unwrap_or_default()
                .to_string_lossy()
                .replace('.', "_")
        )
    }

    #[cfg(unix)]
    #[test]
    fn load_cache_uses_memory_fast_path_when_snapshot_matches() {
        let dir = tempdir().unwrap();
        let remote = remote_name("fast-path", dir.path());
        let cache = sample_cache("alice");

        persist_cache_in_dir(&remote, dir.path(), &cache).unwrap();
        let first = load_cache_from_dir(&remote, dir.path()).unwrap();
        assert_eq!(first.user.unwrap().username, "alice");

        // Make the file unreadable: a successful second load proves the
        // in-memory entry was served instead of re-reading from disk.
        let cache_path = cache_file_path_in(dir.path(), &remote);
        let mut perms = fs::metadata(&cache_path).unwrap().permissions();
        perms.set_mode(0o000);
        fs::set_permissions(&cache_path, perms).unwrap();

        let second = load_cache_from_dir(&remote, dir.path()).unwrap();
        assert_eq!(second.user.unwrap().username, "alice");
    }

    #[test]
    fn load_cache_reloads_when_external_writer_changes_file() {
        let dir = tempdir().unwrap();
        let remote = remote_name("reload", dir.path());
        let original = sample_cache("alice");
        let updated = sample_cache("bob");

        persist_cache_in_dir(&remote, dir.path(), &original).unwrap();
        let first = load_cache_from_dir(&remote, dir.path()).unwrap();
        assert_eq!(first.user.unwrap().username, "alice");

        // Simulate another process rewriting the file behind our back.
        let cache_dir = cache_dir_path(dir.path());
        ensure_cache_dir(&cache_dir).unwrap();
        let cache_path = cache_file_path_in(dir.path(), &remote);
        write_cache_file(&cache_path, &updated).unwrap();

        let second = load_cache_from_dir(&remote, dir.path()).unwrap();
        assert_eq!(second.user.unwrap().username, "bob");
    }

    #[cfg(unix)]
    #[test]
    fn persist_cache_refreshes_memory_entry() {
        let dir = tempdir().unwrap();
        let remote = remote_name("persist", dir.path());
        let original = sample_cache("alice");
        let updated = sample_cache("bob");

        persist_cache_in_dir(&remote, dir.path(), &original).unwrap();
        let _ = load_cache_from_dir(&remote, dir.path()).unwrap();

        persist_cache_in_dir(&remote, dir.path(), &updated).unwrap();

        // chmod 0o000 again proves the post-persist load comes from memory.
        let cache_path = cache_file_path_in(dir.path(), &remote);
        let mut perms = fs::metadata(&cache_path).unwrap().permissions();
        perms.set_mode(0o000);
        fs::set_permissions(&cache_path, perms).unwrap();

        let loaded = load_cache_from_dir(&remote, dir.path()).unwrap();
        assert_eq!(loaded.user.unwrap().username, "bob");
    }

    #[test]
    fn remove_cache_invalidates_memory_entry() {
        let dir = tempdir().unwrap();
        let remote = remote_name("remove", dir.path());
        let cache = sample_cache("alice");

        persist_cache_in_dir(&remote, dir.path(), &cache).unwrap();
        let _ = load_cache_from_dir(&remote, dir.path()).unwrap();

        remove_cache_in_dir(&remote, dir.path()).unwrap();

        let err = load_cache_from_dir(&remote, dir.path()).unwrap_err();
        assert!(
            err.to_string()
                .contains(&format!("Not logged in to `{remote}`"))
        );
    }

    // Unsigned JWT (alg "none") carrying only an `exp` claim — enough for the
    // expiry checks exercised below.
    fn make_jwt_with_exp(exp: i64) -> String {
        use base64::{Engine, engine::general_purpose::URL_SAFE_NO_PAD};

        let header = URL_SAFE_NO_PAD.encode(r#"{"alg":"none","typ":"JWT"}"#);
        let payload = URL_SAFE_NO_PAD.encode(format!(r#"{{"exp":{exp}}}"#));
        format!("{header}.{payload}.")
    }

    #[test]
    fn load_cache_if_present_returns_none_when_cache_missing() {
        let dir = tempdir().unwrap();
        let remote = remote_name("missing", dir.path());

        let loaded = load_cache_if_present_from_dir(&remote, dir.path()).unwrap();

        assert!(loaded.is_none());
    }

    #[test]
    fn load_cache_if_present_clears_invalid_cache() {
        let dir = tempdir().unwrap();
        let remote = remote_name("invalid", dir.path());
        let cache_dir = cache_dir_path(dir.path());
        ensure_cache_dir(&cache_dir).unwrap();
        let cache_path = cache_file_path_in(dir.path(), &remote);
        fs::write(&cache_path, b"{invalid json").unwrap();

        let loaded = load_cache_if_present_from_dir(&remote, dir.path()).unwrap();

        // Self-heal: unparsable cache is treated as anonymous AND deleted.
        assert!(loaded.is_none());
        assert!(!cache_path.exists());
    }

    #[tokio::test]
    async fn optional_cache_load_treats_expired_session_as_anonymous() {
        let dir = tempdir().unwrap();
        let remote = remote_name("expired", dir.path());
        let mut cache = sample_cache("alice");
        // exp=0 is far in the past, so both tokens are expired.
        cache.token.access = make_jwt_with_exp(0);
        cache.token.refresh = make_jwt_with_exp(0);
        persist_cache_in_dir(&remote, dir.path(), &cache).unwrap();

        let loaded = load_cache_with_token_refresh_optional_from_dir(&remote, dir.path())
            .await
            .unwrap();

        // Expired session -> anonymous, and the dead cache file is removed.
        assert!(loaded.is_none());
        assert!(!cache_file_path_in(dir.path(), &remote).exists());
    }
}
|
||||
Vendored
+44
@@ -0,0 +1,44 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Access and refresh token pair stored in the login cache file.
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)]
pub struct CacheToken {
    /// Bearer token attached to authenticated API requests.
    pub access: String,
    /// Token posted to the token-refresh endpoint to mint a new `access`.
    pub refresh: String,
}
|
||||
|
||||
/// Minimal user fields stored in the cache, matching Python's `User` dataclass.
// Field order matters: the digest serialization mirrors the dataclass order
// (id, username, laboratory_ids, is_reviewer).
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)]
pub struct CacheUser {
    pub id: u32,
    pub username: String,
    /// IDs of the laboratories this user belongs to.
    pub laboratory_ids: Vec<u32>,
    pub is_reviewer: bool,
}
|
||||
|
||||
/// All four laboratory fields needed for digest computation.
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)]
pub struct CacheLaboratory {
    pub id: u32,
    pub name: String,
    // serde(default): tolerate cache files written without these fields by
    // falling back to an empty string.
    #[serde(default)]
    pub pi_name: String,
    #[serde(default)]
    pub full_name: String,
}
|
||||
|
||||
/// Wrapper matching Python's `Laboratories` serialization: `{"items": [...]}`.
#[derive(Deserialize, Serialize, Clone, Debug, Default, PartialEq, Eq)]
pub struct CacheLabsWrapper {
    pub items: Vec<CacheLaboratory>,
}
|
||||
|
||||
/// Full login cache, corresponding to the `<remote>.json` file written by `login`.
#[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Eq)]
pub struct Cache {
    /// `None` represents an anonymous/unknown user (serialized as JSON null).
    pub user: Option<CacheUser>,
    pub token: CacheToken,
    pub laboratories: CacheLabsWrapper,
    // serde(default): older cache files without a digest deserialize with "".
    #[serde(default)]
    pub digest: String,
}
|
||||
+114
@@ -0,0 +1,114 @@
|
||||
use clap::{Parser, Subcommand};
|
||||
|
||||
use crate::commands::config_subcommand::*;
|
||||
|
||||
// Clap derive entry point: `mdrs <subcommand> ...`.
// (Plain `//` comments used deliberately — `///` on clap derive items becomes
// user-visible `--help` text.)
#[derive(Parser)]
#[command(name = "mdrs")]
#[command(about = "MDRS Rust CLI client", long_about = None)]
pub struct Cli {
    // The selected subcommand; variants are defined in `Commands` below.
    #[command(subcommand)]
    pub command: Commands,
}
|
||||
|
||||
// NOTE: `///` doc comments on variants/fields become `--help` text via clap,
// so reviewer notes below use plain `//` to leave the CLI output unchanged.
#[derive(Subcommand)]
pub enum Commands {
    /// Config management (create, update, list, delete)
    #[command(subcommand)]
    Config(ConfigSubcommand),
    // Log in to `remote`; username/password are prompted for when omitted —
    // TODO confirm against the login command implementation.
    Login {
        #[arg(short, long)]
        username: Option<String>,
        #[arg(short, long)]
        password: Option<String>,
        remote: String,
    },
    /// Logout and remove cached credentials for a remote
    Logout {
        remote: String,
    },
    // Upload local_path to remote_path (-r for directories, -s to skip
    // already-existing targets).
    Upload {
        #[arg(short, long)]
        recursive: bool,
        #[arg(short = 's', long)]
        skip_if_exists: bool,
        local_path: String,
        remote_path: String,
    },
    // Download remote_path to local_path; -p supplies a folder password,
    // --exclude filters entries.
    Download {
        #[arg(short, long)]
        recursive: bool,
        #[arg(short = 's', long)]
        skip_if_exists: bool,
        #[arg(short = 'p', long)]
        password: Option<String>,
        #[arg(long)]
        exclude: Vec<String>,
        remote_path: String,
        local_path: String,
    },
    // List remote_path; -J for JSON output, -q for quiet mode.
    Ls {
        remote_path: String,
        #[arg(short = 'p', long)]
        password: Option<String>,
        #[arg(short = 'J', long = "json")]
        json: bool,
        #[arg(short = 'r', long)]
        recursive: bool,
        #[arg(short = 'q', long)]
        quiet: bool,
    },
    // Show the currently logged-in user for a remote.
    Whoami {
        remote: String,
    },
    // List laboratories on a remote.
    Labs {
        remote: String,
    },
    // Change the access level of a remote path.
    Chacl {
        /// Access level key: private, public, pw_open, cbs_open, 5kikan_open,
        /// cbs_or_pw_open, 5kikan_or_pw_open, storage
        access_level_key: String,
        #[arg(short, long)]
        recursive: bool,
        #[arg(short = 'p', long)]
        password: Option<String>,
        remote_path: String,
    },
    // Show metadata for a remote path (folder counterpart of FileMetadata).
    Metadata {
        #[arg(short = 'p', long)]
        password: Option<String>,
        remote_path: String,
    },
    // Create a remote folder.
    Mkdir {
        remote_path: String,
    },
    // Remove a remote path (-r for folders).
    Rm {
        #[arg(short, long)]
        recursive: bool,
        remote_path: String,
    },
    // Move/rename within a remote.
    Mv {
        src_path: String,
        dest_path: String,
    },
    // Copy within a remote (-r for folders).
    Cp {
        #[arg(short, long)]
        recursive: bool,
        src_path: String,
        dest_path: String,
    },
    /// Show metadata for a remote file
    FileMetadata {
        #[arg(short = 'p', long)]
        password: Option<String>,
        remote_path: String,
    },
    /// Show the version of this tool
    Version,
    /// Update this binary to the latest release
    #[command(name = "selfupdate")]
    SelfUpdate {
        /// Skip the confirmation prompt
        #[arg(short = 'y', long)]
        yes: bool,
    },
}
|
||||
+14
-13
@@ -1,13 +1,13 @@
|
||||
use crate::commands::shared::{
|
||||
create_authenticated_conn, find_folder, find_lab_in_cache, load_cache_with_token_refresh, parse_remote_path,
|
||||
};
|
||||
use crate::cache::{create_authenticated_conn, load_cache_with_token_refresh};
|
||||
use crate::commands::shared::{find_folder, find_lab_in_cache, parse_remote_path};
|
||||
use anyhow::bail;
|
||||
|
||||
pub async fn chacl(
|
||||
remote_path: &str,
|
||||
access_level_key: &str,
|
||||
recursive: bool,
|
||||
password: Option<&str>,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
) -> Result<(), anyhow::Error> {
|
||||
let access_level_id: u32 = match access_level_key {
|
||||
"private" => 0x0001,
|
||||
"public" => 0x0002,
|
||||
@@ -17,7 +17,7 @@ pub async fn chacl(
|
||||
"cbs_or_pw_open" => 0x0104,
|
||||
"5kikan_or_pw_open" => 0x0204,
|
||||
"storage" => 0x0000,
|
||||
_ => return Err(format!("Unknown access level key: '{}'", access_level_key).into()),
|
||||
_ => bail!("Unknown access level key: '{}'", access_level_key),
|
||||
};
|
||||
|
||||
let (remote, labname, folder_path) = parse_remote_path(remote_path)?;
|
||||
@@ -27,7 +27,10 @@ pub async fn chacl(
|
||||
let folder = find_folder(&conn, lab.id, &folder_path, None).await?;
|
||||
|
||||
let mut data = serde_json::Map::new();
|
||||
data.insert("access_level".to_string(), serde_json::json!(access_level_id));
|
||||
data.insert(
|
||||
"access_level".to_string(),
|
||||
serde_json::json!(access_level_id),
|
||||
);
|
||||
if recursive {
|
||||
data.insert("lower".to_string(), serde_json::json!(1));
|
||||
}
|
||||
@@ -35,16 +38,14 @@ pub async fn chacl(
|
||||
data.insert("password".to_string(), serde_json::json!(pw));
|
||||
}
|
||||
let resp = conn
|
||||
.client
|
||||
.post(conn.build_url(&format!("v3/folders/{}/acl/", folder.id)))
|
||||
.headers(conn.prepare_headers())
|
||||
.json(&serde_json::Value::Object(data))
|
||||
.send()
|
||||
.post_json(
|
||||
&format!("v3/folders/{}/acl/", folder.id),
|
||||
&serde_json::Value::Object(data),
|
||||
)
|
||||
.await?;
|
||||
|
||||
if !resp.status().is_success() {
|
||||
return Err(format!("ACL change failed: {}", resp.status()).into());
|
||||
bail!("ACL change failed: {}", resp.status());
|
||||
}
|
||||
println!("ACL changed successfully for: {}", remote_path);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
+20
-33
@@ -1,3 +1,4 @@
|
||||
use anyhow::bail;
|
||||
use configparser::ini::Ini;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
@@ -6,7 +7,7 @@ fn config_path() -> PathBuf {
|
||||
crate::settings::SETTINGS.config_dirname.join("config.ini")
|
||||
}
|
||||
|
||||
fn sanitize_config_file(path: &PathBuf) -> Result<(), Box<dyn std::error::Error>> {
|
||||
fn sanitize_config_file(path: &PathBuf) -> Result<(), anyhow::Error> {
|
||||
if !path.exists() {
|
||||
return Ok(());
|
||||
}
|
||||
@@ -28,7 +29,7 @@ fn sanitize_config_file(path: &PathBuf) -> Result<(), Box<dyn std::error::Error>
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn write_ini_atomic(path: &PathBuf, ini: &Ini) -> Result<(), Box<dyn std::error::Error>> {
|
||||
fn write_ini_atomic(path: &PathBuf, ini: &Ini) -> Result<(), anyhow::Error> {
|
||||
let tmp = path.with_extension("tmp");
|
||||
// write to tmp path then rename for atomicity
|
||||
ini.write(&tmp)?;
|
||||
@@ -36,27 +37,27 @@ fn write_ini_atomic(path: &PathBuf, ini: &Ini) -> Result<(), Box<dyn std::error:
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_remote_url(remote: &str) -> Result<Option<String>, Box<dyn std::error::Error>> {
|
||||
pub fn get_remote_url(remote: &str) -> Result<Option<String>, anyhow::Error> {
|
||||
let path = config_path();
|
||||
sanitize_config_file(&path)?;
|
||||
let path_str = path.to_string_lossy().to_string();
|
||||
let mut conf = Ini::new();
|
||||
if path.exists() {
|
||||
let _ = conf.load(&path_str)?;
|
||||
let _ = conf.load(&path_str).map_err(|e| anyhow::anyhow!("{}", e))?;
|
||||
}
|
||||
Ok(conf.get(remote, "url"))
|
||||
}
|
||||
|
||||
pub fn config_create(remote: &str, url: &str) -> Result<(), Box<dyn std::error::Error>> {
|
||||
pub fn config_create(remote: &str, url: &str) -> Result<(), anyhow::Error> {
|
||||
if !validate_url(url) {
|
||||
return Err("Malformed URL".into());
|
||||
bail!("Malformed URL");
|
||||
}
|
||||
let path = config_path();
|
||||
sanitize_config_file(&path)?;
|
||||
let path_str = path.to_string_lossy().to_string();
|
||||
let mut conf = Ini::new();
|
||||
if path.exists() {
|
||||
let _ = conf.load(&path_str)?;
|
||||
let _ = conf.load(&path_str).map_err(|e| anyhow::anyhow!("{}", e))?;
|
||||
}
|
||||
// check if section exists
|
||||
let section_exists = conf
|
||||
@@ -64,7 +65,7 @@ pub fn config_create(remote: &str, url: &str) -> Result<(), Box<dyn std::error::
|
||||
.map(|m| m.contains_key(remote))
|
||||
.unwrap_or(false);
|
||||
if section_exists {
|
||||
return Err(format!("Remote host `{}` already exists.", remote).into());
|
||||
bail!("Remote host `{}` is already exists.", remote);
|
||||
}
|
||||
// set url
|
||||
conf.set(remote, "url", Some(url.to_string()));
|
||||
@@ -73,20 +74,19 @@ pub fn config_create(remote: &str, url: &str) -> Result<(), Box<dyn std::error::
|
||||
fs::create_dir_all(parent)?;
|
||||
}
|
||||
write_ini_atomic(&path, &conf)?;
|
||||
println!("Created remote host `{}`.", remote);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn config_update(remote: &str, url: &str) -> Result<(), Box<dyn std::error::Error>> {
|
||||
pub fn config_update(remote: &str, url: &str) -> Result<(), anyhow::Error> {
|
||||
if !validate_url(url) {
|
||||
return Err("Malformed URL".into());
|
||||
bail!("Malformed URL");
|
||||
}
|
||||
let path = config_path();
|
||||
sanitize_config_file(&path)?;
|
||||
let path_str = path.to_string_lossy().to_string();
|
||||
let mut conf = Ini::new();
|
||||
if path.exists() {
|
||||
let _ = conf.load(&path_str)?;
|
||||
let _ = conf.load(&path_str).map_err(|e| anyhow::anyhow!("{}", e))?;
|
||||
}
|
||||
// ensure section exists
|
||||
let section_exists = conf
|
||||
@@ -94,57 +94,45 @@ pub fn config_update(remote: &str, url: &str) -> Result<(), Box<dyn std::error::
|
||||
.map(|m| m.contains_key(remote))
|
||||
.unwrap_or(false);
|
||||
if !section_exists {
|
||||
return Err(format!("Remote host `{}` does not exist.", remote).into());
|
||||
bail!("Remote host `{}` is not exists.", remote);
|
||||
}
|
||||
conf.set(remote, "url", Some(url.to_string()));
|
||||
write_ini_atomic(&path, &conf)?;
|
||||
println!("Updated remote host `{}`.", remote);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn config_list(long: bool) -> Result<(), Box<dyn std::error::Error>> {
|
||||
pub fn config_list() -> Result<(), anyhow::Error> {
|
||||
let path = config_path();
|
||||
if !path.exists() {
|
||||
println!("No config file found at {}", path.to_string_lossy());
|
||||
return Ok(());
|
||||
}
|
||||
sanitize_config_file(&path)?;
|
||||
let path_str = path.to_string_lossy().to_string();
|
||||
let mut conf = Ini::new();
|
||||
let _ = conf.load(&path_str)?;
|
||||
let _ = conf.load(&path_str).map_err(|e| anyhow::anyhow!("{}", e))?;
|
||||
let map = conf.get_map().unwrap_or_default();
|
||||
let mut printed = false;
|
||||
for (sec, props) in map.iter() {
|
||||
if sec == "default" {
|
||||
continue;
|
||||
}
|
||||
if !long {
|
||||
println!("{}", sec);
|
||||
printed = true;
|
||||
} else {
|
||||
let url = props.get("url").and_then(|v| v.clone()).unwrap_or_default();
|
||||
println!("{}:\t{}", sec, url);
|
||||
printed = true;
|
||||
}
|
||||
}
|
||||
if !printed {
|
||||
println!("No remotes configured");
|
||||
let url = props.get("url").and_then(|v| v.clone()).unwrap_or_default();
|
||||
println!("{}:\t{}", sec, url);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn config_delete(remote: &str) -> Result<(), Box<dyn std::error::Error>> {
|
||||
pub fn config_delete(remote: &str) -> Result<(), anyhow::Error> {
|
||||
let path = config_path();
|
||||
sanitize_config_file(&path)?;
|
||||
let path_str = path.to_string_lossy().to_string();
|
||||
let mut conf = Ini::new();
|
||||
if path.exists() {
|
||||
let _ = conf.load(&path_str)?;
|
||||
let _ = conf.load(&path_str).map_err(|e| anyhow::anyhow!("{}", e))?;
|
||||
}
|
||||
// fallback: reconstruct by removing the section in memory map and writing file
|
||||
let mut map = conf.get_map().unwrap_or_default();
|
||||
if map.remove(remote).is_none() {
|
||||
return Err(format!("Remote host `{}` does not exist.", remote).into());
|
||||
bail!("Remote host `{}` is not exists.", remote);
|
||||
}
|
||||
// build new Ini from map
|
||||
let mut new_ini = Ini::new();
|
||||
@@ -154,7 +142,6 @@ pub fn config_delete(remote: &str) -> Result<(), Box<dyn std::error::Error>> {
|
||||
}
|
||||
}
|
||||
write_ini_atomic(&path, &new_ini)?;
|
||||
println!("Deleted remote host `{}`.", remote);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -7,8 +7,10 @@ pub enum ConfigSubcommand {
|
||||
/// Update an existing remote host
|
||||
Update(ConfigUpdateArgs),
|
||||
/// List all remote hosts
|
||||
#[command(alias = "ls")]
|
||||
List(ConfigListArgs),
|
||||
/// Delete a remote host
|
||||
#[command(aliases = ["remove", "rm"])]
|
||||
Delete(ConfigDeleteArgs),
|
||||
}
|
||||
|
||||
@@ -25,10 +27,7 @@ pub struct ConfigUpdateArgs {
|
||||
}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
pub struct ConfigListArgs {
|
||||
#[arg(short, long)]
|
||||
pub long: bool,
|
||||
}
|
||||
pub struct ConfigListArgs {}
|
||||
|
||||
#[derive(Args, Debug)]
|
||||
pub struct ConfigDeleteArgs {
|
||||
|
||||
+57
-58
@@ -1,22 +1,20 @@
|
||||
use crate::cache::{create_authenticated_conn, load_cache_with_token_refresh};
|
||||
use crate::commands::shared::{
|
||||
create_authenticated_conn, find_file_by_name, find_folder, find_lab_in_cache, load_cache_with_token_refresh,
|
||||
find_file_by_name, find_folder, find_lab_in_cache, find_subfolder_by_name, nfc,
|
||||
parse_remote_path,
|
||||
};
|
||||
use anyhow::bail;
|
||||
|
||||
pub async fn cp(
|
||||
src_path: &str,
|
||||
dest_path: &str,
|
||||
recursive: bool,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
pub async fn cp(src_path: &str, dest_path: &str, recursive: bool) -> Result<(), anyhow::Error> {
|
||||
let (s_remote, s_lab, s_path) = parse_remote_path(src_path)?;
|
||||
let dest_ends_with_slash = dest_path.ends_with('/');
|
||||
let (d_remote, d_lab, d_path) = parse_remote_path(dest_path)?;
|
||||
|
||||
if s_remote != d_remote {
|
||||
return Err("Source and destination must use the same remote.".into());
|
||||
bail!("Remote host mismatched.");
|
||||
}
|
||||
if s_lab != d_lab {
|
||||
return Err("Source and destination must be in the same laboratory.".into());
|
||||
bail!("Laboratory mismatched.");
|
||||
}
|
||||
|
||||
let cache = load_cache_with_token_refresh(&s_remote).await?;
|
||||
@@ -25,14 +23,16 @@ pub async fn cp(
|
||||
let lab_id = lab.id;
|
||||
|
||||
// Split source path into parent directory and target name
|
||||
let (s_dirname, s_basename) = split_path(&s_path);
|
||||
let (s_dirname, s_basename_raw) = split_path(&s_path);
|
||||
let s_basename = nfc(&s_basename_raw);
|
||||
|
||||
// If dest ends with '/', treat it as a directory and preserve src basename
|
||||
let (d_dirname, d_basename) = if dest_ends_with_slash {
|
||||
let (d_dirname, d_basename_raw) = if dest_ends_with_slash {
|
||||
(d_path.clone(), s_basename.clone())
|
||||
} else {
|
||||
split_path(&d_path)
|
||||
};
|
||||
let d_basename = nfc(&d_basename_raw);
|
||||
|
||||
let s_parent_folder = find_folder(&conn, lab_id, &s_dirname, None).await?;
|
||||
let s_parent_files = conn.list_all_files(&s_parent_folder.id).await?;
|
||||
@@ -44,68 +44,67 @@ pub async fn cp(
|
||||
if let Some(src_file) = find_file_by_name(&s_parent_files, &s_basename) {
|
||||
let src_file_id = src_file.id.clone();
|
||||
if find_file_by_name(&d_parent_files, &d_basename).is_some() {
|
||||
return Err(format!("File `{}` already exists.", d_basename).into());
|
||||
bail!("File `{}` already exists.", d_basename);
|
||||
}
|
||||
if d_parent_folder
|
||||
.sub_folders
|
||||
.iter()
|
||||
.any(|f| f.name.to_lowercase() == d_basename.to_lowercase())
|
||||
{
|
||||
return Err("Cannot overwrite non-folder with folder.".into());
|
||||
if find_subfolder_by_name(&d_parent_folder.sub_folders, &d_basename).is_some() {
|
||||
bail!(
|
||||
"Cannot overwrite non-folder `{}` with folder `{}`.",
|
||||
d_basename,
|
||||
d_path
|
||||
);
|
||||
}
|
||||
// No-op if source and destination are identical
|
||||
if s_parent_folder.id == d_parent_folder.id && d_basename == s_basename {
|
||||
return Ok(());
|
||||
}
|
||||
let body = serde_json::json!({"folder": d_parent_folder.id, "name": d_basename});
|
||||
let resp = conn
|
||||
.client
|
||||
.post(conn.build_url(&format!("v3/files/{}/copy/", src_file_id)))
|
||||
.headers(conn.prepare_headers())
|
||||
.json(&body)
|
||||
.send()
|
||||
.post_json(&format!("v3/files/{}/copy/", src_file_id), &body)
|
||||
.await?;
|
||||
if !resp.status().is_success() {
|
||||
return Err(format!("Copy failed: {}", resp.status()).into());
|
||||
bail!("Copy failed: {}", resp.status());
|
||||
}
|
||||
println!("Copied: {} -> {}", src_path, dest_path);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Try source as a folder
|
||||
if let Some(src_folder) = s_parent_folder
|
||||
.sub_folders
|
||||
.iter()
|
||||
.find(|f| f.name.to_lowercase() == s_basename.to_lowercase())
|
||||
{
|
||||
if !recursive {
|
||||
return Err(
|
||||
format!("{}: is a folder (use -r to copy folders)", src_path).into(),
|
||||
);
|
||||
let src_folder = match find_subfolder_by_name(&s_parent_folder.sub_folders, &s_basename) {
|
||||
Some(f) => f,
|
||||
None => bail!("File or folder `{}` not found.", s_basename),
|
||||
};
|
||||
if !recursive {
|
||||
bail!("Cannot copy `{}`: Is a folder.", s_path);
|
||||
}
|
||||
let src_folder_id = src_folder.id.clone();
|
||||
if find_file_by_name(&d_parent_files, &d_basename).is_some() {
|
||||
bail!(
|
||||
"Cannot overwrite non-folder `{}` with folder `{}`.",
|
||||
d_basename,
|
||||
s_path
|
||||
);
|
||||
}
|
||||
if let Some(d_folder) = find_subfolder_by_name(&d_parent_folder.sub_folders, &d_basename) {
|
||||
if d_folder.id == src_folder_id {
|
||||
bail!("`{}` and `{}` are the same folder.", s_path, s_path);
|
||||
}
|
||||
let src_folder_id = src_folder.id.clone();
|
||||
if find_file_by_name(&d_parent_files, &d_basename).is_some() {
|
||||
return Err(format!("File `{}` already exists.", d_basename).into());
|
||||
}
|
||||
if d_parent_folder
|
||||
.sub_folders
|
||||
.iter()
|
||||
.any(|f| f.name.to_lowercase() == d_basename.to_lowercase())
|
||||
{
|
||||
return Err("Folder not empty.".into());
|
||||
}
|
||||
let body = serde_json::json!({"parent": d_parent_folder.id, "name": d_basename});
|
||||
let resp = conn
|
||||
.client
|
||||
.post(conn.build_url(&format!("v3/folders/{}/copy/", src_folder_id)))
|
||||
.headers(conn.prepare_headers())
|
||||
.json(&body)
|
||||
.send()
|
||||
.await?;
|
||||
if !resp.status().is_success() {
|
||||
return Err(format!("Copy failed: {}", resp.status()).into());
|
||||
}
|
||||
println!("Copied: {} -> {}", src_path, dest_path);
|
||||
bail!(
|
||||
"Cannot move `{}` to `{}`: Folder not empty.",
|
||||
s_path,
|
||||
d_path
|
||||
);
|
||||
}
|
||||
// No-op if source and destination are identical
|
||||
if s_parent_folder.id == d_parent_folder.id && s_basename == d_basename {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
Err(format!("Source `{}` not found.", src_path).into())
|
||||
let body = serde_json::json!({"parent": d_parent_folder.id, "name": d_basename});
|
||||
let resp = conn
|
||||
.post_json(&format!("v3/folders/{}/copy/", src_folder_id), &body)
|
||||
.await?;
|
||||
if !resp.status().is_success() {
|
||||
bail!("Copy failed: {}", resp.status());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Split a path into (parent_dir, basename).
|
||||
|
||||
+331
-96
@@ -1,11 +1,13 @@
|
||||
use crate::cache::{create_authenticated_conn, load_cache_with_token_refresh};
|
||||
use crate::commands::shared::{
|
||||
create_authenticated_conn, find_file_by_name, find_folder, find_lab_in_cache, load_cache_with_token_refresh,
|
||||
find_file_by_name, find_folder_limited, find_lab_in_cache, find_subfolder_by_name,
|
||||
parse_remote_path,
|
||||
};
|
||||
use crate::connection::MDRSConnection;
|
||||
use futures::stream::{FuturesUnordered, StreamExt};
|
||||
use std::path::Path;
|
||||
use crate::connection::{ApiRequestLimiter, MDRSConnection};
|
||||
use anyhow::{anyhow, bail};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use tokio::task::JoinSet;
|
||||
|
||||
pub async fn download(
|
||||
remote_path: &str,
|
||||
@@ -14,19 +16,19 @@ pub async fn download(
|
||||
skip_if_exists: bool,
|
||||
password: Option<&str>,
|
||||
excludes: Vec<String>,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
) -> Result<(), anyhow::Error> {
|
||||
let (remote, labname, r_path) = parse_remote_path(remote_path)?;
|
||||
let cache = load_cache_with_token_refresh(&remote).await?;
|
||||
let conn = Arc::new(create_authenticated_conn(&remote, &cache)?);
|
||||
let limiter = ApiRequestLimiter::new(crate::settings::SETTINGS.concurrent);
|
||||
let lab = find_lab_in_cache(&cache, &labname)?;
|
||||
|
||||
// Validate that local_path is an existing directory (matching Python's behaviour).
|
||||
let local_real = std::fs::canonicalize(local_path)
|
||||
.map_err(|_| format!("Local directory `{}` not found.", local_path))?;
|
||||
.map_err(|_| anyhow!("Local directory `{}` not found.", local_path))?;
|
||||
if !local_real.is_dir() {
|
||||
return Err(format!("Local directory `{}` not found.", local_path).into());
|
||||
bail!("Local directory `{}` not found.", local_path);
|
||||
}
|
||||
let local_dir_base = local_real.to_string_lossy().to_string();
|
||||
|
||||
// Split r_path into the parent directory path and the target basename.
|
||||
// Trailing slashes are already stripped by parse_remote_path, so this is safe.
|
||||
@@ -40,8 +42,11 @@ pub async fn download(
|
||||
None => ("/".to_string(), r_path_clean.to_string()),
|
||||
};
|
||||
|
||||
let parent_folder = find_folder(&conn, lab.id, &parent_path, password).await?;
|
||||
let files = conn.list_all_files(&parent_folder.id).await?;
|
||||
let parent_folder =
|
||||
find_folder_limited(&conn, &limiter, lab.id, &parent_path, password).await?;
|
||||
let files = conn
|
||||
.list_all_files_limited(&parent_folder.id, &limiter)
|
||||
.await?;
|
||||
|
||||
// Case 1: basename matches a file in the parent folder.
|
||||
if let Some(file) = find_file_by_name(&files, &basename) {
|
||||
@@ -49,126 +54,91 @@ pub async fn download(
|
||||
return Ok(());
|
||||
}
|
||||
// Python always places the downloaded file inside the local directory.
|
||||
let dest = format!("{}/{}", local_dir_base, file.name);
|
||||
let dest = local_real.join(&file.name);
|
||||
if skip_if_exists {
|
||||
if Path::new(&dest).exists() {
|
||||
if dest.exists() {
|
||||
if let Ok(meta) = std::fs::metadata(&dest) {
|
||||
if meta.len() == file.size {
|
||||
println!("{}", dest);
|
||||
println!("{}", dest.display());
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
let url = make_absolute_url(&conn, &file.download_url);
|
||||
conn.download_file(&url, &dest).await?;
|
||||
println!("{}", dest);
|
||||
conn.download_file_limited(&url, &dest.to_string_lossy(), &limiter)
|
||||
.await?;
|
||||
println!("{}", dest.display());
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Case 2: basename matches a sub-folder.
|
||||
let subfolder = parent_folder
|
||||
.sub_folders
|
||||
.iter()
|
||||
.find(|f| f.name.to_lowercase() == basename.to_lowercase());
|
||||
let subfolder = find_subfolder_by_name(&parent_folder.sub_folders, &basename);
|
||||
if let Some(sub) = subfolder {
|
||||
if !recursive {
|
||||
return Err(format!("Cannot download `{}`: Is a folder.", r_path_clean).into());
|
||||
bail!("Cannot download `{}`: Is a folder.", r_path_clean);
|
||||
}
|
||||
// Python downloads into local_path/<remote_folder_name>/ (not directly into local_path).
|
||||
// We create that subdirectory first, then recurse into it.
|
||||
let top_local = format!("{}/{}", local_dir_base, sub.name);
|
||||
let top_local = local_real.join(&sub.name);
|
||||
|
||||
// Iterative DFS: each entry is (remote_folder_id, local_dir)
|
||||
let mut stack: Vec<(String, String)> = vec![(sub.id.clone(), top_local)];
|
||||
let mut folder_tasks: JoinSet<Result<DownloadFolderTaskResult, anyhow::Error>> =
|
||||
JoinSet::new();
|
||||
let mut download_tasks: JoinSet<Result<(), anyhow::Error>> = JoinSet::new();
|
||||
let mut errors = Vec::new();
|
||||
let excludes = Arc::new(excludes);
|
||||
let lab_name = Arc::new(lab.name.clone());
|
||||
let password = password.map(str::to_string);
|
||||
|
||||
while let Some((folder_id, local_dir)) = stack.pop() {
|
||||
let folder = conn.retrieve_folder(&folder_id).await?;
|
||||
spawn_download_folder_task(
|
||||
&mut folder_tasks,
|
||||
conn.clone(),
|
||||
limiter.clone(),
|
||||
lab_name.clone(),
|
||||
excludes.clone(),
|
||||
sub.id.clone(),
|
||||
top_local,
|
||||
password.clone(),
|
||||
skip_if_exists,
|
||||
);
|
||||
|
||||
if is_excluded(&excludes, &lab.name, &folder.path, None) {
|
||||
continue;
|
||||
}
|
||||
drive_download_tasks(
|
||||
&mut folder_tasks,
|
||||
&mut download_tasks,
|
||||
&mut errors,
|
||||
conn.clone(),
|
||||
limiter,
|
||||
lab_name,
|
||||
excludes,
|
||||
password,
|
||||
skip_if_exists,
|
||||
)
|
||||
.await;
|
||||
|
||||
tokio::fs::create_dir_all(&local_dir).await?;
|
||||
println!("{}", local_dir);
|
||||
|
||||
let dir_files = conn.list_all_files(&folder_id).await?;
|
||||
|
||||
// Download files in this folder (up to 10 concurrent).
|
||||
let mut futs: FuturesUnordered<tokio::task::JoinHandle<()>> =
|
||||
FuturesUnordered::new();
|
||||
for f in &dir_files {
|
||||
if is_excluded(&excludes, &lab.name, &folder.path, Some(&f.name)) {
|
||||
continue;
|
||||
}
|
||||
let dest_path = format!("{}/{}", local_dir, f.name);
|
||||
if skip_if_exists {
|
||||
if Path::new(&dest_path).exists() {
|
||||
if let Ok(meta) = std::fs::metadata(&dest_path) {
|
||||
if meta.len() == f.size {
|
||||
println!("{}", dest_path);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
let url = make_absolute_url(&conn, &f.download_url);
|
||||
let conn = conn.clone();
|
||||
let _fname = f.name.clone();
|
||||
futs.push(tokio::spawn(async move {
|
||||
match conn.download_file(&url, &dest_path).await {
|
||||
Ok(_) => println!("{}", dest_path),
|
||||
Err(_) => {
|
||||
eprintln!("Failed: {}", dest_path);
|
||||
if Path::new(&dest_path).is_file() {
|
||||
let _ = std::fs::remove_file(&dest_path);
|
||||
}
|
||||
}
|
||||
}
|
||||
}));
|
||||
if futs.len() >= crate::settings::SETTINGS.concurrent {
|
||||
let _ = futs.next().await;
|
||||
}
|
||||
}
|
||||
while futs.next().await.is_some() {}
|
||||
|
||||
// Push sub-folders onto the stack for recursive processing.
|
||||
for sf in &folder.sub_folders {
|
||||
if sf.lock {
|
||||
match password {
|
||||
Some(pw) => {
|
||||
if conn.folder_auth(&sf.id, pw).await.is_err() {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
None => continue,
|
||||
}
|
||||
}
|
||||
let sub_local = format!("{}/{}", local_dir, sf.name);
|
||||
stack.push((sf.id.clone(), sub_local));
|
||||
}
|
||||
if !errors.is_empty() {
|
||||
bail!(errors.join("\n"));
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
Err(format!("File or folder `{}` not found.", r_path_clean).into())
|
||||
Err(anyhow!("File or folder `{}` not found.", r_path_clean))
|
||||
}
|
||||
|
||||
/// Return true if the given lab/folder/file path matches any exclude pattern.
|
||||
/// Constructs: `/{lab_name}{folder_path}{file_name}` lowercased, trailing slash stripped.
|
||||
/// `folder_path` is expected to already start (and end) with "/".
|
||||
fn is_excluded(excludes: &[String], lab_name: &str, folder_path: &str, file_name: Option<&str>) -> bool {
|
||||
fn is_excluded(
|
||||
excludes: &[String],
|
||||
lab_name: &str,
|
||||
folder_path: &str,
|
||||
file_name: Option<&str>,
|
||||
) -> bool {
|
||||
if excludes.is_empty() {
|
||||
return false;
|
||||
}
|
||||
let path = format!(
|
||||
"/{}{}{}",
|
||||
lab_name,
|
||||
folder_path,
|
||||
file_name.unwrap_or("")
|
||||
)
|
||||
.trim_end_matches('/')
|
||||
.to_lowercase();
|
||||
let path = format!("/{}{}{}", lab_name, folder_path, file_name.unwrap_or(""))
|
||||
.trim_end_matches('/')
|
||||
.to_lowercase();
|
||||
excludes.iter().any(|e| e == &path)
|
||||
}
|
||||
|
||||
@@ -185,3 +155,268 @@ fn make_absolute_url(conn: &MDRSConnection, url: &str) -> String {
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
struct DownloadFolderTaskResult {
|
||||
child_folders: Vec<(String, PathBuf)>,
|
||||
download_jobs: Vec<DownloadJob>,
|
||||
}
|
||||
|
||||
struct DownloadJob {
|
||||
url: String,
|
||||
dest_path: PathBuf,
|
||||
}
|
||||
|
||||
fn spawn_download_folder_task(
|
||||
folder_tasks: &mut JoinSet<Result<DownloadFolderTaskResult, anyhow::Error>>,
|
||||
conn: Arc<MDRSConnection>,
|
||||
limiter: ApiRequestLimiter,
|
||||
lab_name: Arc<String>,
|
||||
excludes: Arc<Vec<String>>,
|
||||
folder_id: String,
|
||||
local_dir: PathBuf,
|
||||
password: Option<String>,
|
||||
skip_if_exists: bool,
|
||||
) {
|
||||
folder_tasks.spawn(async move {
|
||||
process_download_folder(
|
||||
conn,
|
||||
limiter,
|
||||
lab_name,
|
||||
excludes,
|
||||
folder_id,
|
||||
local_dir,
|
||||
password,
|
||||
skip_if_exists,
|
||||
)
|
||||
.await
|
||||
});
|
||||
}
|
||||
|
||||
fn spawn_download_task(
|
||||
download_tasks: &mut JoinSet<Result<(), anyhow::Error>>,
|
||||
conn: Arc<MDRSConnection>,
|
||||
limiter: ApiRequestLimiter,
|
||||
job: DownloadJob,
|
||||
) {
|
||||
download_tasks.spawn(async move {
|
||||
let dest_str = job.dest_path.to_string_lossy().to_string();
|
||||
match conn
|
||||
.download_file_limited(&job.url, &dest_str, &limiter)
|
||||
.await
|
||||
{
|
||||
Ok(()) => {
|
||||
println!("{}", job.dest_path.display());
|
||||
Ok(())
|
||||
}
|
||||
Err(err) => {
|
||||
if job.dest_path.is_file() {
|
||||
let _ = std::fs::remove_file(&job.dest_path);
|
||||
}
|
||||
Err(anyhow!(
|
||||
"Failed to download {}: {}",
|
||||
job.dest_path.display(),
|
||||
err
|
||||
))
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
async fn process_download_folder(
|
||||
conn: Arc<MDRSConnection>,
|
||||
limiter: ApiRequestLimiter,
|
||||
lab_name: Arc<String>,
|
||||
excludes: Arc<Vec<String>>,
|
||||
folder_id: String,
|
||||
local_dir: PathBuf,
|
||||
password: Option<String>,
|
||||
skip_if_exists: bool,
|
||||
) -> Result<DownloadFolderTaskResult, anyhow::Error> {
|
||||
let folder = conn.retrieve_folder_limited(&folder_id, &limiter).await?;
|
||||
|
||||
if is_excluded(excludes.as_slice(), lab_name.as_str(), &folder.path, None) {
|
||||
return Ok(DownloadFolderTaskResult {
|
||||
child_folders: Vec::new(),
|
||||
download_jobs: Vec::new(),
|
||||
});
|
||||
}
|
||||
|
||||
tokio::fs::create_dir_all(&local_dir).await?;
|
||||
println!("{}", local_dir.display());
|
||||
|
||||
let dir_files = conn.list_all_files_limited(&folder_id, &limiter).await?;
|
||||
let mut download_jobs = Vec::new();
|
||||
for file in &dir_files {
|
||||
if is_excluded(
|
||||
excludes.as_slice(),
|
||||
lab_name.as_str(),
|
||||
&folder.path,
|
||||
Some(&file.name),
|
||||
) {
|
||||
continue;
|
||||
}
|
||||
let dest_path = local_dir.join(&file.name);
|
||||
if skip_if_exists && dest_path.exists() {
|
||||
if let Ok(meta) = std::fs::metadata(&dest_path) {
|
||||
if meta.len() == file.size {
|
||||
println!("{}", dest_path.display());
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
download_jobs.push(DownloadJob {
|
||||
url: make_absolute_url(&conn, &file.download_url),
|
||||
dest_path,
|
||||
});
|
||||
}
|
||||
|
||||
let mut child_folder_tasks: JoinSet<Result<Option<(String, PathBuf)>, anyhow::Error>> =
|
||||
JoinSet::new();
|
||||
for sub_folder in folder.sub_folders {
|
||||
let conn = conn.clone();
|
||||
let limiter = limiter.clone();
|
||||
let password = password.clone();
|
||||
let sub_local = local_dir.join(&sub_folder.name);
|
||||
child_folder_tasks.spawn(async move {
|
||||
if sub_folder.lock {
|
||||
match password.as_deref() {
|
||||
Some(pw) => {
|
||||
if conn
|
||||
.folder_auth_limited(&sub_folder.id, pw, &limiter)
|
||||
.await
|
||||
.is_err()
|
||||
{
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
None => return Ok(None),
|
||||
}
|
||||
}
|
||||
Ok(Some((sub_folder.id, sub_local)))
|
||||
});
|
||||
}
|
||||
|
||||
let mut child_folders = Vec::new();
|
||||
while let Some(result) = child_folder_tasks.join_next().await {
|
||||
if let Some(child) = flatten_join_result(result)? {
|
||||
child_folders.push(child);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(DownloadFolderTaskResult {
|
||||
child_folders,
|
||||
download_jobs,
|
||||
})
|
||||
}
|
||||
|
||||
async fn drive_download_tasks(
|
||||
folder_tasks: &mut JoinSet<Result<DownloadFolderTaskResult, anyhow::Error>>,
|
||||
download_tasks: &mut JoinSet<Result<(), anyhow::Error>>,
|
||||
errors: &mut Vec<String>,
|
||||
conn: Arc<MDRSConnection>,
|
||||
limiter: ApiRequestLimiter,
|
||||
lab_name: Arc<String>,
|
||||
excludes: Arc<Vec<String>>,
|
||||
password: Option<String>,
|
||||
skip_if_exists: bool,
|
||||
) {
|
||||
loop {
|
||||
match (folder_tasks.is_empty(), download_tasks.is_empty()) {
|
||||
(true, true) => break,
|
||||
(false, true) => {
|
||||
if let Some(result) = folder_tasks.join_next().await {
|
||||
handle_download_folder_result(
|
||||
result,
|
||||
folder_tasks,
|
||||
download_tasks,
|
||||
errors,
|
||||
conn.clone(),
|
||||
limiter.clone(),
|
||||
lab_name.clone(),
|
||||
excludes.clone(),
|
||||
password.clone(),
|
||||
skip_if_exists,
|
||||
);
|
||||
}
|
||||
}
|
||||
(true, false) => {
|
||||
if let Some(result) = download_tasks.join_next().await {
|
||||
if let Err(err) = flatten_join_result(result) {
|
||||
errors.push(err.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
(false, false) => {
|
||||
tokio::select! {
|
||||
result = folder_tasks.join_next() => {
|
||||
if let Some(result) = result {
|
||||
handle_download_folder_result(
|
||||
result,
|
||||
folder_tasks,
|
||||
download_tasks,
|
||||
errors,
|
||||
conn.clone(),
|
||||
limiter.clone(),
|
||||
lab_name.clone(),
|
||||
excludes.clone(),
|
||||
password.clone(),
|
||||
skip_if_exists,
|
||||
);
|
||||
}
|
||||
}
|
||||
result = download_tasks.join_next() => {
|
||||
if let Some(result) = result {
|
||||
if let Err(err) = flatten_join_result(result) {
|
||||
errors.push(err.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_download_folder_result(
|
||||
result: Result<Result<DownloadFolderTaskResult, anyhow::Error>, tokio::task::JoinError>,
|
||||
folder_tasks: &mut JoinSet<Result<DownloadFolderTaskResult, anyhow::Error>>,
|
||||
download_tasks: &mut JoinSet<Result<(), anyhow::Error>>,
|
||||
errors: &mut Vec<String>,
|
||||
conn: Arc<MDRSConnection>,
|
||||
limiter: ApiRequestLimiter,
|
||||
lab_name: Arc<String>,
|
||||
excludes: Arc<Vec<String>>,
|
||||
password: Option<String>,
|
||||
skip_if_exists: bool,
|
||||
) {
|
||||
match flatten_join_result(result) {
|
||||
Ok(task_result) => {
|
||||
for (folder_id, local_dir) in task_result.child_folders {
|
||||
spawn_download_folder_task(
|
||||
folder_tasks,
|
||||
conn.clone(),
|
||||
limiter.clone(),
|
||||
lab_name.clone(),
|
||||
excludes.clone(),
|
||||
folder_id,
|
||||
local_dir,
|
||||
password.clone(),
|
||||
skip_if_exists,
|
||||
);
|
||||
}
|
||||
for job in task_result.download_jobs {
|
||||
spawn_download_task(download_tasks, conn.clone(), limiter.clone(), job);
|
||||
}
|
||||
}
|
||||
Err(err) => errors.push(err.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
fn flatten_join_result<T>(
|
||||
result: Result<Result<T, anyhow::Error>, tokio::task::JoinError>,
|
||||
) -> Result<T, anyhow::Error> {
|
||||
match result {
|
||||
Ok(inner) => inner,
|
||||
Err(err) => Err(anyhow!("Task join failed: {}", err)),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,14 +1,12 @@
|
||||
use crate::commands::shared::{
|
||||
create_authenticated_conn, find_file_by_name, find_folder, find_lab_in_cache, load_cache_with_token_refresh,
|
||||
parse_remote_path,
|
||||
};
|
||||
use crate::cache::create_readonly_conn;
|
||||
use crate::commands::shared::{find_file_by_name, find_folder, find_laboratory, parse_remote_path};
|
||||
use anyhow::anyhow;
|
||||
|
||||
pub async fn file_metadata(remote_path: &str, password: Option<&str>) -> Result<(), Box<dyn std::error::Error>> {
|
||||
pub async fn file_metadata(remote_path: &str, password: Option<&str>) -> Result<(), anyhow::Error> {
|
||||
let (remote, labname, r_path) = parse_remote_path(remote_path)?;
|
||||
|
||||
let cache = load_cache_with_token_refresh(&remote).await?;
|
||||
let conn = create_authenticated_conn(&remote, &cache)?;
|
||||
let lab = find_lab_in_cache(&cache, &labname)?;
|
||||
let (conn, cache) = create_readonly_conn(&remote).await?;
|
||||
let lab = find_laboratory(&conn, cache.as_ref(), &labname).await?;
|
||||
let lab_id = lab.id;
|
||||
|
||||
// Split the file path into parent directory and filename
|
||||
@@ -24,10 +22,10 @@ pub async fn file_metadata(remote_path: &str, password: Option<&str>) -> Result<
|
||||
let files = conn.list_all_files(&parent_folder.id).await?;
|
||||
|
||||
let file = find_file_by_name(&files, &basename)
|
||||
.ok_or_else(|| format!("File `{}` not found.", basename))?;
|
||||
.ok_or_else(|| anyhow!("File `{}` not found.", basename))?;
|
||||
|
||||
let resp = conn.get(&format!("v3/files/{}/metadata/", file.id)).await?;
|
||||
let json: serde_json::Value = resp.json().await?;
|
||||
println!("{}", serde_json::to_string_pretty(&json)?);
|
||||
println!("{}", serde_json::to_string(&json)?);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
+36
-49
@@ -1,56 +1,43 @@
|
||||
use crate::connection::MDRSConnection;
|
||||
use crate::cache::create_readonly_conn;
|
||||
|
||||
use std::fs;
|
||||
use std::sync::Arc;
|
||||
pub async fn labs(remote: &str) -> Result<(), anyhow::Error> {
|
||||
let (conn, _) = create_readonly_conn(remote).await?;
|
||||
let labs = conn.list_laboratories().await?;
|
||||
|
||||
pub async fn labs(
|
||||
conn: Arc<MDRSConnection>,
|
||||
remote_label: &str,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Try API first
|
||||
match conn.list_laboratories().await {
|
||||
Ok(labs) => {
|
||||
println!("Laboratories:");
|
||||
for lab in labs.items {
|
||||
println!(
|
||||
" {} (PI: {}, Full: {})",
|
||||
lab.name, lab.pi_name, lab.full_name
|
||||
);
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
Err(_) => {
|
||||
// fallback to cache
|
||||
}
|
||||
let header = ("Name", "PI", "Laboratory");
|
||||
let mut w_name = header.0.len();
|
||||
let mut w_pi = header.1.len();
|
||||
let mut w_full = header.2.len();
|
||||
|
||||
for lab in &labs.items {
|
||||
w_name = w_name.max(lab.name.len());
|
||||
w_pi = w_pi.max(lab.pi_name.len());
|
||||
w_full = w_full.max(lab.full_name.len());
|
||||
}
|
||||
|
||||
// fallback: read cache file using remote_label
|
||||
let cache_path = crate::settings::SETTINGS
|
||||
.config_dirname
|
||||
.join("cache")
|
||||
.join(format!("{}.json", remote_label));
|
||||
if !cache_path.exists() {
|
||||
println!("No laboratories available (API failed and no cache)");
|
||||
return Ok(());
|
||||
}
|
||||
let text = fs::read_to_string(&cache_path)?;
|
||||
let v: serde_json::Value = serde_json::from_str(&text)?;
|
||||
// Cache stores laboratories as `{"items": [...]}` (Python-compatible format)
|
||||
let labs_arr = v
|
||||
.get("laboratories")
|
||||
.and_then(|l| l.get("items"))
|
||||
.and_then(|a| a.as_array());
|
||||
if let Some(arr) = labs_arr {
|
||||
println!("Laboratories (from cache):");
|
||||
for lab in arr {
|
||||
let name = lab.get("name").and_then(|s| s.as_str()).unwrap_or("");
|
||||
let pi = lab.get("pi_name").and_then(|s| s.as_str()).unwrap_or("");
|
||||
let full = lab.get("full_name").and_then(|s| s.as_str()).unwrap_or("");
|
||||
println!(" {} (PI: {}, Full: {})", name, pi, full);
|
||||
}
|
||||
} else {
|
||||
println!("No laboratories found in cache");
|
||||
println!(
|
||||
"{:<w_name$} {:<w_pi$} {:<w_full$}",
|
||||
header.0,
|
||||
header.1,
|
||||
header.2,
|
||||
w_name = w_name,
|
||||
w_pi = w_pi,
|
||||
w_full = w_full,
|
||||
);
|
||||
let sep_len = w_name + 2 + w_pi + 2 + w_full;
|
||||
println!("{}", "-".repeat(sep_len));
|
||||
|
||||
for lab in &labs.items {
|
||||
println!(
|
||||
"{:<w_name$} {:<w_pi$} {:<w_full$}",
|
||||
lab.name,
|
||||
lab.pi_name,
|
||||
lab.full_name,
|
||||
w_name = w_name,
|
||||
w_pi = w_pi,
|
||||
w_full = w_full,
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
+50
-110
@@ -1,14 +1,35 @@
|
||||
use crate::commands::shared::{CacheLaboratory, CacheLabsWrapper, CacheUser, compute_digest};
|
||||
use crate::cache::{Cache, CacheLabsWrapper, CacheToken, CacheUser, compute_digest, persist_cache};
|
||||
use crate::connection::MDRSConnection;
|
||||
use crate::models::laboratory::Laboratories;
|
||||
use crate::models::user::User;
|
||||
use anyhow::{anyhow, bail};
|
||||
use reqwest::Client;
|
||||
use serde::Deserialize;
|
||||
use serde_json::{Value, json};
|
||||
use std::error::Error;
|
||||
use std::fs;
|
||||
#[cfg(unix)]
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
|
||||
/// Prompt for credentials if not supplied and then perform login.
|
||||
/// This is the entry point called from `main`.
|
||||
pub async fn run_login(
|
||||
username: Option<&str>,
|
||||
password: Option<&str>,
|
||||
remote: &str,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
use std::io::{self, Write};
|
||||
let username_val: String = match username {
|
||||
Some(u) => u.to_string(),
|
||||
None => {
|
||||
print!("Username: ");
|
||||
io::stdout().flush()?;
|
||||
let mut s = String::new();
|
||||
io::stdin().read_line(&mut s)?;
|
||||
s.trim().to_string()
|
||||
}
|
||||
};
|
||||
let password_val: String = match password {
|
||||
Some(p) => p.to_string(),
|
||||
None => rpassword::prompt_password("Password: ")?,
|
||||
};
|
||||
login(&username_val, &password_val, remote).await
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct TokenResp {
|
||||
@@ -16,40 +37,10 @@ struct TokenResp {
|
||||
refresh: String,
|
||||
}
|
||||
|
||||
/// Convert an API `User` into a `CacheUser` (same fields, different type).
|
||||
fn to_cache_user(u: &User) -> CacheUser {
|
||||
CacheUser {
|
||||
id: u.id,
|
||||
username: u.username.clone(),
|
||||
laboratory_ids: u.laboratory_ids.clone(),
|
||||
is_reviewer: u.is_reviewer,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert API `Laboratories` into `CacheLabsWrapper` (all four fields already present).
|
||||
fn to_cache_labs(labs: &Laboratories) -> CacheLabsWrapper {
|
||||
CacheLabsWrapper {
|
||||
items: labs
|
||||
.items
|
||||
.iter()
|
||||
.map(|l| CacheLaboratory {
|
||||
id: l.id,
|
||||
name: l.name.clone(),
|
||||
pi_name: l.pi_name.clone(),
|
||||
full_name: l.full_name.clone(),
|
||||
})
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn login(
|
||||
username: &str,
|
||||
password: &str,
|
||||
remote: &str,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
pub async fn login(username: &str, password: &str, remote: &str) -> Result<(), anyhow::Error> {
|
||||
// resolve remote label to URL from config
|
||||
let url_opt = crate::commands::config::get_remote_url(remote)?;
|
||||
let base_url = url_opt.ok_or(format!("Remote host `{}` is not configured", remote))?;
|
||||
let base_url = url_opt.ok_or_else(|| anyhow!("Remote host `{}` is not configured", remote))?;
|
||||
let conn0 = MDRSConnection::new(&base_url);
|
||||
let client = Client::new();
|
||||
let url = conn0.build_url("v3/users/token/");
|
||||
@@ -57,19 +48,12 @@ pub async fn login(
|
||||
let resp_res = client.post(&url).form(¶ms).send().await;
|
||||
let resp = match resp_res {
|
||||
Ok(r) => r,
|
||||
Err(e) => {
|
||||
let src = e.source();
|
||||
return Err(format!(
|
||||
"Login failed sending request to {}: {} (source: {:?})",
|
||||
url, e, src
|
||||
)
|
||||
.into());
|
||||
}
|
||||
Err(e) => bail!("Login failed sending request to {}: {}", url, e),
|
||||
};
|
||||
let status = resp.status();
|
||||
if !status.is_success() {
|
||||
let body = resp.text().await.unwrap_or_default();
|
||||
return Err(format!("Login failed: {} - {}", status, body).into());
|
||||
bail!("Login failed: {} - {}", status, body);
|
||||
}
|
||||
let token: TokenResp = resp.json().await?;
|
||||
|
||||
@@ -80,72 +64,28 @@ pub async fn login(
|
||||
let labs: Laboratories = conn.list_laboratories().await.unwrap_or_default();
|
||||
|
||||
// convert to cache types (all four Laboratory fields required for digest)
|
||||
let cache_user_opt: Option<CacheUser> = user_opt.as_ref().map(to_cache_user);
|
||||
let cache_labs = to_cache_labs(&labs);
|
||||
let cache_user_opt: Option<CacheUser> = user_opt.as_ref().map(|u| u.into());
|
||||
let cache_labs: CacheLabsWrapper = (&labs).into();
|
||||
|
||||
// compute Python-compatible digest
|
||||
let digest = compute_digest(
|
||||
cache_user_opt.as_ref(),
|
||||
&token.access,
|
||||
&token.refresh,
|
||||
&cache_labs,
|
||||
);
|
||||
|
||||
// build the cache JSON — field order matches Python's dataclass layout:
|
||||
// user (id, username, laboratory_ids, is_reviewer)
|
||||
// token (access, refresh)
|
||||
// laboratories (items)
|
||||
// digest
|
||||
let user_val: Value = match &cache_user_opt {
|
||||
Some(u) => json!({
|
||||
"id": u.id,
|
||||
"username": u.username,
|
||||
"laboratory_ids": u.laboratory_ids,
|
||||
"is_reviewer": u.is_reviewer
|
||||
}),
|
||||
None => Value::Null,
|
||||
let cache = Cache {
|
||||
user: cache_user_opt,
|
||||
token: CacheToken {
|
||||
access: token.access,
|
||||
refresh: token.refresh,
|
||||
},
|
||||
laboratories: cache_labs,
|
||||
digest: String::new(),
|
||||
};
|
||||
let labs_items: Vec<Value> = cache_labs
|
||||
.items
|
||||
.iter()
|
||||
.map(|l| {
|
||||
json!({
|
||||
"id": l.id,
|
||||
"name": l.name,
|
||||
"pi_name": l.pi_name,
|
||||
"full_name": l.full_name
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
let obj = json!({
|
||||
"user": user_val,
|
||||
"token": {"access": token.access, "refresh": token.refresh},
|
||||
"laboratories": {"items": labs_items},
|
||||
"digest": digest
|
||||
});
|
||||
let mut cache = cache;
|
||||
cache.digest = compute_digest(
|
||||
cache.user.as_ref(),
|
||||
&cache.token.access,
|
||||
&cache.token.refresh,
|
||||
&cache.laboratories,
|
||||
);
|
||||
persist_cache(remote, &cache)?;
|
||||
|
||||
// write cache file: {config_dirname}/cache/<remote>.json
|
||||
let cache_dir = crate::settings::SETTINGS.config_dirname.join("cache");
|
||||
fs::create_dir_all(&cache_dir)?;
|
||||
#[cfg(unix)]
|
||||
{
|
||||
let mut perms = fs::metadata(&cache_dir)?.permissions();
|
||||
perms.set_mode(0o700);
|
||||
fs::set_permissions(&cache_dir, perms)?;
|
||||
}
|
||||
let cache_file = cache_dir.join(format!("{}.json", remote));
|
||||
let tmp = cache_file.with_extension("tmp");
|
||||
|
||||
fs::write(&tmp, serde_json::to_vec_pretty(&obj)?)?;
|
||||
#[cfg(unix)]
|
||||
{
|
||||
let mut perms = fs::metadata(&tmp)?.permissions();
|
||||
perms.set_mode(0o600);
|
||||
fs::set_permissions(&tmp, perms)?;
|
||||
}
|
||||
fs::rename(&tmp, &cache_file)?;
|
||||
|
||||
println!("Login successful and cached for {}.", remote);
|
||||
println!("Login Successful");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
+2
-14
@@ -1,15 +1,3 @@
|
||||
use std::fs;
|
||||
|
||||
pub fn logout(remote: &str) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let cache_path = crate::settings::SETTINGS
|
||||
.config_dirname
|
||||
.join("cache")
|
||||
.join(format!("{}.json", remote));
|
||||
if cache_path.exists() {
|
||||
fs::remove_file(&cache_path)?;
|
||||
println!("Logged out from {}", remote);
|
||||
} else {
|
||||
println!("No login cache found for {}", remote);
|
||||
}
|
||||
Ok(())
|
||||
pub fn logout(remote: &str) -> Result<(), anyhow::Error> {
|
||||
crate::cache::remove_cache(remote)
|
||||
}
|
||||
|
||||
+17
-22
@@ -1,11 +1,9 @@
|
||||
use crate::cache::create_readonly_conn;
|
||||
use crate::commands::shared::{find_folder, find_laboratory, fmt_datetime, parse_remote_path};
|
||||
use crate::connection::MDRSConnection;
|
||||
use crate::models::file::File;
|
||||
use crate::models::folder::{FolderDetail, FolderSimple};
|
||||
use crate::commands::shared::{
|
||||
create_authenticated_conn, find_folder, find_lab_in_cache, fmt_datetime,
|
||||
load_cache_with_token_refresh, parse_remote_path,
|
||||
};
|
||||
use crate::connection::MDRSConnection;
|
||||
use serde_json::{json, Value};
|
||||
use serde_json::{Value, json};
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
|
||||
@@ -14,12 +12,11 @@ pub async fn ls(
|
||||
password: Option<&str>,
|
||||
is_json: bool,
|
||||
is_recursive: bool,
|
||||
is_quick: bool,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
is_quiet: bool,
|
||||
) -> Result<(), anyhow::Error> {
|
||||
let (remote, labname, path) = parse_remote_path(remote_path)?;
|
||||
let cache = load_cache_with_token_refresh(&remote).await?;
|
||||
let conn = create_authenticated_conn(&remote, &cache)?;
|
||||
let lab = find_lab_in_cache(&cache, &labname)?;
|
||||
let (conn, cache) = create_readonly_conn(&remote).await?;
|
||||
let lab = find_laboratory(&conn, cache.as_ref(), &labname).await?;
|
||||
|
||||
let folder = find_folder(&conn, lab.id, &path, password).await?;
|
||||
|
||||
@@ -29,7 +26,7 @@ pub async fn ls(
|
||||
} else {
|
||||
build_folder_json_flat(&conn, &folder, &labname).await?
|
||||
};
|
||||
println!("{}", serde_json::to_string_pretty(&output)?);
|
||||
println!("{}", serde_json::to_string(&output)?);
|
||||
} else if is_recursive {
|
||||
let prefix = format!("{}:/{}", remote, labname);
|
||||
ls_plain_recursive(&conn, folder, &labname, &prefix, password).await?;
|
||||
@@ -44,7 +41,7 @@ pub async fn ls(
|
||||
&files_sorted,
|
||||
folder.access_level_name(),
|
||||
&labname,
|
||||
!is_quick,
|
||||
!is_quiet,
|
||||
);
|
||||
}
|
||||
|
||||
@@ -151,7 +148,7 @@ fn ls_plain_recursive<'a>(
|
||||
labname: &'a str,
|
||||
prefix: &'a str,
|
||||
password: Option<&'a str>,
|
||||
) -> Pin<Box<dyn Future<Output = Result<(), Box<dyn std::error::Error>>> + 'a>> {
|
||||
) -> Pin<Box<dyn Future<Output = Result<(), anyhow::Error>> + 'a>> {
|
||||
Box::pin(async move {
|
||||
let files = conn.list_all_files(&folder.id).await?;
|
||||
let total_size: u64 = files.iter().map(|f| f.size).sum();
|
||||
@@ -167,6 +164,8 @@ fn ls_plain_recursive<'a>(
|
||||
|
||||
print_folder_plain(&sub_folders, &files_sorted, access, labname, false);
|
||||
|
||||
println!();
|
||||
|
||||
for sf in sub_folders {
|
||||
if sf.lock {
|
||||
match password {
|
||||
@@ -197,7 +196,7 @@ fn ls_plain_recursive<'a>(
|
||||
async fn get_folder_metadata(
|
||||
conn: &MDRSConnection,
|
||||
folder_id: &str,
|
||||
) -> Result<Value, Box<dyn std::error::Error>> {
|
||||
) -> Result<Value, anyhow::Error> {
|
||||
let resp = conn
|
||||
.get(&format!("v3/folders/{}/metadata/", folder_id))
|
||||
.await?;
|
||||
@@ -212,11 +211,7 @@ fn file_to_json(f: &File, base_url: &str) -> Value {
|
||||
let download_url = if f.download_url.starts_with("http") {
|
||||
f.download_url.clone()
|
||||
} else {
|
||||
format!(
|
||||
"{}{}",
|
||||
base_url.trim_end_matches('/'),
|
||||
f.download_url
|
||||
)
|
||||
format!("{}{}", base_url.trim_end_matches('/'), f.download_url)
|
||||
};
|
||||
json!({
|
||||
"id": f.id,
|
||||
@@ -251,7 +246,7 @@ async fn build_folder_json_flat(
|
||||
conn: &MDRSConnection,
|
||||
folder: &FolderDetail,
|
||||
labname: &str,
|
||||
) -> Result<Value, Box<dyn std::error::Error>> {
|
||||
) -> Result<Value, anyhow::Error> {
|
||||
let metadata = get_folder_metadata(conn, &folder.id).await?;
|
||||
let files = conn.list_all_files(&folder.id).await?;
|
||||
let files_json: Vec<Value> = files.iter().map(|f| file_to_json(f, &conn.url)).collect();
|
||||
@@ -283,7 +278,7 @@ fn build_folder_json_recursive<'a>(
|
||||
conn: &'a MDRSConnection,
|
||||
folder: FolderDetail,
|
||||
labname: &'a str,
|
||||
) -> Pin<Box<dyn Future<Output = Result<Value, Box<dyn std::error::Error>>> + 'a>> {
|
||||
) -> Pin<Box<dyn Future<Output = Result<Value, anyhow::Error>> + 'a>> {
|
||||
Box::pin(async move {
|
||||
let metadata = get_folder_metadata(conn, &folder.id).await?;
|
||||
let files = conn.list_all_files(&folder.id).await?;
|
||||
|
||||
@@ -1,18 +1,16 @@
|
||||
use crate::commands::shared::{
|
||||
create_authenticated_conn, find_folder, find_lab_in_cache, load_cache_with_token_refresh, parse_remote_path,
|
||||
};
|
||||
use crate::cache::create_readonly_conn;
|
||||
use crate::commands::shared::{find_folder, find_laboratory, parse_remote_path};
|
||||
|
||||
pub async fn metadata(remote_path: &str, password: Option<&str>) -> Result<(), Box<dyn std::error::Error>> {
|
||||
pub async fn metadata(remote_path: &str, password: Option<&str>) -> Result<(), anyhow::Error> {
|
||||
let (remote, labname, folder_path) = parse_remote_path(remote_path)?;
|
||||
let cache = load_cache_with_token_refresh(&remote).await?;
|
||||
let conn = create_authenticated_conn(&remote, &cache)?;
|
||||
let lab = find_lab_in_cache(&cache, &labname)?;
|
||||
let (conn, cache) = create_readonly_conn(&remote).await?;
|
||||
let lab = find_laboratory(&conn, cache.as_ref(), &labname).await?;
|
||||
let folder = find_folder(&conn, lab.id, &folder_path, password).await?;
|
||||
|
||||
let resp = conn
|
||||
.get(&format!("v3/folders/{}/metadata/", folder.id))
|
||||
.await?;
|
||||
let json: serde_json::Value = resp.json().await?;
|
||||
println!("{}", serde_json::to_string_pretty(&json)?);
|
||||
println!("{}", serde_json::to_string(&json)?);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
+13
-18
@@ -1,16 +1,18 @@
|
||||
use crate::cache::{create_authenticated_conn, load_cache_with_token_refresh};
|
||||
use crate::commands::shared::{
|
||||
create_authenticated_conn, find_file_by_name, find_folder, find_lab_in_cache, load_cache_with_token_refresh,
|
||||
find_file_by_name, find_folder, find_lab_in_cache, find_subfolder_by_name, nfc,
|
||||
parse_remote_path,
|
||||
};
|
||||
use anyhow::{anyhow, bail};
|
||||
|
||||
pub async fn mkdir(remote_path: &str) -> Result<(), Box<dyn std::error::Error>> {
|
||||
pub async fn mkdir(remote_path: &str) -> Result<(), anyhow::Error> {
|
||||
let (remote, labname, path) = parse_remote_path(remote_path)?;
|
||||
|
||||
// Split into parent path and new folder name
|
||||
let path = path.trim_end_matches('/');
|
||||
let last_slash = path
|
||||
.rfind('/')
|
||||
.ok_or("Invalid path: cannot determine parent folder")?;
|
||||
.ok_or_else(|| anyhow!("Invalid path: cannot determine parent folder"))?;
|
||||
let parent_path = if last_slash == 0 {
|
||||
"/"
|
||||
} else {
|
||||
@@ -18,7 +20,7 @@ pub async fn mkdir(remote_path: &str) -> Result<(), Box<dyn std::error::Error>>
|
||||
};
|
||||
let new_folder_name = &path[last_slash + 1..];
|
||||
if new_folder_name.is_empty() {
|
||||
return Err("Invalid path: folder name cannot be empty".into());
|
||||
bail!("Invalid path: folder name cannot be empty");
|
||||
}
|
||||
|
||||
let cache = load_cache_with_token_refresh(&remote).await?;
|
||||
@@ -26,26 +28,19 @@ pub async fn mkdir(remote_path: &str) -> Result<(), Box<dyn std::error::Error>>
|
||||
let lab = find_lab_in_cache(&cache, &labname)?;
|
||||
let parent_folder = find_folder(&conn, lab.id, parent_path, None).await?;
|
||||
|
||||
// Check for name conflict in sub-folders
|
||||
if parent_folder
|
||||
.sub_folders
|
||||
.iter()
|
||||
.any(|f| f.name == new_folder_name)
|
||||
{
|
||||
return Err(format!("'{}' already exists as a folder", new_folder_name).into());
|
||||
}
|
||||
// Check for name conflict in files
|
||||
// Check for name conflict in sub-folders or files
|
||||
let files = conn.list_all_files(&parent_folder.id).await?;
|
||||
if find_file_by_name(&files, new_folder_name).is_some() {
|
||||
return Err(format!("'{}' already exists as a file", new_folder_name).into());
|
||||
if find_subfolder_by_name(&parent_folder.sub_folders, new_folder_name).is_some()
|
||||
|| find_file_by_name(&files, new_folder_name).is_some()
|
||||
{
|
||||
bail!("Cannot create folder `{}`: File exists.", path);
|
||||
}
|
||||
|
||||
let resp = conn
|
||||
.create_folder(&parent_folder.id, new_folder_name)
|
||||
.create_folder(&parent_folder.id, &nfc(new_folder_name))
|
||||
.await?;
|
||||
if !resp.status().is_success() {
|
||||
return Err(format!("Failed to create folder: {}", resp.status()).into());
|
||||
bail!("Failed to create folder: {}", resp.status());
|
||||
}
|
||||
println!("Created folder: {}", new_folder_name);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -14,6 +14,8 @@ pub mod metadata;
|
||||
pub mod mkdir;
|
||||
pub mod mv;
|
||||
pub mod rm;
|
||||
pub mod selfupdate;
|
||||
pub mod shared;
|
||||
pub mod upload;
|
||||
pub mod version;
|
||||
pub mod whoami;
|
||||
|
||||
+54
-49
@@ -1,18 +1,20 @@
|
||||
use crate::cache::{create_authenticated_conn, load_cache_with_token_refresh};
|
||||
use crate::commands::shared::{
|
||||
create_authenticated_conn, find_file_by_name, find_folder, find_lab_in_cache, load_cache_with_token_refresh,
|
||||
find_file_by_name, find_folder, find_lab_in_cache, find_subfolder_by_name, nfc,
|
||||
parse_remote_path,
|
||||
};
|
||||
use anyhow::bail;
|
||||
|
||||
pub async fn mv(src_path: &str, dest_path: &str) -> Result<(), Box<dyn std::error::Error>> {
|
||||
pub async fn mv(src_path: &str, dest_path: &str) -> Result<(), anyhow::Error> {
|
||||
let (s_remote, s_lab, s_path) = parse_remote_path(src_path)?;
|
||||
let dest_ends_with_slash = dest_path.ends_with('/');
|
||||
let (d_remote, d_lab, d_path) = parse_remote_path(dest_path)?;
|
||||
|
||||
if s_remote != d_remote {
|
||||
return Err("Source and destination must use the same remote.".into());
|
||||
bail!("Remote host mismatched.");
|
||||
}
|
||||
if s_lab != d_lab {
|
||||
return Err("Source and destination must be in the same laboratory.".into());
|
||||
bail!("Laboratory mismatched.");
|
||||
}
|
||||
|
||||
let cache = load_cache_with_token_refresh(&s_remote).await?;
|
||||
@@ -21,14 +23,16 @@ pub async fn mv(src_path: &str, dest_path: &str) -> Result<(), Box<dyn std::erro
|
||||
let lab_id = lab.id;
|
||||
|
||||
// Split source path into parent directory and target name
|
||||
let (s_dirname, s_basename) = split_path(&s_path);
|
||||
let (s_dirname, s_basename_raw) = split_path(&s_path);
|
||||
let s_basename = nfc(&s_basename_raw);
|
||||
|
||||
// If dest ends with '/', treat it as a directory and preserve src basename
|
||||
let (d_dirname, d_basename) = if dest_ends_with_slash {
|
||||
let (d_dirname, d_basename_raw) = if dest_ends_with_slash {
|
||||
(d_path.clone(), s_basename.clone())
|
||||
} else {
|
||||
split_path(&d_path)
|
||||
};
|
||||
let d_basename = nfc(&d_basename_raw);
|
||||
|
||||
let s_parent_folder = find_folder(&conn, lab_id, &s_dirname, None).await?;
|
||||
let s_parent_files = conn.list_all_files(&s_parent_folder.id).await?;
|
||||
@@ -40,63 +44,64 @@ pub async fn mv(src_path: &str, dest_path: &str) -> Result<(), Box<dyn std::erro
|
||||
if let Some(src_file) = find_file_by_name(&s_parent_files, &s_basename) {
|
||||
let src_file_id = src_file.id.clone();
|
||||
if find_file_by_name(&d_parent_files, &d_basename).is_some() {
|
||||
return Err(format!("File `{}` already exists.", d_basename).into());
|
||||
bail!("File `{}` already exists.", d_basename);
|
||||
}
|
||||
if d_parent_folder
|
||||
.sub_folders
|
||||
.iter()
|
||||
.any(|f| f.name.to_lowercase() == d_basename.to_lowercase())
|
||||
{
|
||||
return Err("Cannot overwrite non-folder with folder.".into());
|
||||
if find_subfolder_by_name(&d_parent_folder.sub_folders, &d_basename).is_some() {
|
||||
bail!(
|
||||
"Cannot overwrite non-folder `{}` with folder `{}`.",
|
||||
d_basename,
|
||||
d_path
|
||||
);
|
||||
}
|
||||
// No-op if source and destination are identical
|
||||
if s_parent_folder.id == d_parent_folder.id && d_basename == s_basename {
|
||||
return Ok(());
|
||||
}
|
||||
let body = serde_json::json!({"folder": d_parent_folder.id, "name": d_basename});
|
||||
let resp = conn
|
||||
.client
|
||||
.post(conn.build_url(&format!("v3/files/{}/move/", src_file_id)))
|
||||
.headers(conn.prepare_headers())
|
||||
.json(&body)
|
||||
.send()
|
||||
.post_json(&format!("v3/files/{}/move/", src_file_id), &body)
|
||||
.await?;
|
||||
if !resp.status().is_success() {
|
||||
return Err(format!("Move failed: {}", resp.status()).into());
|
||||
bail!("Move failed: {}", resp.status());
|
||||
}
|
||||
println!("Moved: {} -> {}", src_path, dest_path);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Try source as a folder
|
||||
if let Some(src_folder) = s_parent_folder
|
||||
.sub_folders
|
||||
.iter()
|
||||
.find(|f| f.name.to_lowercase() == s_basename.to_lowercase())
|
||||
{
|
||||
let src_folder_id = src_folder.id.clone();
|
||||
if find_file_by_name(&d_parent_files, &d_basename).is_some() {
|
||||
return Err(format!("File `{}` already exists.", d_basename).into());
|
||||
let src_folder = match find_subfolder_by_name(&s_parent_folder.sub_folders, &s_basename) {
|
||||
Some(f) => f,
|
||||
None => bail!("File or folder `{}` not found.", s_basename),
|
||||
};
|
||||
let src_folder_id = src_folder.id.clone();
|
||||
if find_file_by_name(&d_parent_files, &d_basename).is_some() {
|
||||
bail!(
|
||||
"Cannot overwrite non-folder `{}` with folder `{}`.",
|
||||
d_basename,
|
||||
s_path
|
||||
);
|
||||
}
|
||||
if let Some(d_folder) = find_subfolder_by_name(&d_parent_folder.sub_folders, &d_basename) {
|
||||
if d_folder.id == src_folder_id {
|
||||
bail!("`{}` and `{}` are the same folder.", s_path, s_path);
|
||||
}
|
||||
if d_parent_folder
|
||||
.sub_folders
|
||||
.iter()
|
||||
.any(|f| f.name.to_lowercase() == d_basename.to_lowercase())
|
||||
{
|
||||
return Err("Folder not empty.".into());
|
||||
}
|
||||
let body = serde_json::json!({"parent": d_parent_folder.id, "name": d_basename});
|
||||
let resp = conn
|
||||
.client
|
||||
.post(conn.build_url(&format!("v3/folders/{}/move/", src_folder_id)))
|
||||
.headers(conn.prepare_headers())
|
||||
.json(&body)
|
||||
.send()
|
||||
.await?;
|
||||
if !resp.status().is_success() {
|
||||
return Err(format!("Move failed: {}", resp.status()).into());
|
||||
}
|
||||
println!("Moved: {} -> {}", src_path, dest_path);
|
||||
bail!(
|
||||
"Cannot move `{}` to `{}`: Folder not empty.",
|
||||
s_path,
|
||||
d_path
|
||||
);
|
||||
}
|
||||
// No-op if source and destination are identical
|
||||
if s_parent_folder.id == d_parent_folder.id && s_basename == d_basename {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
Err(format!("Source `{}` not found.", src_path).into())
|
||||
let body = serde_json::json!({"parent": d_parent_folder.id, "name": d_basename});
|
||||
let resp = conn
|
||||
.post_json(&format!("v3/folders/{}/move/", src_folder_id), &body)
|
||||
.await?;
|
||||
if !resp.status().is_success() {
|
||||
bail!("Move failed: {}", resp.status());
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Split a path into (parent_dir, basename).
|
||||
|
||||
+16
-27
@@ -1,14 +1,15 @@
|
||||
use crate::cache::{create_authenticated_conn, load_cache_with_token_refresh};
|
||||
use crate::commands::shared::{
|
||||
create_authenticated_conn, find_file_by_name, find_folder, find_lab_in_cache, load_cache_with_token_refresh,
|
||||
parse_remote_path,
|
||||
find_file_by_name, find_folder, find_lab_in_cache, find_subfolder_by_name, parse_remote_path,
|
||||
};
|
||||
use anyhow::{anyhow, bail};
|
||||
|
||||
pub async fn rm(remote_path: &str, recursive: bool) -> Result<(), Box<dyn std::error::Error>> {
|
||||
pub async fn rm(remote_path: &str, recursive: bool) -> Result<(), anyhow::Error> {
|
||||
let (remote, labname, path) = parse_remote_path(remote_path)?;
|
||||
|
||||
// Split into parent path and target name
|
||||
let path = path.trim_end_matches('/');
|
||||
let last_slash = path.rfind('/').ok_or("Invalid path")?;
|
||||
let last_slash = path.rfind('/').ok_or_else(|| anyhow!("Invalid path"))?;
|
||||
let parent_path = if last_slash == 0 {
|
||||
"/"
|
||||
} else {
|
||||
@@ -16,7 +17,7 @@ pub async fn rm(remote_path: &str, recursive: bool) -> Result<(), Box<dyn std::e
|
||||
};
|
||||
let target_name = &path[last_slash + 1..];
|
||||
if target_name.is_empty() {
|
||||
return Err("Cannot remove root folder".into());
|
||||
bail!("Cannot remove root folder");
|
||||
}
|
||||
|
||||
let cache = load_cache_with_token_refresh(&remote).await?;
|
||||
@@ -27,41 +28,29 @@ pub async fn rm(remote_path: &str, recursive: bool) -> Result<(), Box<dyn std::e
|
||||
// Check if target is a file
|
||||
let files = conn.list_all_files(&parent_folder.id).await?;
|
||||
if let Some(file) = find_file_by_name(&files, target_name) {
|
||||
let resp = conn
|
||||
.client
|
||||
.delete(conn.build_url(&format!("v3/files/{}/", file.id)))
|
||||
.headers(conn.prepare_headers())
|
||||
.send()
|
||||
.await?;
|
||||
let resp = conn.delete(&format!("v3/files/{}/", file.id)).await?;
|
||||
if !resp.status().is_success() {
|
||||
return Err(format!("Failed to delete file: {}", resp.status()).into());
|
||||
bail!("Failed to delete file: {}", resp.status());
|
||||
}
|
||||
println!("Deleted file: {}", target_name);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Check if target is a sub-folder
|
||||
if let Some(subfolder) = parent_folder
|
||||
.sub_folders
|
||||
.iter()
|
||||
.find(|f| f.name == target_name)
|
||||
{
|
||||
if let Some(subfolder) = find_subfolder_by_name(&parent_folder.sub_folders, target_name) {
|
||||
if !recursive {
|
||||
return Err(format!("'{}': Is a folder", target_name).into());
|
||||
bail!("Cannot remove `{}`: Is a folder.", path);
|
||||
}
|
||||
let resp = conn
|
||||
.client
|
||||
.delete(conn.build_url(&format!("v3/folders/{}/", subfolder.id)))
|
||||
.headers(conn.prepare_headers())
|
||||
.query(&[("recursive", "true")])
|
||||
.send()
|
||||
.delete_with_query(
|
||||
&format!("v3/folders/{}/", subfolder.id),
|
||||
&[("recursive", "true")],
|
||||
)
|
||||
.await?;
|
||||
if !resp.status().is_success() {
|
||||
return Err(format!("Failed to delete folder: {}", resp.status()).into());
|
||||
bail!("Failed to delete folder: {}", resp.status());
|
||||
}
|
||||
println!("Deleted folder: {}", target_name);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
Err(format!("'{}': No such file or directory", target_name).into())
|
||||
Err(anyhow!("Cannot remove `{}`: No such file or folder.", path))
|
||||
}
|
||||
|
||||
@@ -0,0 +1,218 @@
|
||||
use anyhow::{anyhow, bail};
|
||||
use reqwest::header::{AUTHORIZATION, USER_AGENT};
|
||||
use serde::Deserialize;
|
||||
use std::env;
|
||||
use std::io::{self, Write};
|
||||
use std::path::Path;
|
||||
|
||||
/// Base URL of the Gitea instance that hosts mdrs release artifacts.
const GITEA_HOST: &str = "https://git.ni.riken.jp";
/// Owner of the repository whose releases are queried by `selfupdate`.
const REPO_OWNER: &str = "niu";
/// Name of the repository whose releases are queried by `selfupdate`.
const REPO_NAME: &str = "mdrs-client-rust";

/// Current build target triple, captured at compile time via build.rs.
/// Used to pick the matching release asset for this platform.
const BUILD_TARGET: &str = env!("BUILD_TARGET");
|
||||
|
||||
/// Subset of Gitea's release API response consumed by `selfupdate`.
#[derive(Deserialize)]
struct GiteaRelease {
    /// Release tag, e.g. "v1.2.3" (leading 'v' is stripped before comparison).
    tag_name: String,
    /// Downloadable artifacts attached to the release.
    assets: Vec<GiteaAsset>,
}

/// A single downloadable artifact attached to a Gitea release.
#[derive(Deserialize)]
struct GiteaAsset {
    /// Asset file name; matched against `BUILD_TARGET` to pick the right build.
    name: String,
    /// Direct download URL for the asset.
    browser_download_url: String,
}
|
||||
|
||||
/// Returns true if `latest` is strictly greater than `current` (semver-like comparison).
///
/// Either argument may carry a leading `v`. Versions are compared segment by
/// segment numerically; missing or non-numeric segments count as 0.
fn is_newer(current: &str, latest: &str) -> bool {
    fn segments(version: &str) -> Vec<u64> {
        version
            .trim_start_matches('v')
            .split('.')
            .map(|seg| seg.parse().unwrap_or(0))
            .collect()
    }

    let cur = segments(current);
    let lat = segments(latest);
    let width = cur.len().max(lat.len());

    // First differing segment decides; equal throughout means "not newer".
    for idx in 0..width {
        let c = *cur.get(idx).unwrap_or(&0);
        let l = *lat.get(idx).unwrap_or(&0);
        if l != c {
            return l > c;
        }
    }
    false
}
|
||||
|
||||
/// Extract the binary named `bin_name` from a `.tar.gz` archive at `archive_path`
|
||||
/// and write it to `dest_path`.
|
||||
fn extract_from_tar_gz(
|
||||
archive_path: &Path,
|
||||
bin_name: &str,
|
||||
dest_path: &Path,
|
||||
) -> anyhow::Result<()> {
|
||||
use flate2::read::GzDecoder;
|
||||
use tar::Archive;
|
||||
|
||||
let file = std::fs::File::open(archive_path)?;
|
||||
let gz = GzDecoder::new(file);
|
||||
let mut archive = Archive::new(gz);
|
||||
|
||||
for entry in archive.entries()? {
|
||||
let mut entry = entry?;
|
||||
let path = entry.path()?;
|
||||
// Match by file name only (ignore directory prefix in archive).
|
||||
if path.file_name().and_then(|n| n.to_str()) == Some(bin_name) {
|
||||
entry.unpack(dest_path)?;
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
bail!("Binary '{}' not found in archive", bin_name)
|
||||
}
|
||||
|
||||
/// Extract the binary named `bin_name` from a `.zip` archive at `archive_path`
|
||||
/// and write it to `dest_path`.
|
||||
fn extract_from_zip(archive_path: &Path, bin_name: &str, dest_path: &Path) -> anyhow::Result<()> {
|
||||
use std::io::Read;
|
||||
|
||||
let file = std::fs::File::open(archive_path)?;
|
||||
let mut archive = zip::ZipArchive::new(file)?;
|
||||
|
||||
for i in 0..archive.len() {
|
||||
let mut entry = archive.by_index(i)?;
|
||||
let entry_name = entry.name().to_owned();
|
||||
let file_name = Path::new(&entry_name)
|
||||
.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.unwrap_or("");
|
||||
if file_name == bin_name {
|
||||
let mut buf = Vec::new();
|
||||
entry.read_to_end(&mut buf)?;
|
||||
std::fs::write(dest_path, &buf)?;
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
bail!("Binary '{}' not found in archive", bin_name)
|
||||
}
|
||||
|
||||
/// Check the configured Gitea repository for a newer mdrs release and, after
/// an optional confirmation prompt, download the asset matching this build's
/// target triple and replace the running executable in place.
///
/// `yes` skips the interactive [y/N] confirmation (for scripted use).
///
/// If `GITEA_TOKEN` is set in the environment it is sent as a Bearer token on
/// both the API request and the asset download (needed for private repos).
///
/// # Errors
/// Fails on HTTP errors, when no release or no matching asset exists, on an
/// unsupported archive format, or when extraction / self-replacement fails.
pub async fn selfupdate(yes: bool) -> anyhow::Result<()> {
    let current_version = env!("CARGO_PKG_VERSION");

    println!(
        "Checking for updates (current version: {current_version}, target: {BUILD_TARGET})..."
    );

    // limit=1: Gitea returns releases newest-first, so one is enough.
    let api_url = format!("{GITEA_HOST}/api/v1/repos/{REPO_OWNER}/{REPO_NAME}/releases?limit=1");

    let client = reqwest::Client::new();
    let mut req = client
        .get(&api_url)
        .header(USER_AGENT, format!("mdrs/{current_version}"));

    if let Ok(token) = env::var("GITEA_TOKEN") {
        req = req.header(AUTHORIZATION, format!("Bearer {token}"));
    }

    let resp = req.send().await?;
    if !resp.status().is_success() {
        bail!("Failed to fetch release info: HTTP {}", resp.status());
    }

    let releases: Vec<GiteaRelease> = resp.json().await?;
    let release = releases
        .into_iter()
        .next()
        .ok_or_else(|| anyhow!("No releases found"))?;

    let latest_version = release.tag_name.trim_start_matches('v');

    if !is_newer(current_version, latest_version) {
        println!("Already up-to-date ({current_version}).");
        return Ok(());
    }

    println!("New version available: {latest_version}");

    // Find the asset matching the current build target.
    let asset = release
        .assets
        .iter()
        .find(|a| a.name.contains(BUILD_TARGET))
        .ok_or_else(|| {
            let names: Vec<&str> = release.assets.iter().map(|a| a.name.as_str()).collect();
            anyhow!(
                "No release asset found for target '{BUILD_TARGET}'. \
                 Available assets: {}",
                names.join(", ")
            )
        })?;

    println!("Asset: {}", asset.name);

    if !yes {
        // Anything other than "y"/"Y" aborts the update.
        print!("Update to version {latest_version}? [y/N] ");
        io::stdout().flush()?;
        let mut input = String::new();
        io::stdin().read_line(&mut input)?;
        if !input.trim().eq_ignore_ascii_case("y") {
            println!("Update cancelled.");
            return Ok(());
        }
    }

    // Download the asset to a temporary directory.
    let tmp_dir = tempfile::Builder::new()
        .prefix("mdrs-selfupdate-")
        .tempdir()?;
    let archive_path = tmp_dir.path().join(&asset.name);

    println!("Downloading {}...", asset.browser_download_url);

    let mut download_req = client
        .get(&asset.browser_download_url)
        .header(USER_AGENT, format!("mdrs/{current_version}"));

    if let Ok(token) = env::var("GITEA_TOKEN") {
        download_req = download_req.header(AUTHORIZATION, format!("Bearer {token}"));
    }

    let download_resp = download_req.send().await?;
    if !download_resp.status().is_success() {
        bail!("Failed to download asset: HTTP {}", download_resp.status());
    }

    // NOTE(review): the whole archive is buffered in memory before being
    // written out; fine for a CLI binary, revisit if assets grow large.
    let bytes = download_resp.bytes().await?;
    std::fs::write(&archive_path, &bytes)?;

    // Extract the binary from the archive.
    let bin_name = if cfg!(windows) { "mdrs.exe" } else { "mdrs" };
    let new_bin = tmp_dir.path().join(bin_name);
    let name = asset.name.as_str();

    if name.ends_with(".tar.gz") || name.ends_with(".tgz") {
        extract_from_tar_gz(&archive_path, bin_name, &new_bin)?;
    } else if name.ends_with(".zip") {
        extract_from_zip(&archive_path, bin_name, &new_bin)?;
    } else {
        bail!("Unsupported archive format: {}", asset.name);
    }

    // Make the extracted binary executable on Unix.
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        let mut perms = std::fs::metadata(&new_bin)?.permissions();
        perms.set_mode(0o755);
        std::fs::set_permissions(&new_bin, perms)?;
    }

    // Atomically replace the current executable.
    self_replace::self_replace(&new_bin)?;

    println!("Successfully updated to version {latest_version}.");
    Ok(())
}
|
||||
+173
-360
@@ -1,357 +1,26 @@
|
||||
use crate::models::file::File;
|
||||
use crate::models::folder::FolderDetail;
|
||||
use crate::cache::{Cache, CacheLaboratory};
|
||||
use crate::connection::ApiRequestLimiter;
|
||||
use crate::connection::MDRSConnection;
|
||||
use serde::Deserialize;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::collections::HashMap;
|
||||
use std::fs;
|
||||
use std::sync::{Arc, LazyLock, Mutex};
|
||||
use crate::models::file::File;
|
||||
use crate::models::folder::{FolderDetail, FolderSimple};
|
||||
use crate::models::laboratory::Laboratory;
|
||||
use anyhow::{anyhow, bail};
|
||||
use unicode_normalization::UnicodeNormalization;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Cache structs — matching Python's cache format exactly
|
||||
// Path helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Access/refresh token pair as stored in the login cache file.
#[derive(Deserialize, Clone)]
pub struct CacheToken {
    /// Access token sent as the Bearer credential on API requests.
    pub access: String,
    /// Refresh token used to obtain a new access token when it expires.
    pub refresh: String,
}

/// Minimal user fields stored in cache — matches Python's `User` dataclass.
#[derive(Deserialize, Clone)]
pub struct CacheUser {
    pub id: u32,
    pub username: String,
    /// IDs of the laboratories the user belongs to.
    pub laboratory_ids: Vec<u32>,
    pub is_reviewer: bool,
}

/// All four fields of a laboratory, needed for digest computation.
#[derive(Deserialize, Clone)]
pub struct CacheLaboratory {
    pub id: u32,
    pub name: String,
    // `default` tolerates caches written before these fields existed.
    #[serde(default)]
    pub pi_name: String,
    #[serde(default)]
    pub full_name: String,
}

/// Wrapper matching Python's `Laboratories` serialization: `{"items": [...]}`.
#[derive(Deserialize, Clone, Default)]
pub struct CacheLabsWrapper {
    pub items: Vec<CacheLaboratory>,
}

/// In-memory view of one remote's login cache file.
#[derive(Deserialize, Clone)]
pub struct Cache {
    /// `None` for anonymous sessions.
    pub user: Option<CacheUser>,
    pub token: CacheToken,
    pub laboratories: CacheLabsWrapper,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Digest computation — must produce exactly the same hash as Python's
|
||||
// `CacheData.__calc_digest()`:
|
||||
// hashlib.sha256(
|
||||
// json.dumps([user_asdict, token_asdict, labs_asdict]).encode("utf-8")
|
||||
// ).hexdigest()
|
||||
//
|
||||
// Python's default json.dumps uses separators=(', ', ': ') and
|
||||
// ensure_ascii=True. Field order follows dataclass definition order.
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Escape a string in Python json.dumps style:
/// - Special chars: ", \, and control chars -> standard JSON escapes
///   (including the two-character escapes \b and \f, which Python's encoder
///   emits for 0x08 and 0x0C instead of \uXXXX)
/// - Non-ASCII chars -> \uXXXX (matches Python ensure_ascii=True default)
///
/// Output must match Python byte-for-byte because it feeds the cache digest.
fn python_json_string(s: &str) -> String {
    let mut out = String::with_capacity(s.len() + 2);
    out.push('"');
    for c in s.chars() {
        match c {
            '"' => out.push_str("\\\""),
            '\\' => out.push_str("\\\\"),
            // Python's ESCAPE_DCT maps these five control characters to short
            // escapes; every other control char falls through to \uXXXX below.
            '\u{0008}' => out.push_str("\\b"),
            '\u{000C}' => out.push_str("\\f"),
            '\n' => out.push_str("\\n"),
            '\r' => out.push_str("\\r"),
            '\t' => out.push_str("\\t"),
            c if (c as u32) < 0x20 => {
                out.push_str(&format!("\\u{:04x}", c as u32));
            }
            c if c.is_ascii() => out.push(c),
            c => {
                // Non-ASCII: encode as \uXXXX (BMP) or surrogate pair (outside BMP)
                let code = c as u32;
                if code <= 0xFFFF {
                    out.push_str(&format!("\\u{:04x}", code));
                } else {
                    let code = code - 0x10000;
                    let high = 0xD800 + (code >> 10);
                    let low = 0xDC00 + (code & 0x3FF);
                    out.push_str(&format!("\\u{:04x}\\u{:04x}", high, low));
                }
            }
        }
    }
    out.push('"');
    out
}
|
||||
|
||||
/// Serialize a list of u32 as a Python-style JSON array: `[1, 2, 3]`
///
/// Uses Python's default ", " element separator; an empty slice yields "[]".
fn python_json_u32_array(items: &[u32]) -> String {
    let joined = items
        .iter()
        .map(u32::to_string)
        .collect::<Vec<_>>()
        .join(", ");
    format!("[{}]", joined)
}
|
||||
|
||||
/// Build the JSON array string that Python's `__calc_digest` hashes:
|
||||
/// [user_asdict_or_null, token_asdict, labs_asdict]
|
||||
///
|
||||
/// Field order matches each Python dataclass definition:
|
||||
/// User: id, username, laboratory_ids, is_reviewer
|
||||
/// Token: access, refresh
|
||||
/// Laboratories: items
|
||||
/// Laboratory: id, name, pi_name, full_name
|
||||
pub fn python_digest_json(
|
||||
user: Option<&CacheUser>,
|
||||
access: &str,
|
||||
refresh: &str,
|
||||
labs: &CacheLabsWrapper,
|
||||
) -> String {
|
||||
let user_str = match user {
|
||||
None => "null".to_string(),
|
||||
Some(u) => format!(
|
||||
"{{\"id\": {}, \"username\": {}, \"laboratory_ids\": {}, \"is_reviewer\": {}}}",
|
||||
u.id,
|
||||
python_json_string(&u.username),
|
||||
python_json_u32_array(&u.laboratory_ids),
|
||||
if u.is_reviewer { "true" } else { "false" }
|
||||
),
|
||||
};
|
||||
|
||||
let token_str = format!(
|
||||
"{{\"access\": {}, \"refresh\": {}}}",
|
||||
python_json_string(access),
|
||||
python_json_string(refresh)
|
||||
);
|
||||
|
||||
let items: Vec<String> = labs
|
||||
.items
|
||||
.iter()
|
||||
.map(|lab| {
|
||||
format!(
|
||||
"{{\"id\": {}, \"name\": {}, \"pi_name\": {}, \"full_name\": {}}}",
|
||||
lab.id,
|
||||
python_json_string(&lab.name),
|
||||
python_json_string(&lab.pi_name),
|
||||
python_json_string(&lab.full_name)
|
||||
)
|
||||
})
|
||||
.collect();
|
||||
|
||||
let items_str = if items.is_empty() {
|
||||
"[]".to_string()
|
||||
} else {
|
||||
format!("[{}]", items.join(", "))
|
||||
};
|
||||
let labs_str = format!("{{\"items\": {}}}", items_str);
|
||||
|
||||
format!("[{}, {}, {}]", user_str, token_str, labs_str)
|
||||
}
|
||||
|
||||
/// Compute the cache digest compatible with Python's `CacheData.__calc_digest`.
|
||||
pub fn compute_digest(
|
||||
user: Option<&CacheUser>,
|
||||
access: &str,
|
||||
refresh: &str,
|
||||
labs: &CacheLabsWrapper,
|
||||
) -> String {
|
||||
let json_str = python_digest_json(user, access, refresh, labs);
|
||||
let mut hasher = Sha256::new();
|
||||
hasher.update(json_str.as_bytes());
|
||||
format!("{:x}", hasher.finalize())
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Per-remote async mutex map (in-process serialization)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
static REMOTE_LOCKS: LazyLock<Mutex<HashMap<String, Arc<tokio::sync::Mutex<()>>>>> =
|
||||
LazyLock::new(|| Mutex::new(HashMap::new()));
|
||||
|
||||
fn get_remote_lock(remote: &str) -> Arc<tokio::sync::Mutex<()>> {
|
||||
let mut map = REMOTE_LOCKS.lock().unwrap();
|
||||
map.entry(remote.to_string())
|
||||
.or_insert_with(|| Arc::new(tokio::sync::Mutex::new(())))
|
||||
.clone()
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Cache file path helper
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
fn cache_file_path(remote: &str) -> std::path::PathBuf {
|
||||
crate::settings::SETTINGS
|
||||
.config_dirname
|
||||
.join("cache")
|
||||
.join(format!("{}.json", remote))
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Load cache (low-level, no token refresh)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Load token and laboratories from the login cache file (no token refresh check).
|
||||
pub fn load_cache(remote: &str) -> Result<Cache, Box<dyn std::error::Error>> {
|
||||
let cache_path = cache_file_path(remote);
|
||||
if !cache_path.exists() {
|
||||
return Err(format!(
|
||||
"Not logged in to `{}`. Run `mdrs login {}` first.",
|
||||
remote, remote
|
||||
)
|
||||
.into());
|
||||
}
|
||||
let data = fs::read_to_string(&cache_path)?;
|
||||
serde_json::from_str::<Cache>(&data).map_err(|e| {
|
||||
format!(
|
||||
"Cache for `{}` is invalid or outdated ({}). Run `mdrs login {}` to refresh it.",
|
||||
remote, e, remote
|
||||
)
|
||||
.into()
|
||||
})
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Token-aware cache load with refresh and locking
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Load cache, check token expiry, and refresh the access token if needed.
///
/// Locking strategy:
/// - Per-remote `tokio::sync::Mutex` serializes concurrent async tasks within
///   the same process.
/// - `flock(LOCK_EX)` on the cache file serializes across separate processes
///   on the same host.
///
/// # Errors
/// Fails when the cache is missing/invalid (see `load_cache`), when the
/// refresh token itself has expired, or when the refresh request fails.
pub async fn load_cache_with_token_refresh(
    remote: &str,
) -> Result<Cache, Box<dyn std::error::Error>> {
    // Acquire the in-process async mutex for this remote
    let lock = get_remote_lock(remote);
    let _guard = lock.lock().await;

    // Re-read the cache inside the lock (another task may have already refreshed)
    let mut cache = load_cache(remote)?;

    // An expired refresh token cannot be recovered; the user must log in again.
    if crate::token::is_expired(&cache.token.refresh) {
        return Err(format!(
            "Session for `{}` has expired. Please run `mdrs login {}` again.",
            remote, remote
        )
        .into());
    }

    // Refresh proactively when the access token is (near) expiry and persist
    // the new token so other processes see it too.
    if crate::token::is_refresh_required(&cache.token.access, &cache.token.refresh) {
        let new_access = refresh_and_persist(remote, &cache).await?;
        cache.token.access = new_access;
    }

    Ok(cache)
}
|
||||
|
||||
/// Call the token-refresh endpoint and write the new access token back to the
/// cache file. The caller must already hold the per-remote async mutex.
/// Also recomputes the digest so Python can verify the cache.
///
/// Returns the new access token on success.
async fn refresh_and_persist(
    remote: &str,
    cache: &Cache,
) -> Result<String, Box<dyn std::error::Error>> {
    // Build a connection without Bearer token just to reach the refresh endpoint
    let url = crate::commands::config::get_remote_url(remote)?
        .ok_or_else(|| format!("Remote `{}` is not configured.", remote))?;
    let conn = MDRSConnection::new(&url);

    let new_access = conn.token_refresh(&cache.token.refresh).await?;

    // Recompute the digest with the new access token so the Python client
    // can still verify the cache after a token refresh.
    let new_digest = compute_digest(
        cache.user.as_ref(),
        &new_access,
        &cache.token.refresh,
        &cache.laboratories,
    );

    // Persist the updated access token to the cache file with an exclusive file
    // lock so that other processes do not read a partially written file.
    let cache_path = cache_file_path(remote);

    // Patch the raw JSON value rather than re-serializing `Cache`, so any
    // fields the Python client wrote (and we don't model) are preserved.
    let raw = fs::read_to_string(&cache_path)?;
    let mut obj: serde_json::Value = serde_json::from_str(&raw)?;

    obj["token"]["access"] = serde_json::Value::String(new_access.clone());
    obj["digest"] = serde_json::Value::String(new_digest);

    // Write atomically: write to .tmp then rename, while holding an exclusive
    // flock on the .tmp file for cross-process safety.
    let tmp_path = cache_path.with_extension("tmp");
    {
        use fs2::FileExt;
        use std::io::Write;
        let mut tmp_file = fs::OpenOptions::new()
            .write(true)
            .create(true)
            .truncate(true)
            .open(&tmp_path)?;
        tmp_file.lock_exclusive()?;
        tmp_file.write_all(serde_json::to_string(&obj)?.as_bytes())?;
        tmp_file.flush()?;
        tmp_file.unlock()?;
    }
    // Restrict the cache to owner-only before it replaces the live file.
    // NOTE(review): the .tmp file is created with default permissions and
    // chmod'ed afterwards, so there is a brief window where the token is
    // world-readable under a permissive umask — consider creating with
    // mode(0o600) instead; confirm before changing behavior.
    #[cfg(unix)]
    {
        use std::os::unix::fs::PermissionsExt;
        let mut perms = fs::metadata(&tmp_path)?.permissions();
        perms.set_mode(0o600);
        fs::set_permissions(&tmp_path, perms)?;
    }
    fs::rename(&tmp_path, &cache_path)?;

    Ok(new_access)
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Connection helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Create an authenticated MDRSConnection for the given remote label
|
||||
pub fn create_authenticated_conn(
|
||||
remote: &str,
|
||||
cache: &Cache,
|
||||
) -> Result<MDRSConnection, Box<dyn std::error::Error>> {
|
||||
let url = crate::commands::config::get_remote_url(remote)?
|
||||
.ok_or_else(|| format!("Remote `{}` is not configured.", remote))?;
|
||||
let mut conn = MDRSConnection::new(&url);
|
||||
conn.token = Some(cache.token.access.clone());
|
||||
Ok(conn)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Path and lab helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Parse "remote:/labname/path/" into (remote, labname, folder_path)
|
||||
pub fn parse_remote_path(
|
||||
remote_path: &str,
|
||||
) -> Result<(String, String, String), Box<dyn std::error::Error>> {
|
||||
/// Parse "remote:/labname/path/" into (remote, labname, folder_path).
|
||||
pub fn parse_remote_path(remote_path: &str) -> Result<(String, String, String), anyhow::Error> {
|
||||
let parts: Vec<&str> = remote_path.splitn(2, ':').collect();
|
||||
if parts.len() != 2 {
|
||||
return Err("remote_path must be in the form 'remote:/labname/path/'".into());
|
||||
bail!("remote_path must be in the form 'remote:/labname/path/'");
|
||||
}
|
||||
let remote = parts[0].to_string();
|
||||
let rest = parts[1];
|
||||
if !rest.starts_with('/') {
|
||||
return Err("Path must be absolute (start with '/')".into());
|
||||
bail!("Path must be absolute (start with '/')");
|
||||
}
|
||||
let folder_parts: Vec<&str> = rest.trim_start_matches('/').splitn(2, '/').collect();
|
||||
let labname = folder_parts[0].to_string();
|
||||
@@ -363,44 +32,89 @@ pub fn parse_remote_path(
|
||||
Ok((remote, labname, path))
|
||||
}
|
||||
|
||||
/// Look up a laboratory by name in the cache
|
||||
// ---------------------------------------------------------------------------
|
||||
// Lab helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Look up a laboratory by name in the cache.
|
||||
pub fn find_lab_in_cache<'a>(
|
||||
cache: &'a Cache,
|
||||
labname: &str,
|
||||
) -> Result<&'a CacheLaboratory, Box<dyn std::error::Error>> {
|
||||
) -> Result<&'a CacheLaboratory, anyhow::Error> {
|
||||
cache
|
||||
.laboratories
|
||||
.items
|
||||
.iter()
|
||||
.find(|l| l.name == labname)
|
||||
.ok_or_else(|| format!("Laboratory `{}` not found.", labname).into())
|
||||
.ok_or_else(|| anyhow!("Laboratory `{}` not found.", labname))
|
||||
}
|
||||
|
||||
/// Resolve a folder by path using the API (GET v3/folders/?path=...&laboratory_id=...)
|
||||
/// Resolve a laboratory by name using cached laboratories when available, and
|
||||
/// falling back to the API when the user is anonymous.
|
||||
pub async fn find_laboratory(
|
||||
conn: &MDRSConnection,
|
||||
cache: Option<&Cache>,
|
||||
labname: &str,
|
||||
) -> Result<Laboratory, anyhow::Error> {
|
||||
if let Some(cache) = cache {
|
||||
if let Ok(lab) = find_lab_in_cache(cache, labname) {
|
||||
return Ok(Laboratory {
|
||||
id: lab.id,
|
||||
name: lab.name.clone(),
|
||||
pi_name: lab.pi_name.clone(),
|
||||
full_name: lab.full_name.clone(),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
conn.list_laboratories()
|
||||
.await?
|
||||
.items
|
||||
.into_iter()
|
||||
.find(|lab| lab.name == labname)
|
||||
.ok_or_else(|| anyhow!("Laboratory `{}` not found.", labname))
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Unicode helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Apply Unicode NFC normalization to a string.
///
/// Used by the `find_*` helpers below so name and path comparisons are
/// insensitive to the input's Unicode normalization form.
pub fn nfc(s: &str) -> String {
    s.chars().nfc().collect()
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Folder / file search helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Resolve a folder by path using the API (GET v3/folders/?path=...&laboratory_id=...).
|
||||
pub async fn find_folder(
|
||||
conn: &MDRSConnection,
|
||||
lab_id: u32,
|
||||
path: &str,
|
||||
password: Option<&str>,
|
||||
) -> Result<FolderDetail, Box<dyn std::error::Error>> {
|
||||
let folders = conn.list_folders_by_path(lab_id, path).await?;
|
||||
) -> Result<FolderDetail, anyhow::Error> {
|
||||
let normalized_path = nfc(path);
|
||||
let folders = conn.list_folders_by_path(lab_id, &normalized_path).await?;
|
||||
if folders.is_empty() {
|
||||
return Err(format!("Folder `{}` not found.", path).into());
|
||||
bail!("Folder `{}` not found.", path);
|
||||
}
|
||||
if folders.len() != 1 {
|
||||
return Err(
|
||||
format!("Ambiguous path `{}`: {} folders matched.", path, folders.len()).into(),
|
||||
bail!(
|
||||
"Ambiguous path `{}`: {} folders matched.",
|
||||
path,
|
||||
folders.len()
|
||||
);
|
||||
}
|
||||
let folder_simple = &folders[0];
|
||||
if folder_simple.lock {
|
||||
match password {
|
||||
None => {
|
||||
return Err(format!(
|
||||
bail!(
|
||||
"Folder `{}` is locked. Use -p/--password to provide a password.",
|
||||
path
|
||||
)
|
||||
.into())
|
||||
);
|
||||
}
|
||||
Some(pw) => conn.folder_auth(&folder_simple.id, pw).await?,
|
||||
}
|
||||
@@ -409,13 +123,73 @@ pub async fn find_folder(
|
||||
Ok(folder)
|
||||
}
|
||||
|
||||
/// Find a file by name (case-insensitive) in a file list
|
||||
pub fn find_file_by_name<'a>(files: &'a [File], name: &str) -> Option<&'a File> {
|
||||
let name_lower = name.to_lowercase();
|
||||
files.iter().find(|f| f.name.to_lowercase() == name_lower)
|
||||
/// Resolve a folder by path while consuming the shared API concurrency budget.
|
||||
pub async fn find_folder_limited(
|
||||
conn: &MDRSConnection,
|
||||
limiter: &ApiRequestLimiter,
|
||||
lab_id: u32,
|
||||
path: &str,
|
||||
password: Option<&str>,
|
||||
) -> Result<FolderDetail, anyhow::Error> {
|
||||
let normalized_path = nfc(path);
|
||||
let folders = conn
|
||||
.list_folders_by_path_limited(lab_id, &normalized_path, limiter)
|
||||
.await?;
|
||||
if folders.is_empty() {
|
||||
bail!("Folder `{}` not found.", path);
|
||||
}
|
||||
if folders.len() != 1 {
|
||||
bail!(
|
||||
"Ambiguous path `{}`: {} folders matched.",
|
||||
path,
|
||||
folders.len()
|
||||
);
|
||||
}
|
||||
let folder_simple = &folders[0];
|
||||
if folder_simple.lock {
|
||||
match password {
|
||||
None => {
|
||||
bail!(
|
||||
"Folder `{}` is locked. Use -p/--password to provide a password.",
|
||||
path
|
||||
);
|
||||
}
|
||||
Some(pw) => {
|
||||
conn.folder_auth_limited(&folder_simple.id, pw, limiter)
|
||||
.await?
|
||||
}
|
||||
}
|
||||
}
|
||||
let folder = conn
|
||||
.retrieve_folder_limited(&folder_simple.id, limiter)
|
||||
.await?;
|
||||
Ok(folder)
|
||||
}
|
||||
|
||||
/// Format an ISO 8601 timestamp as "YYYY/MM/DD HH:MM:SS"
|
||||
/// Find a file by name (NFC-normalized, case-insensitive) in a file list.
|
||||
pub fn find_file_by_name<'a>(files: &'a [File], name: &str) -> Option<&'a File> {
|
||||
let name_lower = nfc(name).to_lowercase();
|
||||
files
|
||||
.iter()
|
||||
.find(|f| nfc(&f.name).to_lowercase() == name_lower)
|
||||
}
|
||||
|
||||
/// Find a sub-folder by name (NFC-normalized, case-insensitive).
|
||||
pub fn find_subfolder_by_name<'a>(
|
||||
subfolders: &'a [FolderSimple],
|
||||
name: &str,
|
||||
) -> Option<&'a FolderSimple> {
|
||||
let name_lower = nfc(name).to_lowercase();
|
||||
subfolders
|
||||
.iter()
|
||||
.find(|f| nfc(&f.name).to_lowercase() == name_lower)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Display helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Format an ISO 8601 timestamp as "YYYY/MM/DD HH:MM:SS".
|
||||
pub fn fmt_datetime(iso: &str) -> String {
|
||||
let s = iso.trim();
|
||||
let s = if let Some(pos) = s[10..].find(|c: char| c == '+' || c == '-') {
|
||||
@@ -431,3 +205,42 @@ pub fn fmt_datetime(iso: &str) -> String {
|
||||
iso.to_string()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use tokio::io::{AsyncReadExt, AsyncWriteExt};
    use tokio::net::TcpListener;

    /// `find_laboratory` with `cache = None` must hit the API anonymously —
    /// i.e. without sending an Authorization header.
    #[tokio::test]
    async fn find_laboratory_falls_back_to_api_without_authorization() {
        // Hand-rolled one-shot HTTP server: accept a single connection,
        // inspect the raw request, answer with a fixed JSON lab list.
        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr = listener.local_addr().unwrap();

        let server = tokio::spawn(async move {
            let (mut stream, _) = listener.accept().await.unwrap();
            let mut buf = [0u8; 4096];
            let n = stream.read(&mut buf).await.unwrap();
            let req = String::from_utf8_lossy(&buf[..n]);

            // The anonymous path must not attach a Bearer token.
            assert!(req.starts_with("GET /v3/laboratories/ HTTP/1.1"));
            assert!(!req.contains("\r\nAuthorization: Bearer "));

            let body =
                r#"[{"id":1,"name":"public-lab","pi_name":"PI","full_name":"Public Laboratory"}]"#;
            let response = format!(
                "HTTP/1.1 200 OK\r\ncontent-type: application/json\r\ncontent-length: {}\r\nconnection: close\r\n\r\n{}",
                body.len(),
                body
            );
            stream.write_all(response.as_bytes()).await.unwrap();
        });

        let conn = MDRSConnection::new(&format!("http://{addr}"));
        // No cache -> exercises the API fallback branch.
        let lab = find_laboratory(&conn, None, "public-lab").await.unwrap();

        assert_eq!(lab.id, 1);
        assert_eq!(lab.name, "public-lab");
        // Propagate any assertion failure from the server task.
        server.await.unwrap();
    }
}
|
||||
|
||||
+281
-84
@@ -1,35 +1,40 @@
|
||||
use crate::models::folder::FolderSimple;
|
||||
use crate::cache::{create_authenticated_conn, load_cache_with_token_refresh};
|
||||
use crate::commands::shared::{
|
||||
create_authenticated_conn, find_file_by_name, find_folder, find_lab_in_cache, load_cache_with_token_refresh,
|
||||
parse_remote_path,
|
||||
find_file_by_name, find_folder_limited, find_lab_in_cache, nfc, parse_remote_path,
|
||||
};
|
||||
use futures::stream::{FuturesUnordered, StreamExt};
|
||||
use crate::connection::{ApiRequestLimiter, MDRSConnection};
|
||||
use crate::models::folder::FolderSimple;
|
||||
use anyhow::{anyhow, bail};
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use tokio::fs;
|
||||
use tokio::task::JoinSet;
|
||||
|
||||
pub async fn upload(
|
||||
local_path: &str,
|
||||
remote_path: &str,
|
||||
recursive: bool,
|
||||
skip_if_exists: bool,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
) -> Result<(), anyhow::Error> {
|
||||
let (remote, labname, r_path) = parse_remote_path(remote_path)?;
|
||||
let cache = load_cache_with_token_refresh(&remote).await?;
|
||||
let conn = Arc::new(create_authenticated_conn(&remote, &cache)?);
|
||||
let limiter = ApiRequestLimiter::new(crate::settings::SETTINGS.concurrent);
|
||||
let lab = find_lab_in_cache(&cache, &labname)?;
|
||||
let dest_folder = find_folder(&conn, lab.id, &r_path, None).await?;
|
||||
let dest_folder = find_folder_limited(&conn, &limiter, lab.id, &r_path, None).await?;
|
||||
|
||||
// Normalize local_path: resolve to an absolute canonical path so that
|
||||
// trailing slashes and "./" prefixes are handled consistently (matching
|
||||
// Python's os.path.abspath behaviour).
|
||||
let local_abs = std::fs::canonicalize(local_path)
|
||||
.map_err(|_| format!("File or directory `{}` not found.", local_path))?;
|
||||
.map_err(|_| anyhow!("File or directory `{}` not found.", local_path))?;
|
||||
let local = local_abs.as_path();
|
||||
|
||||
if local.is_file() {
|
||||
let filename = local.file_name().unwrap().to_string_lossy().to_string();
|
||||
let remote_files = conn.list_all_files(&dest_folder.id).await?;
|
||||
let remote_files = conn
|
||||
.list_all_files_limited(&dest_folder.id, &limiter)
|
||||
.await?;
|
||||
if skip_if_exists {
|
||||
if let Some(rf) = find_file_by_name(&remote_files, &filename) {
|
||||
if rf.size == std::fs::metadata(local)?.len() {
|
||||
@@ -38,84 +43,59 @@ pub async fn upload(
|
||||
}
|
||||
}
|
||||
}
|
||||
conn.upload_file(&dest_folder.id, &local.to_string_lossy()).await?;
|
||||
conn.upload_file_limited(&dest_folder.id, &local.to_string_lossy(), &limiter)
|
||||
.await?;
|
||||
println!("{}{}", dest_folder.path, filename);
|
||||
} else if local.is_dir() {
|
||||
if !recursive {
|
||||
return Err(format!("Cannot upload `{}`: Is a directory.", local_path).into());
|
||||
bail!("Cannot upload `{}`: Is a directory.", local_path);
|
||||
}
|
||||
// Python always creates a sub-folder named after the local directory inside
|
||||
// remote_path. E.g. `upload ./mydir remote:/lab/path/` creates
|
||||
// `/lab/path/mydir/` on the remote and uploads into that folder.
|
||||
let local_basename = local.file_name().unwrap().to_string_lossy().to_string();
|
||||
let top_remote_id = find_or_create_folder(&conn, &dest_folder.id, &dest_folder.sub_folders, &local_basename).await?;
|
||||
let top_folder = conn.retrieve_folder(&top_remote_id).await?;
|
||||
println!("{}", top_folder.path.trim_end_matches('/'));
|
||||
let top_remote_id = find_or_create_folder(
|
||||
&conn,
|
||||
&limiter,
|
||||
&dest_folder.id,
|
||||
&dest_folder.sub_folders,
|
||||
&local_basename,
|
||||
)
|
||||
.await?;
|
||||
println!(
|
||||
"{}",
|
||||
format!("{}{}", dest_folder.path, local_basename).trim_end_matches('/')
|
||||
);
|
||||
|
||||
// Iterative depth-first walk: each entry is (local_dir, remote_folder_id)
|
||||
let mut stack: Vec<(PathBuf, String)> =
|
||||
vec![(local.to_path_buf(), top_remote_id)];
|
||||
let mut folder_tasks: JoinSet<Result<UploadFolderTaskResult, anyhow::Error>> =
|
||||
JoinSet::new();
|
||||
let mut upload_tasks: JoinSet<Result<(), anyhow::Error>> = JoinSet::new();
|
||||
let mut errors = Vec::new();
|
||||
|
||||
while let Some((local_dir, remote_id)) = stack.pop() {
|
||||
let folder_detail = conn.retrieve_folder(&remote_id).await?;
|
||||
let remote_files = conn.list_all_files(&remote_id).await?;
|
||||
spawn_upload_folder_task(
|
||||
&mut folder_tasks,
|
||||
conn.clone(),
|
||||
limiter.clone(),
|
||||
local.to_path_buf(),
|
||||
top_remote_id,
|
||||
skip_if_exists,
|
||||
);
|
||||
|
||||
let mut entries = fs::read_dir(&local_dir).await?;
|
||||
let mut subdirs: Vec<PathBuf> = Vec::new();
|
||||
let mut files: Vec<PathBuf> = Vec::new();
|
||||
while let Some(entry) = entries.next_entry().await? {
|
||||
let p = entry.path();
|
||||
if p.is_dir() {
|
||||
subdirs.push(p);
|
||||
} else {
|
||||
files.push(p);
|
||||
}
|
||||
}
|
||||
drive_upload_tasks(
|
||||
&mut folder_tasks,
|
||||
&mut upload_tasks,
|
||||
&mut errors,
|
||||
conn.clone(),
|
||||
limiter,
|
||||
skip_if_exists,
|
||||
)
|
||||
.await;
|
||||
|
||||
// Ensure each local sub-directory exists on the remote side
|
||||
for subdir in subdirs {
|
||||
let dirname = subdir.file_name().unwrap().to_string_lossy().to_string();
|
||||
let sub_remote_id = find_or_create_folder(&conn, &remote_id, &folder_detail.sub_folders, &dirname).await?;
|
||||
let sub_folder = conn.retrieve_folder(&sub_remote_id).await?;
|
||||
println!("{}", sub_folder.path.trim_end_matches('/'));
|
||||
stack.push((subdir, sub_remote_id));
|
||||
}
|
||||
|
||||
// Upload files in this directory (up to 10 concurrent)
|
||||
let mut futs: FuturesUnordered<tokio::task::JoinHandle<()>> =
|
||||
FuturesUnordered::new();
|
||||
for file_path in files {
|
||||
let filename = file_path.file_name().unwrap().to_string_lossy().to_string();
|
||||
let file_path_str = file_path.to_string_lossy().to_string();
|
||||
if skip_if_exists {
|
||||
if let Some(rf) = find_file_by_name(&remote_files, &filename) {
|
||||
if let Ok(meta) = std::fs::metadata(&file_path) {
|
||||
if rf.size == meta.len() {
|
||||
let remote_path_prefix = folder_detail.path.clone();
|
||||
println!("{}{}", remote_path_prefix, filename);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
let conn = conn.clone();
|
||||
let folder_id = remote_id.clone();
|
||||
let remote_path_prefix = folder_detail.path.clone();
|
||||
let fname = filename.clone();
|
||||
futs.push(tokio::spawn(async move {
|
||||
match conn.upload_file(&folder_id, &file_path_str).await {
|
||||
Ok(_) => println!("{}{}", remote_path_prefix, fname),
|
||||
Err(e) => eprintln!("Error: {}", e),
|
||||
}
|
||||
}));
|
||||
if futs.len() >= crate::settings::SETTINGS.concurrent {
|
||||
let _ = futs.next().await;
|
||||
}
|
||||
}
|
||||
while futs.next().await.is_some() {}
|
||||
if !errors.is_empty() {
|
||||
bail!(errors.join("\n"));
|
||||
}
|
||||
} else {
|
||||
return Err(format!("File or directory `{}` not found.", local_path).into());
|
||||
bail!("File or directory `{}` not found.", local_path);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -123,21 +103,238 @@ pub async fn upload(
|
||||
|
||||
/// Find an existing sub-folder by name or create it, returning its ID.
|
||||
async fn find_or_create_folder(
|
||||
conn: &crate::connection::MDRSConnection,
|
||||
conn: &MDRSConnection,
|
||||
limiter: &ApiRequestLimiter,
|
||||
parent_id: &str,
|
||||
existing: &[FolderSimple],
|
||||
name: &str,
|
||||
) -> Result<String, Box<dyn std::error::Error>> {
|
||||
if let Some(sf) = existing.iter().find(|f| f.name == name) {
|
||||
) -> Result<String, anyhow::Error> {
|
||||
if let Some(sf) = existing
|
||||
.iter()
|
||||
.find(|f| nfc(&f.name).to_lowercase() == nfc(name).to_lowercase())
|
||||
{
|
||||
return Ok(sf.id.clone());
|
||||
}
|
||||
let resp = conn.create_folder(parent_id, name).await?;
|
||||
if !resp.status().is_success() {
|
||||
return Err(format!("Failed to create remote folder: {}", name).into());
|
||||
}
|
||||
let json: serde_json::Value = resp.json().await?;
|
||||
json["id"]
|
||||
.as_str()
|
||||
.ok_or_else(|| format!("No id in create_folder response for {}", name).into())
|
||||
.map(|s| s.to_string())
|
||||
conn.create_folder_id_limited(parent_id, &nfc(name), limiter)
|
||||
.await
|
||||
}
|
||||
|
||||
struct UploadFolderTaskResult {
|
||||
child_folders: Vec<(PathBuf, String)>,
|
||||
upload_jobs: Vec<UploadJob>,
|
||||
}
|
||||
|
||||
struct UploadJob {
|
||||
folder_id: String,
|
||||
file_path: String,
|
||||
remote_path: String,
|
||||
}
|
||||
|
||||
fn spawn_upload_folder_task(
|
||||
folder_tasks: &mut JoinSet<Result<UploadFolderTaskResult, anyhow::Error>>,
|
||||
conn: Arc<MDRSConnection>,
|
||||
limiter: ApiRequestLimiter,
|
||||
local_dir: PathBuf,
|
||||
remote_id: String,
|
||||
skip_if_exists: bool,
|
||||
) {
|
||||
folder_tasks.spawn(async move {
|
||||
process_upload_folder(conn, limiter, local_dir, remote_id, skip_if_exists).await
|
||||
});
|
||||
}
|
||||
|
||||
fn spawn_upload_task(
|
||||
upload_tasks: &mut JoinSet<Result<(), anyhow::Error>>,
|
||||
conn: Arc<MDRSConnection>,
|
||||
limiter: ApiRequestLimiter,
|
||||
job: UploadJob,
|
||||
) {
|
||||
upload_tasks.spawn(async move {
|
||||
conn.upload_file_limited(&job.folder_id, &job.file_path, &limiter)
|
||||
.await?;
|
||||
println!("{}", job.remote_path);
|
||||
Ok(())
|
||||
});
|
||||
}
|
||||
|
||||
async fn process_upload_folder(
|
||||
conn: Arc<MDRSConnection>,
|
||||
limiter: ApiRequestLimiter,
|
||||
local_dir: PathBuf,
|
||||
remote_id: String,
|
||||
skip_if_exists: bool,
|
||||
) -> Result<UploadFolderTaskResult, anyhow::Error> {
|
||||
let (folder_detail, remote_files) = tokio::try_join!(
|
||||
conn.retrieve_folder_limited(&remote_id, &limiter),
|
||||
conn.list_all_files_limited(&remote_id, &limiter),
|
||||
)?;
|
||||
|
||||
let mut entries = fs::read_dir(&local_dir).await?;
|
||||
let mut subdirs: Vec<PathBuf> = Vec::new();
|
||||
let mut files: Vec<PathBuf> = Vec::new();
|
||||
while let Some(entry) = entries.next_entry().await? {
|
||||
let path = entry.path();
|
||||
if path.is_dir() {
|
||||
subdirs.push(path);
|
||||
} else {
|
||||
files.push(path);
|
||||
}
|
||||
}
|
||||
|
||||
let mut subdir_tasks: JoinSet<Result<(PathBuf, String, String), anyhow::Error>> =
|
||||
JoinSet::new();
|
||||
let existing_subfolders = Arc::new(folder_detail.sub_folders.clone());
|
||||
let folder_path_prefix = folder_detail.path.clone();
|
||||
for subdir in subdirs {
|
||||
let conn = conn.clone();
|
||||
let limiter = limiter.clone();
|
||||
let remote_id = remote_id.clone();
|
||||
let existing_subfolders = existing_subfolders.clone();
|
||||
let folder_path_prefix = folder_path_prefix.clone();
|
||||
subdir_tasks.spawn(async move {
|
||||
let dirname = subdir.file_name().unwrap().to_string_lossy().to_string();
|
||||
let sub_remote_id = find_or_create_folder(
|
||||
&conn,
|
||||
&limiter,
|
||||
&remote_id,
|
||||
existing_subfolders.as_slice(),
|
||||
&dirname,
|
||||
)
|
||||
.await?;
|
||||
Ok((
|
||||
subdir,
|
||||
sub_remote_id,
|
||||
format!("{}{}", folder_path_prefix, dirname),
|
||||
))
|
||||
});
|
||||
}
|
||||
|
||||
let mut child_folders = Vec::new();
|
||||
while let Some(result) = subdir_tasks.join_next().await {
|
||||
let (subdir, sub_remote_id, remote_path) = flatten_join_result(result)?;
|
||||
println!("{}", remote_path.trim_end_matches('/'));
|
||||
child_folders.push((subdir, sub_remote_id));
|
||||
}
|
||||
|
||||
let mut upload_jobs = Vec::new();
|
||||
for file_path in files {
|
||||
let filename = file_path.file_name().unwrap().to_string_lossy().to_string();
|
||||
if skip_if_exists {
|
||||
if let Some(rf) = find_file_by_name(&remote_files, &filename) {
|
||||
if let Ok(meta) = std::fs::metadata(&file_path) {
|
||||
if rf.size == meta.len() {
|
||||
println!("{}{}", folder_detail.path, filename);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
upload_jobs.push(UploadJob {
|
||||
folder_id: remote_id.clone(),
|
||||
file_path: file_path.to_string_lossy().to_string(),
|
||||
remote_path: format!("{}{}", folder_detail.path, filename),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(UploadFolderTaskResult {
|
||||
child_folders,
|
||||
upload_jobs,
|
||||
})
|
||||
}
|
||||
|
||||
async fn drive_upload_tasks(
|
||||
folder_tasks: &mut JoinSet<Result<UploadFolderTaskResult, anyhow::Error>>,
|
||||
upload_tasks: &mut JoinSet<Result<(), anyhow::Error>>,
|
||||
errors: &mut Vec<String>,
|
||||
conn: Arc<MDRSConnection>,
|
||||
limiter: ApiRequestLimiter,
|
||||
skip_if_exists: bool,
|
||||
) {
|
||||
loop {
|
||||
match (folder_tasks.is_empty(), upload_tasks.is_empty()) {
|
||||
(true, true) => break,
|
||||
(false, true) => {
|
||||
if let Some(result) = folder_tasks.join_next().await {
|
||||
handle_upload_folder_result(
|
||||
result,
|
||||
folder_tasks,
|
||||
upload_tasks,
|
||||
errors,
|
||||
conn.clone(),
|
||||
limiter.clone(),
|
||||
skip_if_exists,
|
||||
);
|
||||
}
|
||||
}
|
||||
(true, false) => {
|
||||
if let Some(result) = upload_tasks.join_next().await {
|
||||
if let Err(err) = flatten_join_result(result) {
|
||||
errors.push(err.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
(false, false) => {
|
||||
tokio::select! {
|
||||
result = folder_tasks.join_next() => {
|
||||
if let Some(result) = result {
|
||||
handle_upload_folder_result(
|
||||
result,
|
||||
folder_tasks,
|
||||
upload_tasks,
|
||||
errors,
|
||||
conn.clone(),
|
||||
limiter.clone(),
|
||||
skip_if_exists,
|
||||
);
|
||||
}
|
||||
}
|
||||
result = upload_tasks.join_next() => {
|
||||
if let Some(result) = result {
|
||||
if let Err(err) = flatten_join_result(result) {
|
||||
errors.push(err.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_upload_folder_result(
|
||||
result: Result<Result<UploadFolderTaskResult, anyhow::Error>, tokio::task::JoinError>,
|
||||
folder_tasks: &mut JoinSet<Result<UploadFolderTaskResult, anyhow::Error>>,
|
||||
upload_tasks: &mut JoinSet<Result<(), anyhow::Error>>,
|
||||
errors: &mut Vec<String>,
|
||||
conn: Arc<MDRSConnection>,
|
||||
limiter: ApiRequestLimiter,
|
||||
skip_if_exists: bool,
|
||||
) {
|
||||
match flatten_join_result(result) {
|
||||
Ok(task_result) => {
|
||||
for (local_dir, remote_id) in task_result.child_folders {
|
||||
spawn_upload_folder_task(
|
||||
folder_tasks,
|
||||
conn.clone(),
|
||||
limiter.clone(),
|
||||
local_dir,
|
||||
remote_id,
|
||||
skip_if_exists,
|
||||
);
|
||||
}
|
||||
for job in task_result.upload_jobs {
|
||||
spawn_upload_task(upload_tasks, conn.clone(), limiter.clone(), job);
|
||||
}
|
||||
}
|
||||
Err(err) => errors.push(err.to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
fn flatten_join_result<T>(
|
||||
result: Result<Result<T, anyhow::Error>, tokio::task::JoinError>,
|
||||
) -> Result<T, anyhow::Error> {
|
||||
match result {
|
||||
Ok(inner) => inner,
|
||||
Err(err) => Err(anyhow!("Task join failed: {}", err)),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,3 @@
|
||||
pub fn version() {
|
||||
println!("{} {}", env!("CARGO_BIN_NAME"), env!("CARGO_PKG_VERSION"));
|
||||
}
|
||||
+2
-24
@@ -1,27 +1,5 @@
|
||||
use serde::Deserialize;
|
||||
use std::fs;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct CacheUser {
|
||||
username: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct WhoamiCache {
|
||||
user: Option<CacheUser>,
|
||||
}
|
||||
|
||||
pub async fn whoami(remote: &str) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let cache_path = crate::settings::SETTINGS
|
||||
.config_dirname
|
||||
.join("cache")
|
||||
.join(format!("{}.json", remote));
|
||||
if !cache_path.exists() {
|
||||
println!("(Anonymous)");
|
||||
return Ok(());
|
||||
}
|
||||
let data = fs::read_to_string(&cache_path)?;
|
||||
match serde_json::from_str::<WhoamiCache>(&data) {
|
||||
pub async fn whoami(remote: &str) -> Result<(), anyhow::Error> {
|
||||
match crate::cache::load_cache(remote) {
|
||||
Ok(cache) => match cache.user {
|
||||
Some(user) => println!("{}", user.username),
|
||||
None => println!("(Anonymous)"),
|
||||
|
||||
+139
-57
@@ -1,5 +1,8 @@
|
||||
use reqwest::Client;
|
||||
use reqwest::header::{ACCEPT, AUTHORIZATION, HeaderMap, HeaderValue, USER_AGENT};
|
||||
use reqwest::{Client, Response};
|
||||
use serde::Serialize;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::{OwnedSemaphorePermit, Semaphore};
|
||||
|
||||
fn build_user_agent() -> String {
|
||||
let info = os_info::get();
|
||||
@@ -26,21 +29,77 @@ fn build_user_agent() -> String {
|
||||
)
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
/// HTTP transport layer for MDRS API calls.
|
||||
pub struct MDRSConnection {
|
||||
pub remote: Option<String>,
|
||||
pub url: String,
|
||||
pub client: Client,
|
||||
pub token: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct ApiRequestLimiter {
|
||||
semaphore: Arc<Semaphore>,
|
||||
}
|
||||
|
||||
impl ApiRequestLimiter {
|
||||
pub fn new(limit: usize) -> Self {
|
||||
Self {
|
||||
semaphore: Arc::new(Semaphore::new(limit.max(1))),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn acquire(&self) -> Result<OwnedSemaphorePermit, anyhow::Error> {
|
||||
self.semaphore
|
||||
.clone()
|
||||
.acquire_owned()
|
||||
.await
|
||||
.map_err(|_| anyhow::anyhow!("API request limiter was closed."))
|
||||
}
|
||||
}
|
||||
|
||||
impl MDRSConnection {
|
||||
pub fn new(url: &str) -> Self {
|
||||
MDRSConnection {
|
||||
remote: None,
|
||||
url: url.to_string(),
|
||||
client: Client::new(),
|
||||
token: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn with_remote(mut self, remote: &str) -> Self {
|
||||
self.remote = Some(remote.to_string());
|
||||
self
|
||||
}
|
||||
|
||||
/// Create a new connection that shares the HTTP client (and its connection
|
||||
/// pool) with the receiver but uses a fresh access token. Useful for
|
||||
/// spawning per-task connections without allocating a new connection pool
|
||||
/// for every concurrent task.
|
||||
///
|
||||
/// `reqwest::Client` wraps an internal `Arc`; cloning it is cheap and
|
||||
/// keeps the shared pool intact.
|
||||
pub fn with_token(&self, access_token: String) -> Self {
|
||||
MDRSConnection {
|
||||
remote: self.remote.clone(),
|
||||
url: self.url.clone(),
|
||||
client: self.client.clone(),
|
||||
token: Some(access_token),
|
||||
}
|
||||
}
|
||||
|
||||
async fn connection_with_fresh_token(&self) -> Result<Self, anyhow::Error> {
|
||||
match (&self.remote, &self.token) {
|
||||
(Some(remote), Some(_)) => {
|
||||
let cache = crate::cache::load_cache_with_token_refresh(remote).await?;
|
||||
Ok(self.with_token(cache.token.access))
|
||||
}
|
||||
_ => Ok(self.clone()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn build_url(&self, path: &str) -> String {
|
||||
format!("{}/{}", self.url.trim_end_matches('/'), path)
|
||||
}
|
||||
@@ -49,89 +108,112 @@ impl MDRSConnection {
|
||||
let mut headers = HeaderMap::new();
|
||||
headers.insert(
|
||||
USER_AGENT,
|
||||
HeaderValue::from_str(&build_user_agent()).unwrap(),
|
||||
HeaderValue::from_str(&build_user_agent())
|
||||
.expect("user-agent string contains invalid header characters"),
|
||||
);
|
||||
headers.insert(ACCEPT, HeaderValue::from_static("application/json"));
|
||||
if let Some(token) = &self.token {
|
||||
headers.insert(
|
||||
AUTHORIZATION,
|
||||
HeaderValue::from_str(&format!("Bearer {}", token)).unwrap(),
|
||||
HeaderValue::from_str(&format!("Bearer {}", token))
|
||||
.expect("token contains invalid header characters"),
|
||||
);
|
||||
}
|
||||
headers
|
||||
}
|
||||
|
||||
pub async fn get(&self, path: &str) -> reqwest::Result<reqwest::Response> {
|
||||
self.client
|
||||
.get(self.build_url(path))
|
||||
.headers(self.prepare_headers())
|
||||
pub async fn get(&self, path: &str) -> Result<Response, anyhow::Error> {
|
||||
let conn = self.connection_with_fresh_token().await?;
|
||||
Ok(conn
|
||||
.client
|
||||
.get(conn.build_url(path))
|
||||
.headers(conn.prepare_headers())
|
||||
.send()
|
||||
.await
|
||||
.await?)
|
||||
}
|
||||
|
||||
pub async fn get_with_query<Q>(&self, path: &str, query: &Q) -> Result<Response, anyhow::Error>
|
||||
where
|
||||
Q: Serialize + ?Sized,
|
||||
{
|
||||
let conn = self.connection_with_fresh_token().await?;
|
||||
Ok(conn
|
||||
.client
|
||||
.get(conn.build_url(path))
|
||||
.headers(conn.prepare_headers())
|
||||
.query(query)
|
||||
.send()
|
||||
.await?)
|
||||
}
|
||||
|
||||
pub async fn get_url(&self, url: &str) -> Result<Response, anyhow::Error> {
|
||||
let conn = self.connection_with_fresh_token().await?;
|
||||
Ok(conn
|
||||
.client
|
||||
.get(if url.starts_with("http") {
|
||||
url.to_string()
|
||||
} else {
|
||||
conn.build_url(url)
|
||||
})
|
||||
.headers(conn.prepare_headers())
|
||||
.send()
|
||||
.await?)
|
||||
}
|
||||
|
||||
pub async fn post_json<B>(&self, path: &str, body: &B) -> Result<Response, anyhow::Error>
|
||||
where
|
||||
B: Serialize + ?Sized,
|
||||
{
|
||||
let conn = self.connection_with_fresh_token().await?;
|
||||
Ok(conn
|
||||
.client
|
||||
.post(conn.build_url(path))
|
||||
.headers(conn.prepare_headers())
|
||||
.json(body)
|
||||
.send()
|
||||
.await?)
|
||||
}
|
||||
|
||||
pub async fn post_multipart(
|
||||
&self,
|
||||
path: &str,
|
||||
form: reqwest::multipart::Form,
|
||||
) -> reqwest::Result<reqwest::Response> {
|
||||
self.client
|
||||
.post(self.build_url(path))
|
||||
.headers(self.prepare_headers())
|
||||
) -> Result<Response, anyhow::Error> {
|
||||
let conn = self.connection_with_fresh_token().await?;
|
||||
Ok(conn
|
||||
.client
|
||||
.post(conn.build_url(path))
|
||||
.headers(conn.prepare_headers())
|
||||
.multipart(form)
|
||||
.send()
|
||||
.await
|
||||
.await?)
|
||||
}
|
||||
|
||||
pub async fn download_file(
|
||||
&self,
|
||||
url: &str,
|
||||
dest: &str,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let resp = self
|
||||
pub async fn delete(&self, path: &str) -> Result<Response, anyhow::Error> {
|
||||
let conn = self.connection_with_fresh_token().await?;
|
||||
Ok(conn
|
||||
.client
|
||||
.get(url)
|
||||
.headers(self.prepare_headers())
|
||||
.delete(conn.build_url(path))
|
||||
.headers(conn.prepare_headers())
|
||||
.send()
|
||||
.await?;
|
||||
let bytes = resp.bytes().await?;
|
||||
tokio::fs::write(dest, &bytes).await?;
|
||||
Ok(())
|
||||
.await?)
|
||||
}
|
||||
|
||||
pub async fn create_folder(
|
||||
pub async fn delete_with_query<Q>(
|
||||
&self,
|
||||
parent_id: &str,
|
||||
folder_name: &str,
|
||||
) -> reqwest::Result<reqwest::Response> {
|
||||
let body = serde_json::json!({"parent": parent_id, "name": folder_name});
|
||||
self.client
|
||||
.post(self.build_url("v3/folders/"))
|
||||
.headers(self.prepare_headers())
|
||||
.json(&body)
|
||||
.send()
|
||||
.await
|
||||
}
|
||||
|
||||
/// Authenticate against a password-locked folder (POST v3/folders/{id}/auth/).
|
||||
/// Returns Err if the password is incorrect or the request fails.
|
||||
pub async fn folder_auth(
|
||||
&self,
|
||||
folder_id: &str,
|
||||
password: &str,
|
||||
) -> Result<(), Box<dyn std::error::Error>> {
|
||||
let resp = self
|
||||
path: &str,
|
||||
query: &Q,
|
||||
) -> Result<Response, anyhow::Error>
|
||||
where
|
||||
Q: Serialize + ?Sized,
|
||||
{
|
||||
let conn = self.connection_with_fresh_token().await?;
|
||||
Ok(conn
|
||||
.client
|
||||
.post(self.build_url(&format!("v3/folders/{}/auth/", folder_id)))
|
||||
.headers(self.prepare_headers())
|
||||
.json(&serde_json::json!({"password": password}))
|
||||
.delete(conn.build_url(path))
|
||||
.headers(conn.prepare_headers())
|
||||
.query(query)
|
||||
.send()
|
||||
.await?;
|
||||
if resp.status() == reqwest::StatusCode::UNAUTHORIZED {
|
||||
return Err("Password is incorrect.".into());
|
||||
}
|
||||
if !resp.status().is_success() {
|
||||
return Err(format!("Folder auth failed: {}", resp.status()).into());
|
||||
}
|
||||
Ok(())
|
||||
.await?)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,25 @@
|
||||
/// Print the error message and exit with code 2.
|
||||
/// JSON deserialization errors produce a friendlier message matching Python's
|
||||
/// JSONDecodeError handling.
|
||||
pub fn handle_error(e: anyhow::Error) -> ! {
|
||||
if is_json_error(&*e) {
|
||||
eprintln!(
|
||||
"Unexpected response returned. Please check the configuration or the server's operational status."
|
||||
);
|
||||
} else {
|
||||
eprintln!("Error: {}", e);
|
||||
}
|
||||
std::process::exit(2);
|
||||
}
|
||||
|
||||
/// Walk the error source chain to detect `serde_json` parse errors.
|
||||
fn is_json_error(e: &(dyn std::error::Error + 'static)) -> bool {
|
||||
let mut source: Option<&(dyn std::error::Error + 'static)> = Some(e);
|
||||
while let Some(err) = source {
|
||||
if err.downcast_ref::<serde_json::Error>().is_some() {
|
||||
return true;
|
||||
}
|
||||
source = err.source();
|
||||
}
|
||||
false
|
||||
}
|
||||
+120
-262
@@ -1,202 +1,63 @@
|
||||
pub mod api;
|
||||
mod api;
|
||||
mod cache;
|
||||
mod cli;
|
||||
mod commands;
|
||||
mod connection;
|
||||
mod error;
|
||||
mod models;
|
||||
mod settings;
|
||||
mod token;
|
||||
|
||||
use clap::{Parser, Subcommand};
|
||||
use clap::Parser;
|
||||
use cli::{Cli, Commands};
|
||||
use error::handle_error;
|
||||
|
||||
#[derive(Parser)]
|
||||
#[command(name = "mdrs")]
|
||||
#[command(about = "MDRS Rust CLI client", long_about = None)]
|
||||
struct Cli {
|
||||
#[command(subcommand)]
|
||||
command: Commands,
|
||||
}
|
||||
fn run(cli: Cli) {
|
||||
let build_rt = || tokio::runtime::Runtime::new().unwrap_or_else(|e| handle_error(e.into()));
|
||||
|
||||
use commands::config_subcommand::*;
|
||||
|
||||
#[derive(Subcommand)]
|
||||
enum Commands {
|
||||
/// Config management (create, update, list, delete)
|
||||
#[command(subcommand)]
|
||||
Config(ConfigSubcommand),
|
||||
Login {
|
||||
#[arg(short, long)]
|
||||
username: Option<String>,
|
||||
#[arg(short, long)]
|
||||
password: Option<String>,
|
||||
remote: String,
|
||||
},
|
||||
/// Logout and remove cached credentials for a remote
|
||||
Logout {
|
||||
remote: String,
|
||||
},
|
||||
Upload {
|
||||
#[arg(short, long)]
|
||||
recursive: bool,
|
||||
#[arg(short = 's', long)]
|
||||
skip_if_exists: bool,
|
||||
local_path: String,
|
||||
remote_path: String,
|
||||
},
|
||||
Download {
|
||||
#[arg(short, long)]
|
||||
recursive: bool,
|
||||
#[arg(short = 's', long)]
|
||||
skip_if_exists: bool,
|
||||
#[arg(short = 'p', long)]
|
||||
password: Option<String>,
|
||||
#[arg(long)]
|
||||
exclude: Vec<String>,
|
||||
remote_path: String,
|
||||
local_path: String,
|
||||
},
|
||||
Ls {
|
||||
remote_path: String,
|
||||
#[arg(short = 'p', long)]
|
||||
password: Option<String>,
|
||||
#[arg(short = 'J', long = "json")]
|
||||
json: bool,
|
||||
#[arg(short = 'r', long)]
|
||||
recursive: bool,
|
||||
#[arg(short = 'q', long)]
|
||||
quick: bool,
|
||||
},
|
||||
Whoami {
|
||||
remote: String,
|
||||
},
|
||||
Labs {
|
||||
remote: String,
|
||||
},
|
||||
Chacl {
|
||||
/// Access level key: private, public, pw_open, cbs_open, 5kikan_open,
|
||||
/// cbs_or_pw_open, 5kikan_or_pw_open, storage
|
||||
access_level_key: String,
|
||||
#[arg(short, long)]
|
||||
recursive: bool,
|
||||
#[arg(short = 'p', long)]
|
||||
password: Option<String>,
|
||||
remote_path: String,
|
||||
},
|
||||
Metadata {
|
||||
#[arg(short = 'p', long)]
|
||||
password: Option<String>,
|
||||
remote_path: String,
|
||||
},
|
||||
Mkdir {
|
||||
remote_path: String,
|
||||
},
|
||||
Rm {
|
||||
#[arg(short, long)]
|
||||
recursive: bool,
|
||||
remote_path: String,
|
||||
},
|
||||
Mv {
|
||||
src_path: String,
|
||||
dest_path: String,
|
||||
},
|
||||
Cp {
|
||||
#[arg(short, long)]
|
||||
recursive: bool,
|
||||
src_path: String,
|
||||
dest_path: String,
|
||||
},
|
||||
/// Show metadata for a remote file
|
||||
FileMetadata {
|
||||
#[arg(short = 'p', long)]
|
||||
password: Option<String>,
|
||||
remote_path: String,
|
||||
},
|
||||
}
|
||||
|
||||
/// Print the error message in Python-compatible format and exit with code 2.
|
||||
/// JSON deserialization errors show a friendlier message matching Python's JSONDecodeError handling.
|
||||
fn handle_error(e: Box<dyn std::error::Error>) -> ! {
|
||||
if is_json_error(&*e) {
|
||||
eprintln!(
|
||||
"Unexpected response returned. Please check the configuration or the server's operational status."
|
||||
);
|
||||
} else {
|
||||
eprintln!("Error: {}", e);
|
||||
}
|
||||
std::process::exit(2);
|
||||
}
|
||||
|
||||
/// Walk the error source chain to detect serde_json parse errors.
|
||||
fn is_json_error(e: &(dyn std::error::Error + 'static)) -> bool {
|
||||
let mut source: Option<&(dyn std::error::Error + 'static)> = Some(e);
|
||||
while let Some(err) = source {
|
||||
if err.downcast_ref::<serde_json::Error>().is_some() {
|
||||
return true;
|
||||
match cli.command {
|
||||
Commands::Config(subcmd) => {
|
||||
use commands::config_subcommand::ConfigSubcommand;
|
||||
match subcmd {
|
||||
ConfigSubcommand::Create(args) => {
|
||||
if let Err(e) = commands::config::config_create(&args.remote, &args.url) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
ConfigSubcommand::Update(args) => {
|
||||
if let Err(e) = commands::config::config_update(&args.remote, &args.url) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
ConfigSubcommand::List(_) => {
|
||||
if let Err(e) = commands::config::config_list() {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
ConfigSubcommand::Delete(args) => {
|
||||
if let Err(e) = commands::config::config_delete(&args.remote) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
source = err.source();
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
fn main() {
|
||||
// Load .env file from current directory (silently ignore if not present).
|
||||
dotenvy::dotenv().ok();
|
||||
|
||||
// Exit with code 130 on Ctrl+C, matching Python's KeyboardInterrupt handling.
|
||||
ctrlc::set_handler(|| {
|
||||
std::process::exit(130);
|
||||
})
|
||||
.ok();
|
||||
|
||||
let cli = Cli::parse();
|
||||
match &cli.command {
|
||||
Commands::Config(subcmd) => match subcmd {
|
||||
ConfigSubcommand::Create(args) => {
|
||||
if let Err(e) = crate::commands::config::config_create(&args.remote, &args.url) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
ConfigSubcommand::Update(args) => {
|
||||
if let Err(e) = crate::commands::config::config_update(&args.remote, &args.url) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
ConfigSubcommand::List(args) => {
|
||||
if let Err(e) = crate::commands::config::config_list(args.long) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
ConfigSubcommand::Delete(args) => {
|
||||
if let Err(e) = crate::commands::config::config_delete(&args.remote) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
},
|
||||
Commands::Login {
|
||||
username,
|
||||
password,
|
||||
remote,
|
||||
} => {
|
||||
let remote = remote.trim_end_matches(':');
|
||||
use std::io::{self, Write};
|
||||
let username_val: String = match username {
|
||||
Some(u) => u.clone(),
|
||||
None => {
|
||||
print!("Username: ");
|
||||
io::stdout().flush().unwrap();
|
||||
let mut s = String::new();
|
||||
io::stdin().read_line(&mut s).unwrap();
|
||||
s.trim().to_string()
|
||||
}
|
||||
};
|
||||
let password_val: String = match password {
|
||||
Some(p) => p.clone(),
|
||||
None => {
|
||||
rpassword::prompt_password("Password: ").unwrap()
|
||||
}
|
||||
};
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
if let Err(e) =
|
||||
rt.block_on(commands::login::login(&username_val, &password_val, remote))
|
||||
{
|
||||
let remote = remote.trim_end_matches(':').to_string();
|
||||
if let Err(e) = build_rt().block_on(commands::login::run_login(
|
||||
username.as_deref(),
|
||||
password.as_deref(),
|
||||
&remote,
|
||||
)) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
Commands::Logout { remote } => {
|
||||
let remote = remote.trim_end_matches(':').to_string();
|
||||
if let Err(e) = commands::logout::logout(&remote) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
@@ -206,15 +67,12 @@ fn main() {
|
||||
recursive,
|
||||
skip_if_exists,
|
||||
} => {
|
||||
if let Err(e) = tokio::runtime::Runtime::new()
|
||||
.unwrap()
|
||||
.block_on(commands::upload::upload(
|
||||
local_path,
|
||||
remote_path,
|
||||
*recursive,
|
||||
*skip_if_exists,
|
||||
))
|
||||
{
|
||||
if let Err(e) = build_rt().block_on(commands::upload::upload(
|
||||
&local_path,
|
||||
&remote_path,
|
||||
recursive,
|
||||
skip_if_exists,
|
||||
)) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
@@ -230,17 +88,14 @@ fn main() {
|
||||
.iter()
|
||||
.map(|e| e.trim_end_matches('/').to_lowercase())
|
||||
.collect();
|
||||
if let Err(e) = tokio::runtime::Runtime::new()
|
||||
.unwrap()
|
||||
.block_on(commands::download::download(
|
||||
remote_path,
|
||||
local_path,
|
||||
*recursive,
|
||||
*skip_if_exists,
|
||||
password.as_deref(),
|
||||
excludes,
|
||||
))
|
||||
{
|
||||
if let Err(e) = build_rt().block_on(commands::download::download(
|
||||
&remote_path,
|
||||
&local_path,
|
||||
recursive,
|
||||
skip_if_exists,
|
||||
password.as_deref(),
|
||||
excludes,
|
||||
)) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
@@ -249,73 +104,58 @@ fn main() {
|
||||
password,
|
||||
json,
|
||||
recursive,
|
||||
quick,
|
||||
quiet,
|
||||
} => {
|
||||
if let Err(e) = tokio::runtime::Runtime::new()
|
||||
.unwrap()
|
||||
.block_on(commands::ls::ls(
|
||||
remote_path,
|
||||
password.as_deref(),
|
||||
*json,
|
||||
*recursive,
|
||||
*quick,
|
||||
))
|
||||
{
|
||||
if let Err(e) = build_rt().block_on(commands::ls::ls(
|
||||
&remote_path,
|
||||
password.as_deref(),
|
||||
json,
|
||||
recursive,
|
||||
quiet,
|
||||
)) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
Commands::Whoami { remote } => {
|
||||
let remote = remote.trim_end_matches(':');
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
if let Err(e) = rt.block_on(commands::whoami::whoami(remote)) {
|
||||
let remote = remote.trim_end_matches(':').to_string();
|
||||
if let Err(e) = build_rt().block_on(commands::whoami::whoami(&remote)) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
Commands::Labs { remote } => {
|
||||
let remote = remote.trim_end_matches(':');
|
||||
match crate::commands::config::get_remote_url(remote) {
|
||||
Ok(Some(url)) => {
|
||||
let conn = std::sync::Arc::new(crate::connection::MDRSConnection::new(&url));
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
if let Err(e) = rt.block_on(crate::commands::labs::labs(conn.clone(), remote)) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
Ok(None) => {
|
||||
eprintln!("Error: Remote host `{}` is not configured", remote);
|
||||
std::process::exit(2);
|
||||
}
|
||||
Err(e) => {
|
||||
handle_error(e);
|
||||
}
|
||||
let remote = remote.trim_end_matches(':').to_string();
|
||||
if let Err(e) = build_rt().block_on(commands::labs::labs(&remote)) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
|
||||
Commands::Chacl {
|
||||
access_level_key,
|
||||
recursive,
|
||||
password,
|
||||
remote_path,
|
||||
} => {
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
if let Err(e) = rt.block_on(crate::commands::chacl::chacl(
|
||||
remote_path,
|
||||
access_level_key,
|
||||
*recursive,
|
||||
if let Err(e) = build_rt().block_on(commands::chacl::chacl(
|
||||
&remote_path,
|
||||
&access_level_key,
|
||||
recursive,
|
||||
password.as_deref(),
|
||||
)) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
Commands::Metadata { remote_path, password } => {
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
if let Err(e) = rt.block_on(crate::commands::metadata::metadata(remote_path, password.as_deref())) {
|
||||
Commands::Metadata {
|
||||
remote_path,
|
||||
password,
|
||||
} => {
|
||||
if let Err(e) = build_rt().block_on(commands::metadata::metadata(
|
||||
&remote_path,
|
||||
password.as_deref(),
|
||||
)) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
Commands::Mkdir { remote_path } => {
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
if let Err(e) = rt.block_on(crate::commands::mkdir::mkdir(remote_path)) {
|
||||
if let Err(e) = build_rt().block_on(commands::mkdir::mkdir(&remote_path)) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
@@ -323,20 +163,15 @@ fn main() {
|
||||
recursive,
|
||||
remote_path,
|
||||
} => {
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
if let Err(e) = rt.block_on(crate::commands::rm::rm(remote_path, *recursive)) {
|
||||
if let Err(e) = build_rt().block_on(commands::rm::rm(&remote_path, recursive)) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
Commands::Logout { remote } => {
|
||||
let remote = remote.trim_end_matches(':');
|
||||
if let Err(e) = crate::commands::logout::logout(remote) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
Commands::Mv { src_path, dest_path } => {
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
if let Err(e) = rt.block_on(crate::commands::mv::mv(src_path, dest_path)) {
|
||||
Commands::Mv {
|
||||
src_path,
|
||||
dest_path,
|
||||
} => {
|
||||
if let Err(e) = build_rt().block_on(commands::mv::mv(&src_path, &dest_path)) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
@@ -345,16 +180,39 @@ fn main() {
|
||||
dest_path,
|
||||
recursive,
|
||||
} => {
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
if let Err(e) = rt.block_on(crate::commands::cp::cp(src_path, dest_path, *recursive)) {
|
||||
if let Err(e) = build_rt().block_on(commands::cp::cp(&src_path, &dest_path, recursive))
|
||||
{
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
Commands::FileMetadata { remote_path, password } => {
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
if let Err(e) = rt.block_on(crate::commands::file_metadata::file_metadata(remote_path, password.as_deref())) {
|
||||
Commands::FileMetadata {
|
||||
remote_path,
|
||||
password,
|
||||
} => {
|
||||
if let Err(e) = build_rt().block_on(commands::file_metadata::file_metadata(
|
||||
&remote_path,
|
||||
password.as_deref(),
|
||||
)) {
|
||||
handle_error(e);
|
||||
}
|
||||
}
|
||||
Commands::Version => {
|
||||
commands::version::version();
|
||||
}
|
||||
Commands::SelfUpdate { yes } => {
|
||||
if let Err(e) = build_rt().block_on(commands::selfupdate::selfupdate(yes)) {
|
||||
handle_error(e.into());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn main() {
|
||||
// Load .env file from the current directory (silently ignore if not present).
|
||||
dotenvy::dotenv().ok();
|
||||
|
||||
// Exit with code 130 on Ctrl+C, matching Python's KeyboardInterrupt handling.
|
||||
ctrlc::set_handler(|| std::process::exit(130)).ok();
|
||||
|
||||
run(Cli::parse());
|
||||
}
|
||||
|
||||
@@ -13,3 +13,22 @@ pub struct Laboratory {
|
||||
pub struct Laboratories {
|
||||
pub items: Vec<Laboratory>,
|
||||
}
|
||||
|
||||
impl From<&Laboratory> for crate::cache::CacheLaboratory {
|
||||
fn from(l: &Laboratory) -> Self {
|
||||
crate::cache::CacheLaboratory {
|
||||
id: l.id,
|
||||
name: l.name.clone(),
|
||||
pi_name: l.pi_name.clone(),
|
||||
full_name: l.full_name.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&Laboratories> for crate::cache::CacheLabsWrapper {
|
||||
fn from(labs: &Laboratories) -> Self {
|
||||
crate::cache::CacheLabsWrapper {
|
||||
items: labs.items.iter().map(Into::into).collect(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,3 +8,14 @@ pub struct User {
|
||||
pub laboratory_ids: Vec<u32>,
|
||||
pub is_reviewer: bool,
|
||||
}
|
||||
|
||||
impl From<&User> for crate::cache::CacheUser {
|
||||
fn from(u: &User) -> Self {
|
||||
crate::cache::CacheUser {
|
||||
id: u.id,
|
||||
username: u.username.clone(),
|
||||
laboratory_ids: u.laboratory_ids.clone(),
|
||||
is_reviewer: u.is_reviewer,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
+9
-6
@@ -4,7 +4,7 @@ pub struct Settings {
|
||||
/// Base directory for config and cache files.
|
||||
/// Controlled by `MDRS_CLIENT_CONFIG_DIRNAME` env var (default: `~/.mdrs-client`).
|
||||
pub config_dirname: std::path::PathBuf,
|
||||
/// Maximum number of concurrent upload/download workers.
|
||||
/// Maximum number of concurrent MDRS API requests used by upload/download.
|
||||
/// Controlled by `MDRS_CLIENT_CONCURRENT` env var (default: 10).
|
||||
pub concurrent: usize,
|
||||
}
|
||||
@@ -14,16 +14,19 @@ impl Settings {
|
||||
let config_dirname = std::env::var("MDRS_CLIENT_CONFIG_DIRNAME")
|
||||
.ok()
|
||||
.map(|s| {
|
||||
let expanded = if s.starts_with("~/") {
|
||||
if s.starts_with("~/") {
|
||||
dirs::home_dir()
|
||||
.unwrap()
|
||||
.unwrap_or_else(|| std::path::PathBuf::from("."))
|
||||
.join(&s[2..])
|
||||
} else {
|
||||
std::path::PathBuf::from(&s)
|
||||
};
|
||||
expanded
|
||||
}
|
||||
})
|
||||
.unwrap_or_else(|| dirs::home_dir().unwrap().join(".mdrs-client"));
|
||||
.unwrap_or_else(|| {
|
||||
dirs::home_dir()
|
||||
.unwrap_or_else(|| std::path::PathBuf::from("."))
|
||||
.join(".mdrs-client")
|
||||
});
|
||||
|
||||
let concurrent = std::env::var("MDRS_CLIENT_CONCURRENT")
|
||||
.ok()
|
||||
|
||||
+4
-3
@@ -1,5 +1,6 @@
|
||||
// JWT utilities for token expiry checking (no signature verification required)
|
||||
|
||||
use anyhow::{anyhow, bail};
|
||||
use base64::{Engine, engine::general_purpose::URL_SAFE_NO_PAD};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
@@ -11,16 +12,16 @@ fn now_secs() -> i64 {
|
||||
}
|
||||
|
||||
/// Decode the `exp` field from a JWT payload without signature verification.
|
||||
pub fn jwt_exp(token: &str) -> Result<i64, Box<dyn std::error::Error>> {
|
||||
pub fn jwt_exp(token: &str) -> Result<i64, anyhow::Error> {
|
||||
let parts: Vec<&str> = token.split('.').collect();
|
||||
if parts.len() < 2 {
|
||||
return Err("Invalid JWT: expected at least 2 dot-separated parts".into());
|
||||
bail!("Invalid JWT: expected at least 2 dot-separated parts");
|
||||
}
|
||||
let payload_bytes = URL_SAFE_NO_PAD.decode(parts[1])?;
|
||||
let json: serde_json::Value = serde_json::from_slice(&payload_bytes)?;
|
||||
let exp = json["exp"]
|
||||
.as_i64()
|
||||
.ok_or("JWT payload missing 'exp' field")?;
|
||||
.ok_or_else(|| anyhow!("JWT payload missing 'exp' field"))?;
|
||||
Ok(exp)
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user