Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
101 changes: 91 additions & 10 deletions src/api/user_upload.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,12 @@ use serde::Deserialize;
use crate::api::{API_BASE_URL, ApiClient, ApiError, check_for_response_success};

/// Combined view of a user's uploads: aggregate statistics plus one page of
/// the upload list, together with the paging parameters that produced it.
#[derive(Debug, Clone)]
#[allow(dead_code)] // kept for the legacy combined-fetch path; the split statistics/list methods are preferred
pub struct UserUploads {
    // Aggregate statistics for the user's uploads.
    pub statistics: UserUploadStatistics,
    // One page of individual upload records.
    pub uploads: Vec<UserUpload>,
    // Page size used when fetching `uploads`.
    pub limit: u32,
    // Offset into the full upload list for this page.
    pub offset: u32,
}

#[derive(Deserialize, Debug, Clone)]
Expand Down Expand Up @@ -51,37 +54,115 @@ pub struct UserUpload {
}

impl ApiClient {
pub async fn get_user_upload_stats(
pub async fn get_user_upload_statistics(
&self,
api_key: &str,
user_id: &str,
) -> Result<UserUploads, ApiError> {
// Response structs for the user info endpoint
start_date: Option<chrono::NaiveDate>,
end_date: Option<chrono::NaiveDate>,
) -> Result<UserUploadStatistics, ApiError> {
#[derive(Deserialize, Debug)]
#[allow(unused)]
struct UserStatsResponse {
struct UserStatisticsResponse {
success: bool,
user_id: String,
statistics: UserUploadStatistics,
uploads: Vec<UserUpload>,
}

let mut url = format!("{API_BASE_URL}/tracker/v2/uploads/user/{user_id}/stats");
let mut query_params = Vec::new();
if let Some(start) = start_date {
query_params.push(format!("start_date={}", start.format("%Y-%m-%d")));
}
if let Some(end) = end_date {
query_params.push(format!("end_date={}", end.format("%Y-%m-%d")));
}
if !query_params.is_empty() {
url.push('?');
url.push_str(&query_params.join("&"));
}

let response = self
.client
.get(format!("{API_BASE_URL}/tracker/uploads/user/{user_id}"))
.get(url)
.header("Content-Type", "application/json")
.header("X-API-Key", api_key)
.send()
.await?;

let response =
check_for_response_success(response, "User upload stats unavailable").await?;
check_for_response_success(response, "User upload statistics unavailable").await?;

let server_stats = response.json::<UserStatisticsResponse>().await?;

let server_stats = response.json::<UserStatsResponse>().await?;
Ok(server_stats.statistics)
}

/// Fetch one page of a user's uploads from the tracker v2 API.
///
/// `limit` / `offset` control paging; `start_date` / `end_date` optionally
/// narrow the range and are sent as `YYYY-MM-DD` query parameters only when
/// present. Returns `(uploads, limit, offset)` as echoed back by the server.
///
/// # Errors
/// Returns `ApiError` when the request fails, the server reports an
/// unsuccessful response, or the response body cannot be deserialized.
pub async fn get_user_upload_list(
    &self,
    api_key: &str,
    user_id: &str,
    limit: u32,
    offset: u32,
    start_date: Option<chrono::NaiveDate>,
    end_date: Option<chrono::NaiveDate>,
) -> Result<(Vec<UserUpload>, u32, u32), ApiError> {
    // Shape of the server's JSON reply; unused fields are retained so the
    // whole payload can be inspected in debug output.
    #[derive(Deserialize, Debug)]
    #[allow(unused)]
    struct UserUploadListResponse {
        success: bool,
        user_id: String,
        uploads: Vec<UserUpload>,
        limit: u32,
        offset: u32,
    }

    // The mandatory paging parameters are baked into the base URL; the two
    // optional date filters are appended in a fixed order when set.
    let mut endpoint = format!(
        "{API_BASE_URL}/tracker/v2/uploads/user/{user_id}/list?limit={limit}&offset={offset}"
    );
    for (key, date) in [("start_date", start_date), ("end_date", end_date)] {
        if let Some(d) = date {
            endpoint.push_str(&format!("&{key}={}", d.format("%Y-%m-%d")));
        }
    }

    let raw_response = self
        .client
        .get(endpoint)
        .header("Content-Type", "application/json")
        .header("X-API-Key", api_key)
        .send()
        .await?;

    let checked =
        check_for_response_success(raw_response, "User upload list unavailable").await?;

    let body = checked.json::<UserUploadListResponse>().await?;

    Ok((body.uploads, body.limit, body.offset))
}

/// Legacy method for backward compatibility if needed, though it's better to use the split methods.
#[allow(dead_code)]
pub async fn get_user_upload_stats(
&self,
api_key: &str,
user_id: &str,
limit: u32,
offset: u32,
) -> Result<UserUploads, ApiError> {
let statistics = self
.get_user_upload_statistics(api_key, user_id, None, None)
.await?;
let (uploads, limit, offset) = self
.get_user_upload_list(api_key, user_id, limit, offset, None, None)
.await?;

Ok(UserUploads {
statistics: server_stats.statistics,
uploads: server_stats.uploads,
statistics,
uploads,
limit,
offset,
})
}
}
25 changes: 21 additions & 4 deletions src/app_state.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,7 @@ use egui_wgpu::wgpu;
use tokio::sync::{broadcast, mpsc};

use crate::{
api::UserUploads, config::Config, play_time::PlayTimeTracker, record::LocalRecording,
upload::ProgressData,
config::Config, play_time::PlayTimeTracker, record::LocalRecording, upload::ProgressData,
};

pub struct AppState {
Expand All @@ -39,6 +38,8 @@ pub struct AppState {
pub unsupported_games: RwLock<UnsupportedGames>,
/// Offline mode state
pub offline: OfflineState,
/// Upload filters for date range filtering
pub upload_filters: RwLock<UploadFilters>,
}

/// State for offline mode and backoff retry logic
Expand Down Expand Up @@ -89,12 +90,19 @@ impl AppState {
last_recordable_game: RwLock::new(None),
unsupported_games: RwLock::new(UnsupportedGames::load_from_embedded()),
offline: OfflineState::default(),
upload_filters: RwLock::new(UploadFilters::default()),
};
tracing::debug!("AppState::new() complete");
state
}
}

/// Optional date-range filter applied when requesting upload statistics and
/// the upload list from the server; `None` bounds are simply omitted from
/// the query string.
#[derive(Default, Clone, Copy, Debug)]
pub struct UploadFilters {
    // Start of the filtered range (sent as `start_date=YYYY-MM-DD` when set).
    pub start_date: Option<chrono::NaiveDate>,
    // End of the filtered range (sent as `end_date=YYYY-MM-DD` when set).
    pub end_date: Option<chrono::NaiveDate>,
}

#[derive(Clone, PartialEq)]
pub struct ForegroundedGame {
pub exe_name: Option<String>,
Expand Down Expand Up @@ -171,7 +179,11 @@ pub enum AsyncRequest {
OpenDataDump,
OpenLog,
UpdateUnsupportedGames(UnsupportedGames),
LoadUploadStats,
LoadUploadStatistics,
LoadUploadList {
limit: u32,
offset: u32,
},
LoadLocalRecordings,
DeleteAllInvalidRecordings,
DeleteAllUploadedLocalRecordings,
Expand Down Expand Up @@ -213,7 +225,12 @@ pub enum UiUpdate {
UploadFailed(String),
UpdateRecordingState(bool),
UpdateNewerReleaseAvailable(GitHubRelease),
UpdateUserUploads(UserUploads),
UpdateUserUploadStatistics(crate::api::UserUploadStatistics),
UpdateUserUploadList {
uploads: Vec<crate::api::UserUpload>,
limit: u32,
offset: u32,
},
UpdateLocalRecordings(Vec<LocalRecording>),
FolderPickerResult {
old_path: PathBuf,
Expand Down
83 changes: 71 additions & 12 deletions src/record/local_recording.rs
Original file line number Diff line number Diff line change
Expand Up @@ -67,15 +67,58 @@ impl UploadProgressState {

/// Load progress state from a file
pub fn load_from_file(path: &Path) -> eyre::Result<Self> {
let content = std::fs::read_to_string(path)?;
let state: Self = serde_json::from_str(&content)?;
let file = std::fs::File::open(path)?;
let reader = std::io::BufReader::new(file);
let mut stream =
serde_json::Deserializer::from_reader(reader).into_iter::<serde_json::Value>();

// Read the first object which should be the UploadProgressState
let first_value = stream
.next()
.ok_or_else(|| eyre::eyre!("Empty progress file"))??;
let mut state: Self = serde_json::from_value(first_value)?;

// If the state was saved in the old format (single JSON object with populated etags),
// we're done (the etags are already in state.chunk_etags).
// If it was saved in the new format (header + log lines), state.chunk_etags might be empty,
// and we need to read the rest of the file.

// Read subsequent objects as CompleteMultipartUploadChunk
for value in stream {
let chunk: CompleteMultipartUploadChunk = serde_json::from_value(value?)?;
// Avoid duplicates if we're migrating or recovering from a weird state
if !state
.chunk_etags
.iter()
.any(|c| c.chunk_number == chunk.chunk_number)
{
state.chunk_etags.push(chunk);
}
}

Ok(state)
}

/// Save progress state to a file
/// Save progress state to a file (Snapshot + Log format)
pub fn save_to_file(&self, path: &Path) -> eyre::Result<()> {
let content = serde_json::to_string_pretty(self)?;
std::fs::write(path, content)?;
let file = std::fs::File::create(path)?;
let mut writer = std::io::BufWriter::new(file);

// 1. Write the base state with EMPTY chunk_etags to the first line.
// We clone to clear the vector without modifying self.
let mut header_state = self.clone();
header_state.chunk_etags.clear();
serde_json::to_writer(&mut writer, &header_state)?;
use std::io::Write;
writeln!(&mut writer)?;

// 2. Write all existing etags as subsequent lines
for chunk in &self.chunk_etags {
serde_json::to_writer(&mut writer, chunk)?;
writeln!(&mut writer)?;
}

writer.flush()?;
Ok(())
}

Expand Down Expand Up @@ -151,14 +194,30 @@ impl LocalRecordingPaused {
&self.upload_progress
}

/// Mutate the upload progress state and save to file.
pub fn mutate_upload_progress<R>(
/// Records a successful chunk upload: updates in-memory state and appends to the log file.
pub fn record_chunk_completion(
&mut self,
f: impl FnOnce(&mut UploadProgressState) -> R,
) -> R {
let r = f(&mut self.upload_progress);
self.save_upload_progress().ok();
r
chunk: CompleteMultipartUploadChunk,
) -> eyre::Result<()> {
// Update in-memory
self.upload_progress.chunk_etags.push(chunk.clone());

// Append to disk (efficient log append)
// We construct the path manually here or use the one from info,
// but UploadProgressState doesn't store the full progress file path, only tar path.
// We can use the helper method on self.

let path = self.upload_progress_path();
let mut file = std::fs::OpenOptions::new()
.append(true)
.create(false) // Should already exist
.open(path)?;

serde_json::to_writer(&mut file, &chunk)?;
use std::io::Write;
writeln!(&mut file)?;

Ok(())
}

/// Save upload progress state to .upload-progress file.
Expand Down
53 changes: 43 additions & 10 deletions src/tokio_thread.rs
Original file line number Diff line number Diff line change
Expand Up @@ -215,7 +215,8 @@ async fn main(
.send(UiUpdate::UpdateUserId(Ok(user_id)))
.ok();

app_state.async_request_tx.send(AsyncRequest::LoadUploadStats).await.ok();
app_state.async_request_tx.send(AsyncRequest::LoadUploadStatistics).await.ok();
app_state.async_request_tx.send(AsyncRequest::LoadUploadList { limit: 100, offset: 0 }).await.ok();
}
}
// no matter if offline or online, local recordings should be loaded
Expand Down Expand Up @@ -266,31 +267,62 @@ async fn main(
unsupported_games.games.len(),
);
}
AsyncRequest::LoadUploadStats => {
AsyncRequest::LoadUploadStatistics => {
if app_state.offline.mode.load(Ordering::SeqCst) {
tracing::info!("Offline mode enabled, skipping upload stats load");
// Don't send any update - UI will show no upload stats
tracing::info!("Offline mode enabled, skipping upload statistics load");
} else {
match valid_api_key_and_user_id.clone() {
Some((api_key, user_id)) => {
let start_date = app_state.upload_filters.read().unwrap().start_date;
let end_date = app_state.upload_filters.read().unwrap().end_date;
tokio::spawn({
let app_state = app_state.clone();
let api_client = api_client.clone();
async move {
let stats = match api_client.get_user_upload_stats(&api_key, &user_id).await {
let stats = match api_client.get_user_upload_statistics(&api_key, &user_id, start_date, end_date).await {
Ok(stats) => stats,
Err(e) => {
tracing::error!(e=?e, "Failed to get user upload stats");
tracing::error!(e=?e, "Failed to get user upload statistics");
return;
}
};
tracing::info!(stats=?stats.statistics, "Loaded upload stats");
app_state.ui_update_tx.send(UiUpdate::UpdateUserUploads(stats)).ok();
tracing::info!(stats=?stats, "Loaded upload statistics");
app_state.ui_update_tx.send(UiUpdate::UpdateUserUploadStatistics(stats)).ok();
}
});
}
None => {
tracing::error!("API key and user ID not found, skipping upload stats load");
tracing::error!("API key and user ID not found, skipping upload statistics load");
}
}
}
}
AsyncRequest::LoadUploadList { limit, offset } => {
if app_state.offline.mode.load(Ordering::SeqCst) {
tracing::info!("Offline mode enabled, skipping upload list load");
} else {
match valid_api_key_and_user_id.clone() {
Some((api_key, user_id)) => {
let start_date = app_state.upload_filters.read().unwrap().start_date;
let end_date = app_state.upload_filters.read().unwrap().end_date;
tokio::spawn({
let app_state = app_state.clone();
let api_client = api_client.clone();
async move {
let (uploads, limit, offset) = match api_client.get_user_upload_list(&api_key, &user_id, limit, offset, start_date, end_date).await {
Ok(res) => res,
Err(e) => {
tracing::error!(e=?e, "Failed to get user upload list");
return;
}
};
tracing::info!(count=uploads.len(), "Loaded upload list");
app_state.ui_update_tx.send(UiUpdate::UpdateUserUploadList { uploads, limit, offset }).ok();
}
});
}
None => {
tracing::error!("API key and user ID not found, skipping upload list load");
}
}
}
Expand Down Expand Up @@ -501,7 +533,8 @@ async fn main(
app_state.async_request_tx.send(AsyncRequest::CancelOfflineBackoff).await.ok();
app_state.async_request_tx.send(AsyncRequest::ValidateApiKey { api_key }).await.ok();
// Load data now that we're online
app_state.async_request_tx.send(AsyncRequest::LoadUploadStats).await.ok();
app_state.async_request_tx.send(AsyncRequest::LoadUploadStatistics).await.ok();
app_state.async_request_tx.send(AsyncRequest::LoadUploadList { limit: 100, offset: 0 }).await.ok();
app_state.async_request_tx.send(AsyncRequest::LoadLocalRecordings).await.ok();
},
}
Expand Down
Loading
Loading