|
|
|
@@ -1,5 +1,6 @@
|
|
|
|
|
use anyhow::{Context, Result};
|
|
|
|
|
use chrono::{DateTime, Local};
|
|
|
|
|
use rayon::prelude::*;
|
|
|
|
|
use serde::{Deserialize, Serialize};
|
|
|
|
|
use std::fs;
|
|
|
|
|
|
|
|
|
@@ -29,25 +30,28 @@ pub fn write_history(entry: HistoryEntry) -> Result<()> {
|
|
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
|
This should probably be async, all things considered; however, losing out on proper error handling with `?` is
|
|
|
|
|
not worth it. So here's hoping that Rust's std::fs (and serde_json, which means serde too) is fast enough to read something like 10,000 files.
|
|
|
|
|
*/
|
|
|
|
|
pub fn read_all_history() -> Result<Vec<HistoryEntry>> {
|
|
|
|
|
// should be safe to unwrap as this function will never be called without it
|
|
|
|
|
let history_dir = CONFIG.app.history_dir.as_ref().unwrap();
|
|
|
|
|
let mut entries = vec![];
|
|
|
|
|
let (tx, rx) = flume::unbounded();
|
|
|
|
|
|
|
|
|
|
for dir_entry in fs::read_dir(history_dir).context("Failed to read history_dir!")? {
|
|
|
|
|
let dir_entry = dir_entry?;
|
|
|
|
|
fs::read_dir(history_dir)
|
|
|
|
|
.context("Failed to read history_dir!")?
|
|
|
|
|
.par_bridge()
|
|
|
|
|
.try_for_each_with(tx, |tx, dir_entry| -> Result<()> {
|
|
|
|
|
let dir_entry = dir_entry?;
|
|
|
|
|
|
|
|
|
|
if dir_entry.file_type()?.is_file() {
|
|
|
|
|
let json = fs::read_to_string(dir_entry.path())?;
|
|
|
|
|
let entry: HistoryEntry = serde_json::from_str(&json)?;
|
|
|
|
|
if dir_entry.file_type()?.is_file() {
|
|
|
|
|
let json = fs::read_to_string(dir_entry.path())?;
|
|
|
|
|
let entry: HistoryEntry = serde_json::from_str(&json)?;
|
|
|
|
|
|
|
|
|
|
entries.push(entry);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
tx.send(entry)?;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
|
})?;
|
|
|
|
|
|
|
|
|
|
let mut entries: Vec<HistoryEntry> = rx.iter().collect();
|
|
|
|
|
|
|
|
|
|
entries.sort_by(|a, b| b.date.cmp(&a.date));
|
|
|
|
|
|
|
|
|
|