|
|
|
|
use sqlx::{Row, SqlitePool, sqlite::SqliteConnectOptions};
|
|
|
|
|
use std::collections::HashMap; |
|
|
|
|
use std::str::FromStr; |
|
|
|
|
|
|
|
|
|
// --- DTO: Data Transfer Object ---
|
|
|
|
|
// This struct exists ONLY to talk to the database.
|
|
|
|
|
#[derive(sqlx::FromRow)] |
|
|
|
|
struct SqliteEntryDto { |
|
|
|
|
id: i64, |
|
|
|
|
text: String, |
|
|
|
|
// sqlx reads the DB column into this specific wrapper
|
|
|
|
|
metadata: sqlx::types::Json<HashMap<String, String>>, |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
// Mapper: DTO -> Domain Entity
|
|
|
|
|
impl From<SqliteEntryDto> for DictEntry { |
|
|
|
|
fn from(dto: SqliteEntryDto) -> Self { |
|
|
|
|
Self { |
|
|
|
|
id: Some(dto.id as u64), |
|
|
|
|
text: dto.text, |
|
|
|
|
// Unwrap the sqlx wrapper to get the inner HashMap
|
|
|
|
|
metadata: dto.metadata.0, |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
// --- REPOSITORY IMPLEMENTATION ---
|
|
|
|
|
|
|
|
|
|
#[derive(Clone)] |
|
|
|
|
pub struct SqliteDictRepository { |
|
|
|
|
pool: SqlitePool, |
|
|
|
|
@ -20,295 +44,154 @@ impl SqliteDictRepository {
|
|
|
|
|
.await |
|
|
|
|
.map_err(|_| RepositoryError::ConnectionFailed)?; |
|
|
|
|
|
|
|
|
|
// Run migrations
|
|
|
|
|
sqlx::migrate!("./migrations") |
|
|
|
|
.run(&pool) |
|
|
|
|
.await |
|
|
|
|
.map_err(|e| RepositoryError::Unexpected(format!("Failed to run migrations: {}", e)))?; |
|
|
|
|
|
|
|
|
|
Ok(Self { pool }) |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
async fn ensure_dict_tables(&self, dict_name: &str) -> Result<(), RepositoryError> { |
|
|
|
|
// Create dict table if not exists
|
|
|
|
|
// Ensure tables exist with proper Normalization and Constraints
|
|
|
|
|
sqlx::query( |
|
|
|
|
r#" |
|
|
|
|
CREATE TABLE IF NOT EXISTS dicts ( |
|
|
|
|
name TEXT PRIMARY KEY, |
|
|
|
|
CREATE TABLE IF NOT EXISTS dictionaries ( |
|
|
|
|
id INTEGER PRIMARY KEY, |
|
|
|
|
name TEXT NOT NULL UNIQUE, |
|
|
|
|
created_at DATETIME DEFAULT CURRENT_TIMESTAMP |
|
|
|
|
) |
|
|
|
|
"#, |
|
|
|
|
) |
|
|
|
|
.execute(&self.pool) |
|
|
|
|
.await |
|
|
|
|
.map_err(|_| RepositoryError::ConnectionFailed)?; |
|
|
|
|
); |
|
|
|
|
|
|
|
|
|
// Insert dict if not exists
|
|
|
|
|
sqlx::query("INSERT OR IGNORE INTO dicts (name) VALUES (?)") |
|
|
|
|
.bind(dict_name) |
|
|
|
|
.execute(&self.pool) |
|
|
|
|
.await |
|
|
|
|
.map_err(|_| RepositoryError::ConnectionFailed)?; |
|
|
|
|
|
|
|
|
|
// Create entries table for this dict
|
|
|
|
|
let table_name = format!("dict_entries_{}", dict_name); |
|
|
|
|
let create_table_sql = format!( |
|
|
|
|
r#" |
|
|
|
|
CREATE TABLE IF NOT EXISTS {} ( |
|
|
|
|
CREATE TABLE IF NOT EXISTS entries ( |
|
|
|
|
id INTEGER PRIMARY KEY, |
|
|
|
|
text TEXT NOT NULL UNIQUE, |
|
|
|
|
dictionary_id INTEGER NOT NULL, |
|
|
|
|
text TEXT NOT NULL, |
|
|
|
|
metadata TEXT, |
|
|
|
|
created_at DATETIME DEFAULT CURRENT_TIMESTAMP, |
|
|
|
|
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP |
|
|
|
|
) |
|
|
|
|
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP, |
|
|
|
|
FOREIGN KEY(dictionary_id) REFERENCES dictionaries(id) ON DELETE CASCADE, |
|
|
|
|
-- This constraint allows us to update existing words instead of duplicating them |
|
|
|
|
UNIQUE(dictionary_id, text) |
|
|
|
|
); |
|
|
|
|
"#, |
|
|
|
|
table_name |
|
|
|
|
); |
|
|
|
|
|
|
|
|
|
sqlx::query(&create_table_sql) |
|
|
|
|
.execute(&self.pool) |
|
|
|
|
.await |
|
|
|
|
.map_err(|_| RepositoryError::ConnectionFailed)?; |
|
|
|
|
|
|
|
|
|
Ok(()) |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
pub fn get_next_id(&self, dict_name: &str) -> Result<u32, RepositoryError> { |
|
|
|
|
let pool = self.pool.clone(); |
|
|
|
|
let dict_name = dict_name.to_string(); |
|
|
|
|
|
|
|
|
|
tokio::task::block_in_place(|| { |
|
|
|
|
tokio::runtime::Handle::current().block_on(async move { |
|
|
|
|
let table_name = format!("dict_entries_{}", dict_name); |
|
|
|
|
|
|
|
|
|
let result: Option<i64> = |
|
|
|
|
sqlx::query_scalar(&format!("SELECT MAX(id) FROM {}", table_name)) |
|
|
|
|
.fetch_one(&pool) |
|
|
|
|
.await |
|
|
|
|
.map_err(|_| RepositoryError::ConnectionFailed)?; |
|
|
|
|
) |
|
|
|
|
.execute(&pool) |
|
|
|
|
.await |
|
|
|
|
.map_err(|e| RepositoryError::StorageError(e.to_string()))?; |
|
|
|
|
|
|
|
|
|
Ok(result.map(|id| id as u32 + 1).unwrap_or(1)) |
|
|
|
|
}) |
|
|
|
|
}) |
|
|
|
|
Ok(Self { pool }) |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
fn find_id_by_text(&self, dict_name: &str, text: &str) -> Result<Option<u32>, RepositoryError> { |
|
|
|
|
let pool = self.pool.clone(); |
|
|
|
|
let dict_name = dict_name.to_string(); |
|
|
|
|
let text = text.to_string(); |
|
|
|
|
|
|
|
|
|
tokio::task::block_in_place(|| { |
|
|
|
|
tokio::runtime::Handle::current().block_on(async move { |
|
|
|
|
let table_name = format!("dict_entries_{}", dict_name); |
|
|
|
|
|
|
|
|
|
let result: Option<i64> = |
|
|
|
|
sqlx::query_scalar(&format!("SELECT id FROM {} WHERE text = ?", table_name)) |
|
|
|
|
.bind(&text) |
|
|
|
|
.fetch_one(&pool) |
|
|
|
|
.await |
|
|
|
|
.map_err(|_| RepositoryError::ConnectionFailed)?; |
|
|
|
|
// Helper: Resolve dictionary name to ID
|
|
|
|
|
async fn get_dict_id(&self, name: &str) -> Result<i64, RepositoryError> { |
|
|
|
|
let row = sqlx::query("SELECT id FROM dictionaries WHERE name = ?") |
|
|
|
|
.bind(name) |
|
|
|
|
.fetch_optional(&self.pool) |
|
|
|
|
.await |
|
|
|
|
.map_err(|e| RepositoryError::StorageError(e.to_string()))?; |
|
|
|
|
|
|
|
|
|
Ok(result.map(|id| id as u32)) |
|
|
|
|
}) |
|
|
|
|
}) |
|
|
|
|
match row { |
|
|
|
|
Some(r) => Ok(r.get("id")), |
|
|
|
|
None => Err(RepositoryError::NotFound(name.to_string())), |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
#[async_trait::async_trait] |
|
|
|
|
impl DictRepository for SqliteDictRepository { |
|
|
|
|
fn create(&self, name: &str) -> Result<(), RepositoryError> { |
|
|
|
|
// This is a synchronous method, but we need to run async operations
|
|
|
|
|
// In a real application, you might want to make the trait async or use a blocking executor
|
|
|
|
|
let pool = self.pool.clone(); |
|
|
|
|
let name = name.to_string(); |
|
|
|
|
|
|
|
|
|
// Use tokio's block_in_place to run async code in sync context
|
|
|
|
|
tokio::task::block_in_place(|| { |
|
|
|
|
tokio::runtime::Handle::current().block_on(async move { |
|
|
|
|
let mut tx = pool.begin().await.map_err(|e| { |
|
|
|
|
RepositoryError::Unexpected(format!("Failed to begin transaction: {}", e)) |
|
|
|
|
})?; |
|
|
|
|
|
|
|
|
|
// Create dict table
|
|
|
|
|
sqlx::query( |
|
|
|
|
r#" |
|
|
|
|
CREATE TABLE IF NOT EXISTS dicts ( |
|
|
|
|
name TEXT PRIMARY KEY, |
|
|
|
|
created_at DATETIME DEFAULT CURRENT_TIMESTAMP |
|
|
|
|
) |
|
|
|
|
"#, |
|
|
|
|
) |
|
|
|
|
.execute(&mut *tx) |
|
|
|
|
.await |
|
|
|
|
.map_err(|e| { |
|
|
|
|
RepositoryError::Unexpected(format!("Failed to create dicts table: {}", e)) |
|
|
|
|
})?; |
|
|
|
|
|
|
|
|
|
// Insert dict
|
|
|
|
|
sqlx::query("INSERT OR IGNORE INTO dicts (name) VALUES (?)") |
|
|
|
|
.bind(&name) |
|
|
|
|
.execute(&mut *tx) |
|
|
|
|
.await |
|
|
|
|
.map_err(|e| { |
|
|
|
|
RepositoryError::Unexpected(format!("Failed to insert dict: {}", e)) |
|
|
|
|
})?; |
|
|
|
|
|
|
|
|
|
// Create entries table for this dict
|
|
|
|
|
let table_name = format!("dict_entries_{}", name); |
|
|
|
|
let create_table_sql = format!( |
|
|
|
|
r#" |
|
|
|
|
CREATE TABLE IF NOT EXISTS {} ( |
|
|
|
|
id INTEGER PRIMARY KEY, |
|
|
|
|
text TEXT NOT NULL UNIQUE, |
|
|
|
|
metadata TEXT, |
|
|
|
|
created_at DATETIME DEFAULT CURRENT_TIMESTAMP, |
|
|
|
|
updated_at DATETIME DEFAULT CURRENT_TIMESTAMP |
|
|
|
|
) |
|
|
|
|
"#, |
|
|
|
|
table_name |
|
|
|
|
); |
|
|
|
|
|
|
|
|
|
sqlx::query(&create_table_sql) |
|
|
|
|
.execute(&mut *tx) |
|
|
|
|
.await |
|
|
|
|
.map_err(|e| { |
|
|
|
|
RepositoryError::Unexpected(format!( |
|
|
|
|
"Failed to create entries table: {}", |
|
|
|
|
e |
|
|
|
|
)) |
|
|
|
|
})?; |
|
|
|
|
|
|
|
|
|
tx.commit().await.map_err(|e| { |
|
|
|
|
RepositoryError::Unexpected(format!("Failed to commit transaction: {}", e)) |
|
|
|
|
})?; |
|
|
|
|
Ok(()) |
|
|
|
|
}) |
|
|
|
|
}) |
|
|
|
|
async fn create(&self, name: &str) -> Result<(), RepositoryError> { |
|
|
|
|
sqlx::query("INSERT OR IGNORE INTO dictionaries (name) VALUES (?)") |
|
|
|
|
.bind(name) |
|
|
|
|
.execute(&self.pool) |
|
|
|
|
.await |
|
|
|
|
.map_err(|e| RepositoryError::StorageError(e.to_string()))?; |
|
|
|
|
Ok(()) |
|
|
|
|
} |
|
|
|
|
fn save_entries(&self, dict_name: &str, entries: &[DictEntry]) -> Result<(), RepositoryError> { |
|
|
|
|
let pool = self.pool.clone(); |
|
|
|
|
let dict_name = dict_name.to_string(); |
|
|
|
|
let entries = entries.to_vec(); |
|
|
|
|
|
|
|
|
|
tokio::task::block_in_place(|| { |
|
|
|
|
tokio::runtime::Handle::current().block_on(async move { |
|
|
|
|
let table_name = format!("dict_entries_{}", dict_name); |
|
|
|
|
|
|
|
|
|
for entry in entries { |
|
|
|
|
let metadata_json = serde_json::to_string(&entry.metadata) |
|
|
|
|
.map_err(|e| RepositoryError::InvalidData(e.to_string()))?; |
|
|
|
|
|
|
|
|
|
// Check if entry with this text already exists
|
|
|
|
|
let existing_id: Option<i64> = sqlx::query_scalar(&format!( |
|
|
|
|
"SELECT id FROM {} WHERE text = ?", |
|
|
|
|
table_name |
|
|
|
|
)) |
|
|
|
|
.bind(&entry.text) |
|
|
|
|
.fetch_optional(&pool) |
|
|
|
|
.await |
|
|
|
|
.map_err(|e| RepositoryError::Unexpected(format!("Failed to check existing entry: {}", e)))?; |
|
|
|
|
|
|
|
|
|
if let Some(id) = existing_id { |
|
|
|
|
// Update existing entry
|
|
|
|
|
sqlx::query(&format!( |
|
|
|
|
"UPDATE {} SET metadata = ?, updated_at = CURRENT_TIMESTAMP WHERE id = ?", |
|
|
|
|
table_name |
|
|
|
|
)) |
|
|
|
|
.bind(metadata_json) |
|
|
|
|
.bind(id) |
|
|
|
|
.execute(&pool) |
|
|
|
|
.await |
|
|
|
|
.map_err(|e| RepositoryError::Unexpected(format!("Failed to update entry: {}", e)))?; |
|
|
|
|
} else { |
|
|
|
|
// Insert new entry
|
|
|
|
|
sqlx::query(&format!( |
|
|
|
|
"INSERT INTO {} (id, text, metadata) VALUES (?, ?, ?)", |
|
|
|
|
table_name |
|
|
|
|
)) |
|
|
|
|
.bind(entry.id as i64) |
|
|
|
|
.bind(&entry.text) |
|
|
|
|
.bind(metadata_json) |
|
|
|
|
.execute(&pool) |
|
|
|
|
.await |
|
|
|
|
.map_err(|e| RepositoryError::Unexpected(format!("Failed to insert entry: {}", e)))?; |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
async fn save_entries( |
|
|
|
|
&self, |
|
|
|
|
dict_name: &str, |
|
|
|
|
entries: &[DictEntry], |
|
|
|
|
) -> Result<(), RepositoryError> { |
|
|
|
|
let mut tx = self |
|
|
|
|
.pool |
|
|
|
|
.begin() |
|
|
|
|
.await |
|
|
|
|
.map_err(|_| RepositoryError::ConnectionFailed)?; |
|
|
|
|
|
|
|
|
|
Ok(()) |
|
|
|
|
}) |
|
|
|
|
}) |
|
|
|
|
} |
|
|
|
|
// 1. Get Dict ID
|
|
|
|
|
let dict_id_row = sqlx::query("SELECT id FROM dictionaries WHERE name = ?") |
|
|
|
|
.bind(dict_name) |
|
|
|
|
.fetch_optional(&mut *tx) |
|
|
|
|
.await |
|
|
|
|
.map_err(|e| RepositoryError::StorageError(e.to_string()))?; |
|
|
|
|
|
|
|
|
|
let dict_id: i64 = match dict_id_row { |
|
|
|
|
Some(row) => row.get("id"), |
|
|
|
|
None => return Err(RepositoryError::NotFound(dict_name.to_string())), |
|
|
|
|
}; |
|
|
|
|
|
|
|
|
|
// 2. Batch Upsert
|
|
|
|
|
for entry in entries { |
|
|
|
|
// We must wrap the HashMap in sqlx::types::Json so SQLx knows how to serialize it
|
|
|
|
|
let meta_json = sqlx::types::Json(&entry.metadata); |
|
|
|
|
|
|
|
|
|
sqlx::query( |
|
|
|
|
r#" |
|
|
|
|
INSERT INTO entries (dictionary_id, text, metadata) |
|
|
|
|
VALUES (?, ?, ?) |
|
|
|
|
ON CONFLICT(dictionary_id, text) DO UPDATE SET |
|
|
|
|
metadata = excluded.metadata, |
|
|
|
|
updated_at = CURRENT_TIMESTAMP |
|
|
|
|
"#, |
|
|
|
|
) |
|
|
|
|
.bind(dict_id) |
|
|
|
|
.bind(&entry.text) |
|
|
|
|
.bind(meta_json) |
|
|
|
|
.execute(&mut *tx) |
|
|
|
|
.await |
|
|
|
|
.map_err(|e| RepositoryError::StorageError(e.to_string()))?; |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
fn get_next_id(&self, dict_name: &str) -> Result<u32, RepositoryError> { |
|
|
|
|
self.get_next_id(dict_name) |
|
|
|
|
tx.commit() |
|
|
|
|
.await |
|
|
|
|
.map_err(|e| RepositoryError::StorageError(e.to_string()))?; |
|
|
|
|
Ok(()) |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
fn fetch_many( |
|
|
|
|
async fn fetch_many( |
|
|
|
|
&self, |
|
|
|
|
name: &str, |
|
|
|
|
limit: Option<u32>, |
|
|
|
|
offset: Option<u32>, |
|
|
|
|
) -> Result<Dict, RepositoryError> { |
|
|
|
|
let pool = self.pool.clone(); |
|
|
|
|
let name = name.to_string(); |
|
|
|
|
|
|
|
|
|
tokio::task::block_in_place(|| { |
|
|
|
|
tokio::runtime::Handle::current().block_on(async move { |
|
|
|
|
let table_name = format!("dict_entries_{}", name); |
|
|
|
|
|
|
|
|
|
// Check if dict exists
|
|
|
|
|
let dict_exists: bool = |
|
|
|
|
sqlx::query_scalar("SELECT EXISTS(SELECT 1 FROM dicts WHERE name = ?)") |
|
|
|
|
.bind(&name) |
|
|
|
|
.fetch_one(&pool) |
|
|
|
|
.await |
|
|
|
|
.map_err(|e| { |
|
|
|
|
RepositoryError::Unexpected(format!( |
|
|
|
|
"Failed to check dict exists: {}", |
|
|
|
|
e |
|
|
|
|
)) |
|
|
|
|
})?; |
|
|
|
|
|
|
|
|
|
if !dict_exists { |
|
|
|
|
return Err(RepositoryError::NotFound); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
let mut query = format!("SELECT id, text, metadata FROM {}", table_name); |
|
|
|
|
// 1. Get Dict ID
|
|
|
|
|
let dict_id = self.get_dict_id(name).await?; |
|
|
|
|
|
|
|
|
|
if let Some(offset_val) = offset { |
|
|
|
|
query.push_str(&format!(" LIMIT {}", limit.unwrap_or(1000))); |
|
|
|
|
query.push_str(&format!(" OFFSET {}", offset_val)); |
|
|
|
|
} else if let Some(limit_val) = limit { |
|
|
|
|
query.push_str(&format!(" LIMIT {}", limit_val)); |
|
|
|
|
} |
|
|
|
|
// 2. Prepare Limits
|
|
|
|
|
let limit_val = limit.unwrap_or(1000); |
|
|
|
|
let offset_val = offset.unwrap_or(0); |
|
|
|
|
|
|
|
|
|
let rows = sqlx::query(&query).fetch_all(&pool).await.map_err(|e| { |
|
|
|
|
RepositoryError::Unexpected(format!("Failed to fetch entries: {}", e)) |
|
|
|
|
})?; |
|
|
|
|
|
|
|
|
|
let mut entries = HashMap::new(); |
|
|
|
|
for row in rows { |
|
|
|
|
let id: i64 = row.get("id"); |
|
|
|
|
let text: String = row.get("text"); |
|
|
|
|
let metadata_json: Option<String> = row.get("metadata"); |
|
|
|
|
|
|
|
|
|
let metadata = if let Some(json) = metadata_json { |
|
|
|
|
serde_json::from_str(&json) |
|
|
|
|
.map_err(|e| RepositoryError::InvalidData(e.to_string()))? |
|
|
|
|
} else { |
|
|
|
|
HashMap::new() |
|
|
|
|
}; |
|
|
|
|
|
|
|
|
|
let entry = DictEntry { |
|
|
|
|
id: id as DictEntryId, |
|
|
|
|
text, |
|
|
|
|
metadata, |
|
|
|
|
}; |
|
|
|
|
|
|
|
|
|
entries.insert(entry.id, entry); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
Ok(Dict { name, entries }) |
|
|
|
|
}) |
|
|
|
|
// 3. Query (Reading into the DTO)
|
|
|
|
|
let dtos = sqlx::query_as::<_, SqliteEntryDto>( |
|
|
|
|
r#" |
|
|
|
|
SELECT id, text, metadata |
|
|
|
|
FROM entries |
|
|
|
|
WHERE dictionary_id = ? |
|
|
|
|
LIMIT ? OFFSET ? |
|
|
|
|
"#, |
|
|
|
|
) |
|
|
|
|
.bind(dict_id) |
|
|
|
|
.bind(limit_val) |
|
|
|
|
.bind(offset_val) |
|
|
|
|
.fetch_all(&self.pool) |
|
|
|
|
.await |
|
|
|
|
.map_err(|e| RepositoryError::StorageError(e.to_string()))?; |
|
|
|
|
|
|
|
|
|
// 4. Convert DTOs to Domain Dict
|
|
|
|
|
let mut entries_map = HashMap::new(); |
|
|
|
|
for dto in dtos { |
|
|
|
|
let entry: DictEntry = dto.into(); // Converts DTO -> Entity
|
|
|
|
|
|
|
|
|
|
// We safely unwrap because the DB guarantees an ID exists
|
|
|
|
|
if let Some(id) = entry.id { |
|
|
|
|
entries_map.insert(id, entry); |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
Ok(Dict { |
|
|
|
|
name: name.to_string(), |
|
|
|
|
entries: entries_map, |
|
|
|
|
}) |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|