From 2e797ddb79c3ee8dd4d17baf5b6b1abbb52ca249 Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Wed, 22 Apr 2026 17:04:21 -0700 Subject: [PATCH 01/42] test: capture model picker shell Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/__tests__/App.test.tsx | 29 ++++++++++++++++++++++++++ src/view/__tests__/AskBarView.test.tsx | 22 +++++++++++++++++++ 2 files changed, 51 insertions(+) diff --git a/src/__tests__/App.test.tsx b/src/__tests__/App.test.tsx index 0a2f5c1a..091fce11 100644 --- a/src/__tests__/App.test.tsx +++ b/src/__tests__/App.test.tsx @@ -28,6 +28,35 @@ describe('App', () => { enableChannelCapture(); }); + it('calls get_model_config on mount', async () => { + render(); + await act(async () => {}); + + expect(invoke).toHaveBeenCalledWith('get_model_config'); + }); + + it('fetches model picker state on mount and refreshes it when the overlay shows', async () => { + invoke.mockReset(); + enableChannelCaptureWithResponses({ + get_model_picker_state: { + active: 'gemma4:e2b', + all: ['gemma4:e2b', 'qwen2.5:7b'], + }, + }); + + render(); + await act(async () => {}); + + expect(invoke).toHaveBeenCalledWith('get_model_picker_state'); + + invoke.mockClear(); + + await showOverlay(); + + expect(invoke).toHaveBeenCalledWith('get_model_picker_state'); + }); + + it('grows upward when near bottom screen edge', async () => { const { container } = render(); await act(async () => {}); diff --git a/src/view/__tests__/AskBarView.test.tsx b/src/view/__tests__/AskBarView.test.tsx index 7ed0681e..6e48d549 100644 --- a/src/view/__tests__/AskBarView.test.tsx +++ b/src/view/__tests__/AskBarView.test.tsx @@ -212,6 +212,28 @@ describe('AskBarView', () => { ).toBeInTheDocument(); }); + it('renders a model picker trigger near send when models are available', () => { + render( + , + ); + + expect( + screen.getByRole('button', { name: 'Choose model' }), + ).toBeInTheDocument(); + }); + it('displays selectedText when provided', () => { render( Date: Thu, 23 
Apr 2026 14:42:05 -0700 Subject: [PATCH 02/42] feat: persist active local model selection Introduce a dedicated models module as the single backend source of truth for the user's chosen Ollama model. The model slug is persisted in app_config under 'active_model' and resolved at runtime against the live /api/tags list, falling back to the first installed model, then the env bootstrap default. - Add src-tauri/src/models.rs with resolve_active_model, validate_model_installed, fetch_installed_model_names, and two Tauri commands (get_model_picker_state, set_active_model). All pure helpers are unit-tested; the Tauri wrappers are thin delegations excluded from coverage. - Seed ActiveModelState at startup from the persisted value (or env bootstrap) and manage it alongside the existing ModelConfig, which is kept registered for Task 5 back-compat. - Rewire ask_ollama, search_pipeline, and generate_title to read the active slug from ActiveModelState. Mutex guards are scoped and dropped before any .await point so locks are never held across suspension. - set_active_model rejects uninstalled slugs with the exact error copy 'Model is not installed in Ollama: {model}'. 
Signed-off-by: Logan Nguyen --- src-tauri/src/commands.rs | 8 +- src-tauri/src/history.rs | 3 +- src-tauri/src/lib.rs | 25 +++ src-tauri/src/models.rs | 395 ++++++++++++++++++++++++++++++++++++ src-tauri/src/search/mod.rs | 17 +- 5 files changed, 442 insertions(+), 6 deletions(-) create mode 100644 src-tauri/src/models.rs diff --git a/src-tauri/src/commands.rs b/src-tauri/src/commands.rs index b5eb213f..66f5f98d 100644 --- a/src-tauri/src/commands.rs +++ b/src-tauri/src/commands.rs @@ -318,8 +318,14 @@ pub async fn ask_ollama( generation: State<'_, GenerationState>, history: State<'_, ConversationHistory>, config: State<'_, AppConfig>, + active_model: State<'_, crate::models::ActiveModelState>, ) -> Result<(), String> { let endpoint = format!("{}/api/chat", config.model.ollama_url.trim_end_matches('/')); + // Snapshot the active model slug; drop the guard before any `.await`. + let model_name = { + let guard = active_model.0.lock().map_err(|e| e.to_string())?; + guard.clone() + }; let cancel_token = CancellationToken::new(); generation.set_token(cancel_token.clone()); @@ -366,7 +372,7 @@ pub async fn ask_ollama( let accumulated = stream_ollama_chat( &endpoint, - config.model.active(), + &model_name, messages, think, &client, diff --git a/src-tauri/src/history.rs b/src-tauri/src/history.rs index df16e845..47d41f5c 100644 --- a/src-tauri/src/history.rs +++ b/src-tauri/src/history.rs @@ -244,6 +244,7 @@ pub fn delete_conversation( pub async fn generate_title( conversation_id: String, messages: Vec, + model: String, db: State<'_, Database>, client: State<'_, reqwest::Client>, app_config: State<'_, AppConfig>, @@ -289,7 +290,7 @@ pub async fn generate_title( let cancel_token = tokio_util::sync::CancellationToken::new(); let accumulated = crate::commands::stream_ollama_chat( &endpoint, - app_config.model.active(), + &model, title_messages, false, &client, diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index 3a2a035e..1192625e 100644 --- a/src-tauri/src/lib.rs 
+++ b/src-tauri/src/lib.rs @@ -20,6 +20,7 @@ pub mod config; pub mod database; pub mod history; pub mod images; +pub mod models; pub mod onboarding; pub mod screenshot; pub mod search; @@ -720,6 +721,11 @@ pub fn run() { Ok(c) => c, Err(e) => crate::config::show_fatal_dialog_and_exit(&e), }; + // Snapshot the bootstrap active-model slug from TOML before + // moving `app_config` into managed state. The picker overlay + // refreshes the live installed list on first open and may + // replace this seed. + let bootstrap_active = app_config.model.active().to_string(); app.manage(app_config); // ── Generation + conversation state ───────────────────── @@ -733,6 +739,21 @@ pub fn run() { .expect("failed to resolve app data directory"); let db_conn = database::open_database(&app_data_dir) .expect("failed to initialise SQLite database"); + + // ── Active-model state: seed from SQLite app_config table ── + // The installed list isn't queried here (no async runtime yet); + // get_model_picker_state reconciles against the live + // `/api/tags` list on first open and may replace this seed. 
+ let persisted_active = database::get_config(&db_conn, models::ACTIVE_MODEL_KEY) + .expect("failed to read active_model from app_config"); + let initial_active_model = models::resolve_active_model( + persisted_active.as_deref(), + &[], + &bootstrap_active, + ); + app.manage(models::ActiveModelState(std::sync::Mutex::new( + initial_active_model, + ))); app.manage(history::Database(std::sync::Mutex::new(db_conn))); // ── Orphaned image cleanup (startup + periodic) ───────── @@ -755,6 +776,10 @@ pub fn run() { #[cfg(not(coverage))] commands::get_config, #[cfg(not(coverage))] + models::get_model_picker_state, + #[cfg(not(coverage))] + models::set_active_model, + #[cfg(not(coverage))] history::save_conversation, #[cfg(not(coverage))] history::persist_message, diff --git a/src-tauri/src/models.rs b/src-tauri/src/models.rs new file mode 100644 index 00000000..2113b0f7 --- /dev/null +++ b/src-tauri/src/models.rs @@ -0,0 +1,395 @@ +/*! + * Active-model state module. + * + * Single source of truth for the locally-selected Ollama model. The "active" + * model is whichever slug the user last picked via the picker popup, + * persisted across launches in `app_config` under [`ACTIVE_MODEL_KEY`] and + * mirrored in [`ActiveModelState`] for fast reads from Tauri commands. + * + * The backend treats Ollama's `/api/tags` response as authoritative: a + * persisted model is only honored if it still appears in the live installed + * list. If not, we fall back to the first installed model, then to the + * bootstrap default from `THUKI_SUPPORTED_AI_MODELS`. + */ + +use std::sync::Mutex; + +use serde::Deserialize; + +use crate::config::defaults::DEFAULT_OLLAMA_URL; +use crate::database::{get_config, set_config}; +use crate::history::Database; + +/// `app_config` key used to persist the user's selected model slug. +pub const ACTIVE_MODEL_KEY: &str = "active_model"; + +/// In-memory cache of the currently active model slug. 
Written once at +/// startup (after `resolve_active_model`) and updated every time the user +/// picks a new model via `set_active_model`. +#[derive(Default)] +pub struct ActiveModelState(pub Mutex); + +/// Top-level shape of the Ollama `/api/tags` response. Only the `models` +/// array is consumed; all other fields are ignored. +#[derive(Deserialize)] +struct TagsResponse { + models: Vec, +} + +/// A single entry in the `/api/tags` `models` array. Only the `name` slug +/// is needed; everything else (size, digest, modified_at, details) is +/// deliberately ignored to keep the schema surface small. +#[derive(Deserialize)] +struct TagsModel { + name: String, +} + +/// Chooses which model slug should be active given a persisted preference, +/// the live installed list from Ollama, and an env-derived bootstrap value. +/// +/// Resolution rules, in order: +/// 1. If `persisted` is `Some` and still appears in `installed`, use it. +/// 2. Otherwise use the first entry in `installed`. +/// 3. Otherwise fall back to `bootstrap` (the compiled-in / env default). +pub fn resolve_active_model( + persisted: Option<&str>, + installed: &[String], + bootstrap: &str, +) -> String { + if let Some(p) = persisted { + if installed.iter().any(|m| m == p) { + return p.to_string(); + } + } + if let Some(first) = installed.first() { + return first.clone(); + } + bootstrap.to_string() +} + +/// Verifies that `model` is present in `installed`. Returns an `Err` with +/// the exact error copy the frontend surfaces when a user somehow requests +/// a slug that is not pulled locally. +pub fn validate_model_installed(model: &str, installed: &[String]) -> Result<(), String> { + if installed.iter().any(|m| m == model) { + Ok(()) + } else { + Err(format!("Model is not installed in Ollama: {model}")) + } +} + +/// GETs `{base_url}/api/tags` and returns the list of installed model slugs. 
+/// +/// Every failure mode (transport error, non-2xx status, JSON decode error) +/// is translated to `Err(String)` so the Tauri command layer can propagate +/// it verbatim to the frontend without panicking. +pub async fn fetch_installed_model_names( + client: &reqwest::Client, + base_url: &str, +) -> Result, String> { + let url = format!("{}/api/tags", base_url.trim_end_matches('/')); + let response = client + .get(&url) + .send() + .await + .map_err(|e| format!("failed to reach Ollama: {e}"))?; + + if !response.status().is_success() { + return Err(format!( + "Ollama /api/tags returned HTTP {}", + response.status().as_u16() + )); + } + + let body: TagsResponse = response + .json() + .await + .map_err(|e| format!("failed to decode /api/tags response: {e}"))?; + + Ok(body.models.into_iter().map(|m| m.name).collect()) +} + +/// Returns the currently active model and the full list of installed models, +/// persisting the resolved active model so future launches see it. +/// +/// Shape: `{ "active": "", "all": ["", ...] }`. +#[cfg_attr(coverage_nightly, coverage(off))] +#[cfg_attr(not(coverage), tauri::command)] +pub async fn get_model_picker_state( + client: tauri::State<'_, reqwest::Client>, + db: tauri::State<'_, Database>, + active_model: tauri::State<'_, ActiveModelState>, + app_config: tauri::State<'_, crate::config::AppConfig>, +) -> Result { + let installed = fetch_installed_model_names(&client, DEFAULT_OLLAMA_URL).await?; + + let persisted = { + let conn = db.0.lock().map_err(|e| e.to_string())?; + get_config(&conn, ACTIVE_MODEL_KEY).map_err(|e| e.to_string())? 
+ }; + + let resolved = resolve_active_model(persisted.as_deref(), &installed, app_config.model.active()); + + { + let conn = db.0.lock().map_err(|e| e.to_string())?; + set_config(&conn, ACTIVE_MODEL_KEY, &resolved).map_err(|e| e.to_string())?; + } + + { + let mut guard = active_model.0.lock().map_err(|e| e.to_string())?; + *guard = resolved.clone(); + } + + Ok(serde_json::json!({ "active": resolved, "all": installed })) +} + +/// Persists `model` as the active model after validating that Ollama still +/// reports it as installed. Rejects uninstalled slugs with the exact error +/// copy `"Model is not installed in Ollama: {model}"`. +#[cfg_attr(coverage_nightly, coverage(off))] +#[cfg_attr(not(coverage), tauri::command)] +pub async fn set_active_model( + model: String, + client: tauri::State<'_, reqwest::Client>, + db: tauri::State<'_, Database>, + active_model: tauri::State<'_, ActiveModelState>, +) -> Result<(), String> { + let installed = fetch_installed_model_names(&client, DEFAULT_OLLAMA_URL).await?; + validate_model_installed(&model, &installed)?; + + { + let conn = db.0.lock().map_err(|e| e.to_string())?; + set_config(&conn, ACTIVE_MODEL_KEY, &model).map_err(|e| e.to_string())?; + } + + { + let mut guard = active_model.0.lock().map_err(|e| e.to_string())?; + *guard = model; + } + + Ok(()) +} + +// ─── Tests ────────────────────────────────────────────────────────────────── + +#[cfg(test)] +mod tests { + use super::*; + + // ── resolve_active_model ───────────────────────────────────────────────── + + #[test] + fn resolve_prefers_persisted_when_still_installed() { + let installed = vec!["gemma4:e2b".to_string(), "gemma4:e4b".to_string()]; + let result = resolve_active_model(Some("gemma4:e4b"), &installed, "gemma4:e2b"); + assert_eq!(result, "gemma4:e4b"); + } + + #[test] + fn resolve_falls_back_to_first_installed_when_persisted_missing() { + let installed = vec!["gemma4:e2b".to_string(), "gemma4:e4b".to_string()]; + let result = 
resolve_active_model(Some("llama3:8b"), &installed, "bootstrap-model"); + assert_eq!(result, "gemma4:e2b"); + } + + #[test] + fn resolve_falls_back_to_bootstrap_when_nothing_installed() { + let installed: Vec = vec![]; + let result = resolve_active_model(None, &installed, "bootstrap-model"); + assert_eq!(result, "bootstrap-model"); + } + + #[test] + fn resolve_with_no_persisted_uses_first_installed() { + let installed = vec!["gemma4:e2b".to_string()]; + let result = resolve_active_model(None, &installed, "bootstrap-model"); + assert_eq!(result, "gemma4:e2b"); + } + + #[test] + fn resolve_with_empty_persisted_bootstrap_used_when_installed_empty() { + let installed: Vec = vec![]; + // Persisted is present but installed list is empty: bootstrap wins + // because there's nothing to cross-check against. + let result = resolve_active_model(Some("gemma4:e2b"), &installed, "fallback"); + assert_eq!(result, "fallback"); + } + + // ── validate_model_installed ───────────────────────────────────────────── + + #[test] + fn validate_accepts_installed_model() { + let installed = vec!["gemma4:e2b".to_string(), "gemma4:e4b".to_string()]; + assert!(validate_model_installed("gemma4:e4b", &installed).is_ok()); + } + + #[test] + fn validate_rejects_uninstalled_model_with_exact_message() { + let installed = vec!["gemma4:e2b".to_string()]; + let err = validate_model_installed("llama3:8b", &installed).unwrap_err(); + assert_eq!(err, "Model is not installed in Ollama: llama3:8b"); + } + + #[test] + fn validate_rejects_when_installed_list_empty() { + let installed: Vec = vec![]; + let err = validate_model_installed("gemma4:e2b", &installed).unwrap_err(); + assert_eq!(err, "Model is not installed in Ollama: gemma4:e2b"); + } + + // ── fetch_installed_model_names ────────────────────────────────────────── + + #[tokio::test] + async fn fetch_parses_valid_tags_response() { + let mut server = mockito::Server::new_async().await; + let body = r#"{"models":[ + {"name":"gemma4:e2b"}, + 
{"name":"gemma4:e4b"} + ]}"#; + let mock = server + .mock("GET", "/api/tags") + .with_status(200) + .with_header("content-type", "application/json") + .with_body(body) + .create_async() + .await; + + let client = reqwest::Client::new(); + let result = fetch_installed_model_names(&client, &server.url()).await; + + mock.assert_async().await; + let names = result.unwrap(); + assert_eq!( + names, + vec!["gemma4:e2b".to_string(), "gemma4:e4b".to_string()] + ); + } + + #[tokio::test] + async fn fetch_returns_empty_when_no_models_installed() { + let mut server = mockito::Server::new_async().await; + let mock = server + .mock("GET", "/api/tags") + .with_status(200) + .with_header("content-type", "application/json") + .with_body(r#"{"models":[]}"#) + .create_async() + .await; + + let client = reqwest::Client::new(); + let result = fetch_installed_model_names(&client, &server.url()).await; + + mock.assert_async().await; + assert_eq!(result.unwrap(), Vec::::new()); + } + + #[tokio::test] + async fn fetch_maps_http_error_to_err_string() { + let mut server = mockito::Server::new_async().await; + let mock = server + .mock("GET", "/api/tags") + .with_status(500) + .with_body("server blew up") + .create_async() + .await; + + let client = reqwest::Client::new(); + let result = fetch_installed_model_names(&client, &server.url()).await; + + mock.assert_async().await; + let err = result.unwrap_err(); + assert!( + err.contains("500"), + "expected status code in error, got: {err}" + ); + } + + #[tokio::test] + async fn fetch_maps_invalid_json_to_err_string() { + let mut server = mockito::Server::new_async().await; + let mock = server + .mock("GET", "/api/tags") + .with_status(200) + .with_header("content-type", "application/json") + .with_body("not json at all") + .create_async() + .await; + + let client = reqwest::Client::new(); + let result = fetch_installed_model_names(&client, &server.url()).await; + + mock.assert_async().await; + let err = result.unwrap_err(); + assert!( + 
err.contains("failed to decode"), + "expected decode error, got: {err}" + ); + } + + #[tokio::test] + async fn fetch_maps_transport_error_to_err_string() { + // Port 1 is reserved and will refuse connections; tests the `send()` + // error branch without a live server. + let client = reqwest::Client::new(); + let result = fetch_installed_model_names(&client, "http://127.0.0.1:1").await; + + let err = result.unwrap_err(); + assert!( + err.contains("failed to reach Ollama"), + "expected transport error, got: {err}" + ); + } + + #[tokio::test] + async fn fetch_trims_trailing_slash_from_base_url() { + let mut server = mockito::Server::new_async().await; + let mock = server + .mock("GET", "/api/tags") + .with_status(200) + .with_header("content-type", "application/json") + .with_body(r#"{"models":[{"name":"x"}]}"#) + .create_async() + .await; + + let client = reqwest::Client::new(); + // Pass the URL with a trailing slash; the helper must strip it. + let url_with_slash = format!("{}/", server.url()); + let result = fetch_installed_model_names(&client, &url_with_slash).await; + + mock.assert_async().await; + assert_eq!(result.unwrap(), vec!["x".to_string()]); + } + + // ── ActiveModelState ───────────────────────────────────────────────────── + + #[test] + fn active_model_state_defaults_to_empty_string() { + let state = ActiveModelState::default(); + assert_eq!(*state.0.lock().unwrap(), ""); + } + + #[test] + fn active_model_state_round_trip_write_read() { + let state = ActiveModelState::default(); + { + let mut guard = state.0.lock().unwrap(); + *guard = "gemma4:e2b".to_string(); + } + assert_eq!(*state.0.lock().unwrap(), "gemma4:e2b"); + } + + // ── Persistence round-trip through app_config ─────────────────────────── + + #[test] + fn active_model_key_persists_via_set_and_get_config() { + let conn = crate::database::open_in_memory().unwrap(); + set_config(&conn, ACTIVE_MODEL_KEY, "gemma4:e4b").unwrap(); + let back = get_config(&conn, ACTIVE_MODEL_KEY).unwrap(); + 
assert_eq!(back.as_deref(), Some("gemma4:e4b")); + } + + #[test] + fn active_model_key_constant_matches_expected_value() { + assert_eq!(ACTIVE_MODEL_KEY, "active_model"); + } +} diff --git a/src-tauri/src/search/mod.rs b/src-tauri/src/search/mod.rs index 62ea789e..05c34e90 100644 --- a/src-tauri/src/search/mod.rs +++ b/src-tauri/src/search/mod.rs @@ -17,6 +17,7 @@ use tokio_util::sync::CancellationToken; use crate::commands::{ConversationHistory, GenerationState}; use crate::config::AppConfig; +use crate::models::ActiveModelState; pub mod chunker; pub mod config; @@ -59,6 +60,7 @@ pub async fn search_pipeline( generation: State<'_, GenerationState>, history: State<'_, ConversationHistory>, app_config: State<'_, AppConfig>, + active_model_state: State<'_, ActiveModelState>, ) -> Result<(), String> { // Resolve the runtime search view from the loaded TOML. The single // source of truth lives in `config::defaults`; the loader has already @@ -66,6 +68,14 @@ pub async fn search_pipeline( let runtime_config = config::SearchRuntimeConfig::from_app_config(&app_config); let searxng_endpoint = runtime_config.searxng_endpoint(); + // Snapshot the active model slug once from the picker-backed + // ActiveModelState; drop the guard before any `.await` so we never + // hold a `MutexGuard` across an await point. + let model_name = { + let guard = active_model_state.0.lock().map_err(|e| e.to_string())?; + guard.clone() + }; + // Pre-flight: verify both sandbox services are reachable before touching // the LLM or SearXNG. A 2-second probe prevents a long wait when the // containers are simply not running. 
@@ -84,7 +94,6 @@ pub async fn search_pipeline( "{}/api/chat", app_config.model.ollama_url.trim_end_matches('/') ); - let active_model = app_config.model.active().to_string(); let cancel_token = CancellationToken::new(); generation.set_token(cancel_token.clone()); @@ -92,7 +101,7 @@ pub async fn search_pipeline( let router = pipeline::DefaultRouterJudge::new( ollama_endpoint.clone(), - active_model.clone(), + model_name.clone(), (*client).clone(), cancel_token.clone(), today.clone(), @@ -100,7 +109,7 @@ pub async fn search_pipeline( ); let judge = pipeline::DefaultJudge::new( ollama_endpoint.clone(), - active_model.clone(), + model_name.clone(), (*client).clone(), cancel_token.clone(), runtime_config.judge_timeout_s, @@ -110,7 +119,7 @@ pub async fn search_pipeline( &ollama_endpoint, &searxng_endpoint, &runtime_config.reader_url, - &active_model, + &model_name, &client, cancel_token.clone(), &app_config.prompt.resolved_system, From ad41ac0591e0286af0406874035c0d75e5ddba8f Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Thu, 23 Apr 2026 14:47:53 -0700 Subject: [PATCH 03/42] hardening: add ollama tags fetch timeout and clarify startup seed Two small DoS/clarity fixes flagged in code review: 1. Per-request 5s timeout on `fetch_installed_model_names`. Without it, a hung Ollama socket (TCP accept but no HTTP response) would block `get_model_picker_state` and `set_active_model` indefinitely and wedge the UI at the IPC boundary. Factored the body into a private `_with_timeout` helper so the timeout branch is deterministically testable with a 100ms override against a black-hole TCP listener. 2. Inline comment at the startup `resolve_active_model` call in lib.rs, making the `&[]` installed-list argument's intent explicit: no async runtime here, so we fall through to bootstrap; the frontend's first `get_model_picker_state` reconciles against the live list. 
Signed-off-by: Logan Nguyen --- src-tauri/src/lib.rs | 5 ++++ src-tauri/src/models.rs | 52 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+) diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index 1192625e..0cb7d87c 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -746,6 +746,11 @@ pub fn run() { // `/api/tags` list on first open and may replace this seed. let persisted_active = database::get_config(&db_conn, models::ACTIVE_MODEL_KEY) .expect("failed to read active_model from app_config"); + // The live installed-model list isn't available at startup (no async + // reqwest here). Passing `&[]` forces `resolve_active_model` to fall + // through to the bootstrap default; the first call to + // `get_model_picker_state` from the frontend reconciles against the + // real list and may replace this seed. let initial_active_model = models::resolve_active_model( persisted_active.as_deref(), &[], diff --git a/src-tauri/src/models.rs b/src-tauri/src/models.rs index 2113b0f7..472ad20f 100644 --- a/src-tauri/src/models.rs +++ b/src-tauri/src/models.rs @@ -78,6 +78,13 @@ pub fn validate_model_installed(model: &str, installed: &[String]) -> Result<(), } } +/// Per-request timeout for the Ollama `/api/tags` GET. Guards the IPC +/// boundary: if the daemon accepts the TCP connection but never responds +/// (hung socket, stuck process, network partition), `get_model_picker_state` +/// and `set_active_model` would otherwise block indefinitely and wedge the +/// UI. 5 seconds is generous for a localhost call. +const TAGS_REQUEST_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(5); + /// GETs `{base_url}/api/tags` and returns the list of installed model slugs. 
/// /// Every failure mode (transport error, non-2xx status, JSON decode error) @@ -86,10 +93,22 @@ pub fn validate_model_installed(model: &str, installed: &[String]) -> Result<(), pub async fn fetch_installed_model_names( client: &reqwest::Client, base_url: &str, +) -> Result, String> { + fetch_installed_model_names_with_timeout(client, base_url, TAGS_REQUEST_TIMEOUT).await +} + +/// Internal variant of [`fetch_installed_model_names`] with a configurable +/// per-request timeout. Exists so tests can exercise the timeout branch +/// deterministically without waiting the production 5s. +async fn fetch_installed_model_names_with_timeout( + client: &reqwest::Client, + base_url: &str, + timeout: std::time::Duration, ) -> Result, String> { let url = format!("{}/api/tags", base_url.trim_end_matches('/')); let response = client .get(&url) + .timeout(timeout) .send() .await .map_err(|e| format!("failed to reach Ollama: {e}"))?; @@ -340,6 +359,39 @@ mod tests { ); } + #[tokio::test] + async fn fetch_installed_model_names_times_out_when_ollama_hangs() { + // Bind a TCP listener that accepts connections but never writes a + // response. reqwest will complete the TCP handshake, send the GET, + // then block waiting for bytes that never arrive. The per-request + // timeout is the only thing that lets us recover. Use a 100ms + // override so the test stays fast and deterministic. + let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = listener.local_addr().unwrap(); + // Accept in a background thread but never read/write, so the socket + // stays open and idle until the test drops it. + std::thread::spawn(move || { + // Hold the accepted stream to keep the connection half-open. 
+ let _held = listener.accept().ok(); + std::thread::sleep(std::time::Duration::from_secs(10)); + }); + + let client = reqwest::Client::new(); + let base = format!("http://{addr}"); + let result = fetch_installed_model_names_with_timeout( + &client, + &base, + std::time::Duration::from_millis(100), + ) + .await; + + let err = result.unwrap_err(); + assert!( + err.contains("failed to reach Ollama"), + "expected timeout to surface as transport error, got: {err}" + ); + } + #[tokio::test] async fn fetch_trims_trailing_slash_from_base_url() { let mut server = mockito::Server::new_async().await; From b2dddc9bfa4faa54cede43a5debd6681fc132c5f Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Thu, 23 Apr 2026 14:51:39 -0700 Subject: [PATCH 04/42] feat: add frontend model selection state Signed-off-by: Logan Nguyen --- .../__tests__/useConversationHistory.test.tsx | 1 + .../__tests__/useModelSelection.test.tsx | 118 ++++++++++++++++++ src/hooks/useConversationHistory.ts | 1 + src/hooks/useModelSelection.ts | 80 ++++++++++++ src/types/model.ts | 17 +++ 5 files changed, 217 insertions(+) create mode 100644 src/hooks/__tests__/useModelSelection.test.tsx create mode 100644 src/hooks/useModelSelection.ts create mode 100644 src/types/model.ts diff --git a/src/hooks/__tests__/useConversationHistory.test.tsx b/src/hooks/__tests__/useConversationHistory.test.tsx index 0403cab9..e3ddab9c 100644 --- a/src/hooks/__tests__/useConversationHistory.test.tsx +++ b/src/hooks/__tests__/useConversationHistory.test.tsx @@ -109,6 +109,7 @@ describe('useConversationHistory', () => { search_metadata: null, }, ], + model: MODEL, }); }); diff --git a/src/hooks/__tests__/useModelSelection.test.tsx b/src/hooks/__tests__/useModelSelection.test.tsx new file mode 100644 index 00000000..4659fb1d --- /dev/null +++ b/src/hooks/__tests__/useModelSelection.test.tsx @@ -0,0 +1,118 @@ +import { renderHook, act } from '@testing-library/react'; +import { describe, it, expect, beforeEach } from 'vitest'; +import { 
useModelSelection } from '../useModelSelection'; +import { invoke } from '../../testUtils/mocks/tauri'; + +describe('useModelSelection', () => { + beforeEach(() => { + invoke.mockReset(); + }); + + it('loads active and installed models from the backend', async () => { + invoke.mockResolvedValueOnce({ + active: 'gemma4:e2b', + all: ['gemma4:e2b', 'qwen2.5:7b'], + }); + + const { result } = renderHook(() => useModelSelection()); + await act(async () => {}); + + expect(result.current.activeModel).toBe('gemma4:e2b'); + expect(result.current.availableModels).toEqual([ + 'gemma4:e2b', + 'qwen2.5:7b', + ]); + }); + + it('persists a new active model and updates local state', async () => { + invoke + .mockResolvedValueOnce({ + active: 'gemma4:e2b', + all: ['gemma4:e2b', 'qwen2.5:7b'], + }) + .mockResolvedValueOnce(undefined); + + const { result } = renderHook(() => useModelSelection()); + await act(async () => {}); + + await act(async () => { + await result.current.setActiveModel('qwen2.5:7b'); + }); + + expect(invoke).toHaveBeenCalledWith('set_active_model', { + model: 'qwen2.5:7b', + }); + expect(result.current.activeModel).toBe('qwen2.5:7b'); + }); + + it('clears available models when backend fetch fails', async () => { + invoke.mockRejectedValueOnce(new Error('backend offline')); + + const { result } = renderHook(() => useModelSelection()); + await act(async () => {}); + + expect(result.current.availableModels).toEqual([]); + expect(result.current.activeModel).toBe(''); + }); + + it('falls back to empty state when payload shape is invalid', async () => { + invoke.mockResolvedValueOnce({ active: 42, all: 'not-an-array' }); + + const { result } = renderHook(() => useModelSelection()); + await act(async () => {}); + + expect(result.current.availableModels).toEqual([]); + expect(result.current.activeModel).toBe(''); + }); + + it('re-fetches models when refreshModels is called', async () => { + invoke + .mockResolvedValueOnce({ active: 'gemma4:e2b', all: ['gemma4:e2b'] }) + 
.mockResolvedValueOnce({ + active: 'qwen2.5:7b', + all: ['gemma4:e2b', 'qwen2.5:7b'], + }); + + const { result } = renderHook(() => useModelSelection()); + await act(async () => {}); + + await act(async () => { + await result.current.refreshModels(); + }); + + expect(result.current.activeModel).toBe('qwen2.5:7b'); + expect(result.current.availableModels).toEqual([ + 'gemma4:e2b', + 'qwen2.5:7b', + ]); + }); + + it('rejects null payloads from the backend', async () => { + invoke.mockResolvedValueOnce(null); + + const { result } = renderHook(() => useModelSelection()); + await act(async () => {}); + + expect(result.current.availableModels).toEqual([]); + expect(result.current.activeModel).toBe(''); + }); + + it('rejects non-object payloads from the backend', async () => { + invoke.mockResolvedValueOnce('gemma4:e2b'); + + const { result } = renderHook(() => useModelSelection()); + await act(async () => {}); + + expect(result.current.availableModels).toEqual([]); + expect(result.current.activeModel).toBe(''); + }); + + it('rejects payloads whose `all` array contains non-string entries', async () => { + invoke.mockResolvedValueOnce({ active: 'gemma4:e2b', all: ['ok', 7] }); + + const { result } = renderHook(() => useModelSelection()); + await act(async () => {}); + + expect(result.current.availableModels).toEqual([]); + }); +}); diff --git a/src/hooks/useConversationHistory.ts b/src/hooks/useConversationHistory.ts index 0e1ac8a4..c1b53b15 100644 --- a/src/hooks/useConversationHistory.ts +++ b/src/hooks/useConversationHistory.ts @@ -296,6 +296,7 @@ export function useConversationHistory() { void invoke('generate_title', { conversationId: response.conversation_id, messages: payloads, + model, }); }, [isSaved], diff --git a/src/hooks/useModelSelection.ts b/src/hooks/useModelSelection.ts new file mode 100644 index 00000000..20aad044 --- /dev/null +++ b/src/hooks/useModelSelection.ts @@ -0,0 +1,80 @@ +import { useCallback, useEffect, useState } from 'react'; +import { invoke 
} from '@tauri-apps/api/core'; +import type { ModelPickerState } from '../types/model'; + +/** + * Runtime guard for the IPC boundary. The Rust backend is trusted, but this + * keeps the hook robust against shape drift (schema changes, legacy builds, + * mocks) without pulling in a schema library. + */ +function isModelPickerState(value: unknown): value is ModelPickerState { + if (typeof value !== 'object' || value === null) return false; + const candidate = value as { active?: unknown; all?: unknown }; + return ( + typeof candidate.active === 'string' && + Array.isArray(candidate.all) && + candidate.all.every((entry) => typeof entry === 'string') + ); +} + +/** + * Shape returned by {@link useModelSelection}. + */ +export interface UseModelSelectionResult { + /** The currently active Ollama model name. Empty string until loaded. */ + activeModel: string; + /** All locally installed Ollama model names available for selection. */ + availableModels: string[]; + /** + * Re-fetch the model picker state from the backend. On failure the + * available models list is cleared to avoid showing stale entries. + */ + refreshModels: () => Promise; + /** + * Persist a new active model through the backend and sync local state + * after the backend acknowledges the change. + */ + setActiveModel: (model: string) => Promise; +} + +/** + * React hook that manages the active Ollama model selection. Loads the + * current model + the installed model list from the Rust backend on mount, + * and exposes imperative helpers for refresh and selection. + * + * Callers are expected to invoke `refreshModels` when they need to pick up + * external state changes (e.g. after a model install completes). The hook + * does not poll or auto-refresh. + */ +export function useModelSelection(): UseModelSelectionResult { + // The state setter is intentionally renamed because `setActiveModel` is the + // public async callback returned by this hook. 
+ // eslint-disable-next-line @eslint-react/use-state + const [activeModel, setActiveModelState] = useState(''); + const [availableModels, setAvailableModels] = useState([]); + + const refreshModels = useCallback(async (): Promise => { + try { + const state = await invoke('get_model_picker_state'); + if (!isModelPickerState(state)) { + setAvailableModels([]); + return; + } + setActiveModelState(state.active); + setAvailableModels(state.all); + } catch { + setAvailableModels([]); + } + }, []); + + useEffect(() => { + void refreshModels(); + }, [refreshModels]); + + const setActiveModel = useCallback(async (model: string): Promise => { + await invoke('set_active_model', { model }); + setActiveModelState(model); + }, []); + + return { activeModel, availableModels, refreshModels, setActiveModel }; +} diff --git a/src/types/model.ts b/src/types/model.ts new file mode 100644 index 00000000..e4d5e04a --- /dev/null +++ b/src/types/model.ts @@ -0,0 +1,17 @@ +/* v8 ignore file -- type-only declarations, no runtime code */ + +/** + * Snapshot of model picker state returned by the Rust + * `get_model_picker_state` Tauri command. + * + * - `active` is the currently selected Ollama model name. Never empty once + * the backend has completed startup seeding. + * - `all` is the full list of locally installed Ollama model names, in the + * order the backend chose to surface them (typically matches `ollama list`). + */ +export interface ModelPickerState { + /** The currently active Ollama model name. */ + active: string; + /** All locally installed Ollama model names available for selection. */ + all: string[]; +} From b1d48d0007c250c4b1aad88b8da5e989bef073f1 Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Thu, 23 Apr 2026 14:57:01 -0700 Subject: [PATCH 05/42] test: cover setActiveModel rejection and tighten refresh fallback Address Task 3 code-review feedback. 
- refreshModels now clears both activeModel and availableModels when the backend rejects or returns a malformed payload, so a stale active slug cannot linger alongside an empty list. - Add a test for setActiveModel rejection to confirm errors propagate to the caller and the previously active model stays selected. - Add a test proving a second, malformed refresh clears a previously set active model. - Update JSDoc on refreshModels to spell out the clearing semantics and the single-trigger contract. Signed-off-by: Logan Nguyen --- .../__tests__/useModelSelection.test.tsx | 43 +++++++++++++++++++ src/hooks/useModelSelection.ts | 10 +++-- 2 files changed, 50 insertions(+), 3 deletions(-) diff --git a/src/hooks/__tests__/useModelSelection.test.tsx b/src/hooks/__tests__/useModelSelection.test.tsx index 4659fb1d..825237da 100644 --- a/src/hooks/__tests__/useModelSelection.test.tsx +++ b/src/hooks/__tests__/useModelSelection.test.tsx @@ -114,5 +114,48 @@ describe('useModelSelection', () => { await act(async () => {}); expect(result.current.availableModels).toEqual([]); + expect(result.current.activeModel).toBe(''); + }); + + it('surfaces backend errors and leaves active model unchanged on rejection', async () => { + invoke + .mockResolvedValueOnce({ + active: 'gemma4:e2b', + all: ['gemma4:e2b', 'qwen2.5:7b'], + }) + .mockRejectedValueOnce( + new Error('Model is not installed in Ollama: mystery'), + ); + + const { result } = renderHook(() => useModelSelection()); + await act(async () => {}); + + await expect( + act(async () => { + await result.current.setActiveModel('mystery'); + }), + ).rejects.toThrow('Model is not installed in Ollama: mystery'); + + expect(result.current.activeModel).toBe('gemma4:e2b'); + }); + + it('clears active model when a later refresh returns a malformed payload', async () => { + invoke + .mockResolvedValueOnce({ + active: 'gemma4:e2b', + all: ['gemma4:e2b', 'qwen2.5:7b'], + }) + .mockResolvedValueOnce({ active: 42, all: 'not-an-array' }); + + 
const { result } = renderHook(() => useModelSelection()); + await act(async () => {}); + expect(result.current.activeModel).toBe('gemma4:e2b'); + + await act(async () => { + await result.current.refreshModels(); + }); + + expect(result.current.activeModel).toBe(''); + expect(result.current.availableModels).toEqual([]); }); }); diff --git a/src/hooks/useModelSelection.ts b/src/hooks/useModelSelection.ts index 20aad044..9f0378b2 100644 --- a/src/hooks/useModelSelection.ts +++ b/src/hooks/useModelSelection.ts @@ -26,8 +26,10 @@ export interface UseModelSelectionResult { /** All locally installed Ollama model names available for selection. */ availableModels: string[]; /** - * Re-fetch the model picker state from the backend. On failure the - * available models list is cleared to avoid showing stale entries. + * Re-fetch the model picker state from the backend. Clears both + * `activeModel` and `availableModels` when the backend returns a malformed + * payload or the call rejects. Callers are the single trigger: this hook + * does not auto-retry. 
*/ refreshModels: () => Promise; /** @@ -55,14 +57,16 @@ export function useModelSelection(): UseModelSelectionResult { const refreshModels = useCallback(async (): Promise => { try { - const state = await invoke('get_model_picker_state'); + const state = await invoke('get_model_picker_state'); if (!isModelPickerState(state)) { + setActiveModelState(''); setAvailableModels([]); return; } setActiveModelState(state.active); setAvailableModels(state.all); } catch { + setActiveModelState(''); setAvailableModels([]); } }, []); From 936b9a38c1a233ff53a6190287ed748c3bcc5cb5 Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Thu, 23 Apr 2026 15:00:51 -0700 Subject: [PATCH 06/42] feat: add ask bar model picker Signed-off-by: Logan Nguyen --- src/components/ModelPicker.tsx | 120 ++++++++++++++++ src/components/__tests__/ModelPicker.test.tsx | 128 ++++++++++++++++++ src/view/AskBarView.tsx | 19 +++ src/view/__tests__/AskBarView.test.tsx | 22 +++ 4 files changed, 289 insertions(+) create mode 100644 src/components/ModelPicker.tsx create mode 100644 src/components/__tests__/ModelPicker.test.tsx diff --git a/src/components/ModelPicker.tsx b/src/components/ModelPicker.tsx new file mode 100644 index 00000000..3d1722d3 --- /dev/null +++ b/src/components/ModelPicker.tsx @@ -0,0 +1,120 @@ +import { AnimatePresence, motion } from 'framer-motion'; +import { useEffect, useRef, useState } from 'react'; + +/** + * Hoisted static SVG - chip-style trigger icon for the model picker. + * @see Vercel React Best Practices - Hoist Static JSX Elements + */ +const CHIP_ICON = ( + +); + +/** Props for the ModelPicker component. */ +export interface ModelPickerProps { + /** Currently active model slug; highlighted in the popup. */ + activeModel: string; + /** Full list of available model slugs from Ollama's tags endpoint. */ + models: string[]; + /** When true the trigger is inert (e.g. during generation). */ + disabled: boolean; + /** Called with the chosen slug when the user picks a row. 
*/ + onSelect: (model: string) => void; +} + +/** + * Right-side chip trigger that opens a slug-only popup anchored above + * the ask bar's send button. The popup closes on outside click. + * + * Rendered inline inside AskBarView's bottom row: `absolute right-0 bottom-10` + * keeps it within the ask bar's relative container so no portal is needed. + */ +export function ModelPicker({ + activeModel, + models, + disabled, + onSelect, +}: ModelPickerProps) { + const [isOpen, setIsOpen] = useState(false); + const rootRef = useRef(null); + + useEffect(() => { + if (!isOpen) return; + const handleMouseDown = (event: MouseEvent) => { + if (!rootRef.current?.contains(event.target as Node)) { + setIsOpen(false); + } + }; + document.addEventListener('mousedown', handleMouseDown); + return () => document.removeEventListener('mousedown', handleMouseDown); + }, [isOpen]); + + if (models.length === 0) return null; + + return ( +
+ + + + {isOpen && ( + + {models.map((model) => ( + + ))} + + )} + +
+ ); +} diff --git a/src/components/__tests__/ModelPicker.test.tsx b/src/components/__tests__/ModelPicker.test.tsx new file mode 100644 index 00000000..ef21929c --- /dev/null +++ b/src/components/__tests__/ModelPicker.test.tsx @@ -0,0 +1,128 @@ +import { render, screen, fireEvent } from '@testing-library/react'; +import { describe, it, expect, vi } from 'vitest'; +import { ModelPicker } from '../ModelPicker'; + +describe('ModelPicker', () => { + it('opens a slug-only popup and highlights the active row', () => { + render( + , + ); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + + expect(screen.getByRole('button', { name: 'gemma4:e2b' })).toHaveClass( + 'bg-primary/10', + ); + expect( + screen.getByRole('button', { name: 'qwen2.5:7b' }), + ).toBeInTheDocument(); + expect(screen.queryByText(/fast|vision|recent/i)).toBeNull(); + }); + + it('calls onSelect and closes after choosing a new model', () => { + const onSelect = vi.fn(); + render( + , + ); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + fireEvent.click(screen.getByRole('button', { name: 'qwen2.5:7b' })); + + expect(onSelect).toHaveBeenCalledWith('qwen2.5:7b'); + expect(screen.queryByRole('button', { name: 'gemma4:e2b' })).toBeNull(); + }); + + it('returns null when models list is empty', () => { + const { container } = render( + , + ); + expect(container.firstChild).toBeNull(); + }); + + it('closes when clicking outside', () => { + render( + , + ); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect( + screen.getByRole('button', { name: 'qwen2.5:7b' }), + ).toBeInTheDocument(); + + fireEvent.mouseDown(document.body); + + expect(screen.queryByRole('button', { name: 'qwen2.5:7b' })).toBeNull(); + }); + + it('toggles closed when the trigger is clicked twice', () => { + render( + , + ); + + const trigger = screen.getByRole('button', { name: 'Choose model' }); + fireEvent.click(trigger); + expect( + screen.getByRole('button', 
{ name: 'qwen2.5:7b' }), + ).toBeInTheDocument(); + + fireEvent.click(trigger); + expect(screen.queryByRole('button', { name: 'qwen2.5:7b' })).toBeNull(); + }); + + it('ignores clicks when disabled', () => { + render( + , + ); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect(screen.queryByRole('button', { name: 'qwen2.5:7b' })).toBeNull(); + }); + + it('keeps mousedown inside the picker from closing the popup', () => { + render( + , + ); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + const row = screen.getByRole('button', { name: 'qwen2.5:7b' }); + fireEvent.mouseDown(row); + expect( + screen.getByRole('button', { name: 'qwen2.5:7b' }), + ).toBeInTheDocument(); + }); +}); diff --git a/src/view/AskBarView.tsx b/src/view/AskBarView.tsx index e4bd6ba1..e8a6c010 100644 --- a/src/view/AskBarView.tsx +++ b/src/view/AskBarView.tsx @@ -5,6 +5,7 @@ import { formatQuotedText } from '../utils/formatQuote'; import { useConfig } from '../contexts/ConfigContext'; import { ImageThumbnails } from '../components/ImageThumbnails'; import { CommandSuggestion } from '../components/CommandSuggestion'; +import { ModelPicker } from '../components/ModelPicker'; import { Tooltip } from '../components/Tooltip'; import type { AttachedImage } from '../types/image'; import { MAX_IMAGE_SIZE_BYTES } from '../types/image'; @@ -236,6 +237,12 @@ interface AskBarViewProps { * "normal" = violet ring; "max" = red ring + label; undefined = no ring. */ isDragOver?: 'normal' | 'max'; + /** Currently active Ollama model slug. Enables the model picker when set. */ + activeModel?: string; + /** Full list of model slugs available for selection in the picker. */ + availableModels?: string[]; + /** Called when the user picks a new active model from the picker. 
*/ + onModelSelect?: (model: string) => void; } /** @@ -261,6 +268,9 @@ export function AskBarView({ onImagePreview, onScreenshot, isDragOver, + activeModel, + availableModels, + onModelSelect, }: AskBarViewProps) { /** Ref to the mirror div behind the textarea for command highlighting. */ const mirrorRef = useRef(null); @@ -658,6 +668,15 @@ export function AskBarView({ )} + {activeModel && availableModels && onModelSelect && ( + + )} + { ).toBeInTheDocument(); }); + it('calls onModelSelect when a model row is chosen', () => { + const onModelSelect = vi.fn(); + render( + , + ); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + fireEvent.click(screen.getByRole('button', { name: 'qwen2.5:7b' })); + expect(onModelSelect).toHaveBeenCalledWith('qwen2.5:7b'); + }); + it('displays selectedText when provided', () => { render( Date: Thu, 23 Apr 2026 15:27:16 -0700 Subject: [PATCH 07/42] fix: close model picker popup when disabled and mark active row Signed-off-by: Logan Nguyen --- src/components/ModelPicker.tsx | 11 +++-- src/components/__tests__/ModelPicker.test.tsx | 40 +++++++++++++++++++ 2 files changed, 48 insertions(+), 3 deletions(-) diff --git a/src/components/ModelPicker.tsx b/src/components/ModelPicker.tsx index 3d1722d3..f0f80978 100644 --- a/src/components/ModelPicker.tsx +++ b/src/components/ModelPicker.tsx @@ -60,8 +60,12 @@ export function ModelPicker({ const [isOpen, setIsOpen] = useState(false); const rootRef = useRef(null); + // Derived so a `disabled` flip (e.g. generation starts while popup is open) + // hides the popup immediately without needing a state-syncing effect. 
+ const showPopup = isOpen && !disabled; + useEffect(() => { - if (!isOpen) return; + if (!showPopup) return; const handleMouseDown = (event: MouseEvent) => { if (!rootRef.current?.contains(event.target as Node)) { setIsOpen(false); @@ -69,7 +73,7 @@ export function ModelPicker({ }; document.addEventListener('mousedown', handleMouseDown); return () => document.removeEventListener('mousedown', handleMouseDown); - }, [isOpen]); + }, [showPopup]); if (models.length === 0) return null; @@ -86,7 +90,7 @@ export function ModelPicker({ - {isOpen && ( + {showPopup && ( { onSelect(model); setIsOpen(false); diff --git a/src/components/__tests__/ModelPicker.test.tsx b/src/components/__tests__/ModelPicker.test.tsx index ef21929c..25cced44 100644 --- a/src/components/__tests__/ModelPicker.test.tsx +++ b/src/components/__tests__/ModelPicker.test.tsx @@ -125,4 +125,44 @@ describe('ModelPicker', () => { screen.getByRole('button', { name: 'qwen2.5:7b' }), ).toBeInTheDocument(); }); + + it('closes an open popup when disabled flips true', () => { + const { rerender } = render( + , + ); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect(screen.getByRole('button', { name: 'qwen2.5:7b' })).toBeInTheDocument(); + + rerender( + , + ); + + expect(screen.queryByRole('button', { name: 'qwen2.5:7b' })).toBeNull(); + }); + + it('marks the active row with aria-current', () => { + render( + , + ); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect(screen.getByRole('button', { name: 'gemma4:e2b' })).toHaveAttribute('aria-current', 'true'); + expect(screen.getByRole('button', { name: 'qwen2.5:7b' })).not.toHaveAttribute('aria-current'); + }); }); From 204971dcfb9e1b9f726f8f991b9f8246f6d00298 Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Thu, 23 Apr 2026 15:31:41 -0700 Subject: [PATCH 08/42] feat: wire model picker through app state Signed-off-by: Logan Nguyen --- src/App.tsx | 27 ++++++++++++---- src/__tests__/App.test.tsx | 65 
++++++++++++++++++++++++++++++++++---- 2 files changed, 78 insertions(+), 14 deletions(-) diff --git a/src/App.tsx b/src/App.tsx index 3932450f..4cd0ce1f 100644 --- a/src/App.tsx +++ b/src/App.tsx @@ -14,6 +14,7 @@ import { LogicalSize } from '@tauri-apps/api/dpi'; import { useOllama } from './hooks/useOllama'; import type { Message } from './hooks/useOllama'; import { useConversationHistory } from './hooks/useConversationHistory'; +import { useModelSelection } from './hooks/useModelSelection'; import { ConversationView } from './view/ConversationView'; import { AskBarView, MAX_IMAGES } from './view/AskBarView'; import { OnboardingView } from './view/onboarding/index'; @@ -30,6 +31,10 @@ import { } from './config/commands'; import './App.css'; +/** Fallback model name used before get_model_picker_state resolves at startup. */ +const DEFAULT_MODEL_FALLBACK = 'gemma4:e2b'; + + const OVERLAY_VISIBILITY_EVENT = 'thuki://visibility'; const ONBOARDING_EVENT = 'thuki://onboarding'; @@ -230,6 +235,8 @@ function App() { const [selectedContext, setSelectedContext] = useState(null); const config = useConfig(); const quote = config.quote; + const { activeModel, availableModels, refreshModels, setActiveModel } = + useModelSelection(); /** * True when the window is near the screen bottom and should grow upward. @@ -402,11 +409,12 @@ function App() { setCaptureError(null); setSearchActive(false); + void refreshModels(); reset(); resetHistory(); setOverlayState('visible'); }, - [reset, resetHistory], + [reset, resetHistory, refreshModels], ); /** @@ -575,12 +583,12 @@ function App() { if (isSaved) { await unsave(); } else { - await save(messages); + await save(messages, activeModel || DEFAULT_MODEL_FALLBACK); } } catch { // State stays unchanged on failure; feedback is implicit in the icon. } - }, [isSaved, unsave, save, messages]); + }, [isSaved, unsave, save, messages, activeModel]); /** * Loads a conversation from history, replacing the current session. 
@@ -616,7 +624,7 @@ function App() { const handleSaveAndLoad = useCallback( async (id: string) => { try { - await save(messages); + await save(messages, activeModel || DEFAULT_MODEL_FALLBACK); } catch { // Save failed - abort to avoid leaving the current session unprotected. return; @@ -631,7 +639,7 @@ function App() { setIsHistoryOpen(false); } }, - [save, messages, loadConversation, loadMessages], + [save, messages, loadConversation, loadMessages, activeModel], ); /** @@ -690,12 +698,12 @@ function App() { /** Saves the current conversation then starts a fresh one. */ const handleSaveAndNew = useCallback(async () => { try { - await save(messages); + await save(messages, activeModel || DEFAULT_MODEL_FALLBACK); } catch { return; } resetForNewConversation(); - }, [save, messages, resetForNewConversation]); + }, [save, messages, resetForNewConversation, activeModel]); /** Discards the current conversation and starts a fresh one. */ const handleJustNew = useCallback(() => { @@ -1559,6 +1567,11 @@ function App() { onImagePreview={handleAskBarImagePreview} onScreenshot={handleScreenshot} isDragOver={isDragOver ?? 
undefined} + activeModel={activeModel} + availableModels={availableModels} + onModelSelect={(model) => { + void setActiveModel(model); + }} /> diff --git a/src/__tests__/App.test.tsx b/src/__tests__/App.test.tsx index 091fce11..bad7ce6f 100644 --- a/src/__tests__/App.test.tsx +++ b/src/__tests__/App.test.tsx @@ -28,13 +28,6 @@ describe('App', () => { enableChannelCapture(); }); - it('calls get_model_config on mount', async () => { - render(); - await act(async () => {}); - - expect(invoke).toHaveBeenCalledWith('get_model_config'); - }); - it('fetches model picker state on mount and refreshes it when the overlay shows', async () => { invoke.mockReset(); enableChannelCaptureWithResponses({ @@ -56,6 +49,60 @@ describe('App', () => { expect(invoke).toHaveBeenCalledWith('get_model_picker_state'); }); + it('renders the model picker when the overlay is visible and models load', async () => { + enableChannelCaptureWithResponses({ + get_model_picker_state: { + active: 'gemma4:e2b', + all: ['gemma4:e2b', 'qwen2.5:7b'], + }, + }); + + render(); + await act(async () => {}); + await showOverlay(); + + expect( + screen.getByRole('button', { name: 'Choose model' }), + ).toBeInTheDocument(); + }); + + it('saves the conversation with the currently selected model', async () => { + enableChannelCaptureWithResponses({ + get_model_picker_state: { + active: 'gemma4:e2b', + all: ['gemma4:e2b', 'qwen2.5:7b'], + }, + save_conversation: { conversation_id: 'conv-1' }, + generate_title: undefined, + set_active_model: undefined, + }); + + render(); + await act(async () => {}); + await showOverlay(); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + await act(async () => { + fireEvent.click(screen.getByRole('button', { name: 'qwen2.5:7b' })); + }); + + const textarea = screen.getByPlaceholderText('Ask Thuki anything...'); + fireEvent.change(textarea, { target: { value: 'hello there' } }); + fireEvent.keyDown(textarea, { key: 'Enter', shiftKey: false }); + await 
act(async () => {}); + + act(() => { + getLastChannel()?.simulateMessage({ type: 'Token', data: 'Hi there!' }); + getLastChannel()?.simulateMessage({ type: 'Done' }); + }); + + fireEvent.click(screen.getByLabelText('Save conversation')); + + expect(invoke).toHaveBeenCalledWith( + 'save_conversation', + expect.objectContaining({ model: 'qwen2.5:7b' }), + ); + }); it('grows upward when near bottom screen edge', async () => { const { container } = render(); @@ -687,6 +734,10 @@ describe('App', () => { it('closes history panel when a conversation is loaded', async () => { enableChannelCaptureWithResponses({ + get_model_picker_state: { + active: 'gemma4:e2b', + all: ['gemma4:e2b'], + }, list_conversations: [], }); From 1f72e86ba4756c7e9659d6e923c6901c073608d1 Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Thu, 23 Apr 2026 15:37:40 -0700 Subject: [PATCH 09/42] refactor: stabilize model select handler and swallow rejection Wraps the onModelSelect callback in useCallback so the prop identity is stable across renders, and attaches a noop .catch to suppress unhandled rejection warnings when the backend rejects an uninstalled-slug race. Also drops a one-line comment at the first save() call site explaining the activeModel empty-string fallback window. Signed-off-by: Logan Nguyen --- src/App.tsx | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/src/App.tsx b/src/App.tsx index 4cd0ce1f..ebd784ef 100644 --- a/src/App.tsx +++ b/src/App.tsx @@ -583,6 +583,8 @@ function App() { if (isSaved) { await unsave(); } else { + // activeModel is empty string until the model picker hook resolves on first + // load; fall back to the bootstrap default during that brief window. 
await save(messages, activeModel || DEFAULT_MODEL_FALLBACK); } } catch { @@ -1283,6 +1285,24 @@ function App() { requestAnimationFrame(() => inputRef.current?.focus()); }, [isSubmitPending, cancel, setSearchActive, setSelectedContext]); + /** + * Persists the user's model choice via the backend. Silently no-ops on + * rejection: the only reject path is a race where the chosen model was + * uninstalled between the picker render and the click. The next + * `refreshModels` (fired on overlay show) will reconcile the list. + */ + const handleModelSelect = useCallback( + (model: string) => { + void setActiveModel(model).catch( + /* v8 ignore next 3 -- rejection requires a mid-render uninstall race that cannot be triggered in jsdom */ + () => { + // Intentional swallow: see docblock above. + }, + ); + }, + [setActiveModel], + ); + /** * Synchronizes the React animation state with Tauri-driven overlay visibility * requests emitted from the Rust backend. @@ -1569,9 +1589,7 @@ function App() { isDragOver={isDragOver ?? undefined} activeModel={activeModel} availableModels={availableModels} - onModelSelect={(model) => { - void setActiveModel(model); - }} + onModelSelect={handleModelSelect} /> From 013ad218bd4eb2e5d68258a44cce496bdd401dc2 Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Thu, 23 Apr 2026 15:41:04 -0700 Subject: [PATCH 10/42] docs: document local model picker and reformat tests Update README and configurations.md to reflect the in-app model picker that now lists live Ollama installs and persists user selection. The `THUKI_SUPPORTED_AI_MODELS` env var is now a bootstrap/fallback list only. Remove the "in-app model switching" bullet from Future Work since it has shipped. Reformat two picker test files to satisfy prettier. 
Signed-off-by: Logan Nguyen --- README.md | 3 +-- src/components/__tests__/ModelPicker.test.tsx | 13 ++++++++++--- src/view/__tests__/AskBarView.test.tsx | 2 +- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index b66dd707..4dddb116 100644 --- a/README.md +++ b/README.md @@ -79,7 +79,7 @@ Most AI tools require accounts, API keys, or subscriptions that bill you per tok ### Step 1: Set Up Your AI Engine -> **Default model:** Thuki ships with [`gemma4:e2b`](https://ollama.com/library/gemma4) by default, an effective 2B parameter edge model from Google. It runs comfortably on most modern Macs with 8 GB of RAM and delivers strong performance on reasoning, coding, and vision tasks. To use a different model, edit `~/Library/Application Support/com.quietnode.thuki/config.toml` and reorder the `[model] available` list so your preferred model is first. See [Configurations](docs/configurations.md) for the full schema. +> **Default model:** Thuki ships with [`gemma4:e2b`](https://ollama.com/library/gemma4) by default, an effective 2B parameter edge model from Google. It runs comfortably on most modern Macs with 8 GB of RAM and delivers strong performance on reasoning, coding, and vision tasks. The ask-bar model picker lists the models currently installed in your local Ollama and lets you switch the active model without leaving the overlay. To change the bootstrap default itself, edit `~/Library/Application Support/com.quietnode.thuki/config.toml` and reorder the `[model] available` list so your preferred model is first. See [Configurations](docs/configurations.md) for the full schema. Choose one of the two options below to set up your AI engine before installing Thuki. @@ -256,7 +256,6 @@ The big leap: from answering questions to taking action. More flexibility over the model powering Thuki. 
- **Native settings panel (⌘,):** a proper macOS preferences window to configure your model, Ollama endpoint, activation shortcut, slash commands, and system prompt. No config files needed. -- **In-app model switching:** swap between any Ollama model from the UI without restarting (the backend already supports multiple models via the `[model] available` list in `config.toml`; the picker UI is next) - **Multiple provider support:** opt in to OpenAI, Anthropic, or any OpenAI-compatible endpoint as an alternative to local Ollama - **Custom activation shortcut:** change the double-tap trigger to any key or combo you prefer diff --git a/src/components/__tests__/ModelPicker.test.tsx b/src/components/__tests__/ModelPicker.test.tsx index 25cced44..8b7036a1 100644 --- a/src/components/__tests__/ModelPicker.test.tsx +++ b/src/components/__tests__/ModelPicker.test.tsx @@ -137,7 +137,9 @@ describe('ModelPicker', () => { ); fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - expect(screen.getByRole('button', { name: 'qwen2.5:7b' })).toBeInTheDocument(); + expect( + screen.getByRole('button', { name: 'qwen2.5:7b' }), + ).toBeInTheDocument(); rerender( { ); fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - expect(screen.getByRole('button', { name: 'gemma4:e2b' })).toHaveAttribute('aria-current', 'true'); - expect(screen.getByRole('button', { name: 'qwen2.5:7b' })).not.toHaveAttribute('aria-current'); + expect(screen.getByRole('button', { name: 'gemma4:e2b' })).toHaveAttribute( + 'aria-current', + 'true', + ); + expect( + screen.getByRole('button', { name: 'qwen2.5:7b' }), + ).not.toHaveAttribute('aria-current'); }); }); diff --git a/src/view/__tests__/AskBarView.test.tsx b/src/view/__tests__/AskBarView.test.tsx index 3a77d107..e6fe5ccc 100644 --- a/src/view/__tests__/AskBarView.test.tsx +++ b/src/view/__tests__/AskBarView.test.tsx @@ -224,7 +224,7 @@ describe('AskBarView', () => { onCancel={vi.fn()} inputRef={makeRef()} 
activeModel="gemma4:e2b" - availableModels={["gemma4:e2b", "qwen2.5:7b"]} + availableModels={['gemma4:e2b', 'qwen2.5:7b']} onModelSelect={vi.fn()} />, ); From e213d108a7dde9387441b3d503af1403374f4df7 Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Thu, 23 Apr 2026 16:09:41 -0700 Subject: [PATCH 11/42] fix: use thuki theme tokens for model picker popup bg-surface/95 + border-white/10 did not resolve against Thuki's Tailwind v4 @theme (which only defines surface-base, surface-elevated, surface-border). The popup would have rendered without a background. Switch to the same token chain as HistoryPanel and CommandSuggestion (rounded-xl, surface-border, surface-base, shadow-chat) for visual parity with existing Thuki chrome. Signed-off-by: Logan Nguyen --- src/components/ModelPicker.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/components/ModelPicker.tsx b/src/components/ModelPicker.tsx index f0f80978..59f3bba5 100644 --- a/src/components/ModelPicker.tsx +++ b/src/components/ModelPicker.tsx @@ -96,7 +96,7 @@ export function ModelPicker({ animate={{ opacity: 1, y: 0 }} exit={{ opacity: 0, y: 6 }} transition={{ duration: 0.16 }} - className="absolute right-0 bottom-10 w-56 overflow-hidden rounded-[18px] border border-white/10 bg-surface/95 shadow-2xl backdrop-blur-xl" + className="absolute right-0 bottom-10 w-56 overflow-hidden rounded-xl border border-surface-border bg-surface-base shadow-chat backdrop-blur-2xl" > {models.map((model) => ( + ); +} + +/** Props for the {@link ModelPickerList} component. */ +export interface ModelPickerListProps { + /** Ref forwarded to the outer list container for outside-click detection. */ + listRef?: RefObject; /** Currently active model slug; highlighted in the popup. */ activeModel: string; /** Full list of available model slugs from Ollama's tags endpoint. */ models: string[]; - /** When true the trigger is inert (e.g. during generation). */ - disabled: boolean; + /** True when the list should be visible. 
*/ + isOpen: boolean; /** Called with the chosen slug when the user picks a row. */ onSelect: (model: string) => void; } /** - * Right-side chip trigger that opens a slug-only popup anchored above - * the ask bar's send button. The popup closes on outside click. + * Animated popup rendered inline above the ask bar input row. * - * Rendered inline inside AskBarView's bottom row: `absolute right-0 bottom-10` - * keeps it within the ask bar's relative container so no portal is needed. + * Uses a height animation inside `AnimatePresence` so the morphing + * container's `ResizeObserver` can smoothly grow the native window as + * the list mounts. Renders nothing when `isOpen` is false or the + * `models` list is empty. */ -export function ModelPicker({ +export function ModelPickerList({ + listRef, activeModel, models, - disabled, + isOpen, onSelect, -}: ModelPickerProps) { - const [isOpen, setIsOpen] = useState(false); - const rootRef = useRef(null); - - // Derived so a `disabled` flip (e.g. generation starts while popup is open) - // hides the popup immediately without needing a state-syncing effect. - const showPopup = isOpen && !disabled; - - useEffect(() => { - if (!showPopup) return; - const handleMouseDown = (event: MouseEvent) => { - if (!rootRef.current?.contains(event.target as Node)) { - setIsOpen(false); - } - }; - document.addEventListener('mousedown', handleMouseDown); - return () => document.removeEventListener('mousedown', handleMouseDown); - }, [showPopup]); - - if (models.length === 0) return null; - +}: ModelPickerListProps) { return ( -
- - - - {showPopup && ( - - {models.map((model) => ( - - ))} - - )} - -
+ + {isOpen && models.length > 0 && ( + +
+
+ {models.map((model) => ( + + ))} +
+
+
+ )} +
); } diff --git a/src/components/__tests__/ModelPicker.test.tsx b/src/components/__tests__/ModelPicker.test.tsx index 8b7036a1..3bfbde13 100644 --- a/src/components/__tests__/ModelPicker.test.tsx +++ b/src/components/__tests__/ModelPicker.test.tsx @@ -1,169 +1,103 @@ import { render, screen, fireEvent } from '@testing-library/react'; import { describe, it, expect, vi } from 'vitest'; -import { ModelPicker } from '../ModelPicker'; +import { ModelPickerList, ModelPickerTrigger } from '../ModelPicker'; -describe('ModelPicker', () => { - it('opens a slug-only popup and highlights the active row', () => { - render( - , +describe('ModelPickerTrigger', () => { + it('exposes a Choose model button with aria-expanded reflecting open state', () => { + const { rerender } = render( + , ); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + const trigger = screen.getByRole('button', { name: 'Choose model' }); + expect(trigger).toHaveAttribute('aria-expanded', 'false'); - expect(screen.getByRole('button', { name: 'gemma4:e2b' })).toHaveClass( - 'bg-primary/10', + rerender( + , ); expect( - screen.getByRole('button', { name: 'qwen2.5:7b' }), - ).toBeInTheDocument(); - expect(screen.queryByText(/fast|vision|recent/i)).toBeNull(); + screen.getByRole('button', { name: 'Choose model' }), + ).toHaveAttribute('aria-expanded', 'true'); }); - it('calls onSelect and closes after choosing a new model', () => { - const onSelect = vi.fn(); + it('fires onToggle when clicked', () => { + const onToggle = vi.fn(); render( - , ); fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - fireEvent.click(screen.getByRole('button', { name: 'qwen2.5:7b' })); - - expect(onSelect).toHaveBeenCalledWith('qwen2.5:7b'); - expect(screen.queryByRole('button', { name: 'gemma4:e2b' })).toBeNull(); + expect(onToggle).toHaveBeenCalledTimes(1); }); - it('returns null when models list is empty', () => { - const { container } = render( - , - ); - 
expect(container.firstChild).toBeNull(); - }); - - it('closes when clicking outside', () => { + it('does not fire onToggle when disabled', () => { + const onToggle = vi.fn(); render( - , + , ); fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - expect( - screen.getByRole('button', { name: 'qwen2.5:7b' }), - ).toBeInTheDocument(); - - fireEvent.mouseDown(document.body); - - expect(screen.queryByRole('button', { name: 'qwen2.5:7b' })).toBeNull(); + expect(onToggle).not.toHaveBeenCalled(); }); +}); - it('toggles closed when the trigger is clicked twice', () => { - render( - { + it('renders nothing when closed', () => { + const { container } = render( + , ); - - const trigger = screen.getByRole('button', { name: 'Choose model' }); - fireEvent.click(trigger); - expect( - screen.getByRole('button', { name: 'qwen2.5:7b' }), - ).toBeInTheDocument(); - - fireEvent.click(trigger); - expect(screen.queryByRole('button', { name: 'qwen2.5:7b' })).toBeNull(); + expect(container.firstChild).toBeNull(); }); - it('ignores clicks when disabled', () => { - render( - { + const { container } = render( + , ); - - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - expect(screen.queryByRole('button', { name: 'qwen2.5:7b' })).toBeNull(); + expect(container.firstChild).toBeNull(); }); - it('keeps mousedown inside the picker from closing the popup', () => { + it('renders a slug-only row per model when open and highlights the active row', () => { render( - , ); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - const row = screen.getByRole('button', { name: 'qwen2.5:7b' }); - fireEvent.mouseDown(row); - expect( - screen.getByRole('button', { name: 'qwen2.5:7b' }), - ).toBeInTheDocument(); - }); - - it('closes an open popup when disabled flips true', () => { - const { rerender } = render( - , + expect(screen.getByRole('button', { name: 'gemma4:e2b' })).toHaveClass( + 'bg-primary/10', ); - - 
fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); expect( screen.getByRole('button', { name: 'qwen2.5:7b' }), ).toBeInTheDocument(); - - rerender( - , - ); - - expect(screen.queryByRole('button', { name: 'qwen2.5:7b' })).toBeNull(); + expect(screen.queryByText(/fast|vision|recent/i)).toBeNull(); }); it('marks the active row with aria-current', () => { render( - , ); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); expect(screen.getByRole('button', { name: 'gemma4:e2b' })).toHaveAttribute( 'aria-current', 'true', @@ -172,4 +106,19 @@ describe('ModelPicker', () => { screen.getByRole('button', { name: 'qwen2.5:7b' }), ).not.toHaveAttribute('aria-current'); }); + + it('calls onSelect with the chosen slug when a row is clicked', () => { + const onSelect = vi.fn(); + render( + , + ); + + fireEvent.click(screen.getByRole('button', { name: 'qwen2.5:7b' })); + expect(onSelect).toHaveBeenCalledWith('qwen2.5:7b'); + }); }); diff --git a/src/view/AskBarView.tsx b/src/view/AskBarView.tsx index e8a6c010..c1ca73f4 100644 --- a/src/view/AskBarView.tsx +++ b/src/view/AskBarView.tsx @@ -5,7 +5,7 @@ import { formatQuotedText } from '../utils/formatQuote'; import { useConfig } from '../contexts/ConfigContext'; import { ImageThumbnails } from '../components/ImageThumbnails'; import { CommandSuggestion } from '../components/CommandSuggestion'; -import { ModelPicker } from '../components/ModelPicker'; +import { ModelPickerList, ModelPickerTrigger } from '../components/ModelPicker'; import { Tooltip } from '../components/Tooltip'; import type { AttachedImage } from '../types/image'; import { MAX_IMAGE_SIZE_BYTES } from '../types/image'; @@ -293,6 +293,61 @@ export function AskBarView({ return () => clearTimeout(timer); }, [pasteMaxError]); + // ─── Model picker state ─────────────────────────────────────────────────── + + /** True while the model picker popup is visible above the input row. 
*/ + const [isModelPickerOpen, setIsModelPickerOpen] = useState(false); + const modelPickerTriggerRef = useRef(null); + const modelPickerListRef = useRef(null); + + /** Gate combining open intent with prerequisites (not busy, models loaded). */ + const modelPickerAvailable = Boolean( + activeModel && + availableModels && + availableModels.length > 0 && + onModelSelect, + ); + const showModelPicker = isModelPickerOpen && !isBusy && modelPickerAvailable; + + /* eslint-disable @eslint-react/set-state-in-effect -- intentional: when + generation starts while the picker is open we must reset the user's + open intent so it does not reappear when generation ends. No secondary + effects are triggered by this reset. */ + useEffect(() => { + if (isBusy && isModelPickerOpen) setIsModelPickerOpen(false); + }, [isBusy, isModelPickerOpen]); + /* eslint-enable @eslint-react/set-state-in-effect */ + + /** Outside-click closes the popup. Skips when closed to avoid listener leaks. */ + useEffect(() => { + if (!showModelPicker) return; + const handleMouseDown = (event: MouseEvent) => { + const target = event.target as Node; + if (modelPickerTriggerRef.current?.contains(target)) return; + if (modelPickerListRef.current?.contains(target)) return; + setIsModelPickerOpen(false); + }; + document.addEventListener('mousedown', handleMouseDown); + return () => document.removeEventListener('mousedown', handleMouseDown); + }, [showModelPicker]); + + const toggleModelPicker = useCallback(() => { + setIsModelPickerOpen((open) => !open); + }, []); + + /** + * Forwards the picked slug to the parent and collapses the popup. The + * parent's `onModelSelect` already handles backend persistence; this + * closure just handles the UI state transition. 
+ */ + const handleModelRowSelect = useCallback( + (model: string) => { + onModelSelect?.(model); + setIsModelPickerOpen(false); + }, + [onModelSelect], + ); + // ─── Command suggestion state ───────────────────────────────────────────── /** @@ -592,6 +647,19 @@ export function AskBarView({
)}
+ {/* Model picker list renders above the input row in the normal DOM + flow so the morphing container's ResizeObserver can grow the + native window upward as the list mounts, avoiding the clipping + that an absolute popup would suffer inside `overflow-hidden`. */} + {modelPickerAvailable && activeModel && availableModels && ( + + )}
)} - {activeModel && availableModels && onModelSelect && ( - )} diff --git a/src/view/__tests__/AskBarView.test.tsx b/src/view/__tests__/AskBarView.test.tsx index e6fe5ccc..f15ed5e6 100644 --- a/src/view/__tests__/AskBarView.test.tsx +++ b/src/view/__tests__/AskBarView.test.tsx @@ -234,7 +234,7 @@ describe('AskBarView', () => { ).toBeInTheDocument(); }); - it('calls onModelSelect when a model row is chosen', () => { + it('calls onModelSelect and closes the popup when a model row is chosen', () => { const onModelSelect = vi.fn(); render( { fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); fireEvent.click(screen.getByRole('button', { name: 'qwen2.5:7b' })); expect(onModelSelect).toHaveBeenCalledWith('qwen2.5:7b'); + expect(screen.queryByRole('button', { name: 'qwen2.5:7b' })).toBeNull(); + }); + + it('closes the model picker popup when generation starts', () => { + const { rerender } = render( + , + ); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect( + screen.getByRole('button', { name: 'qwen2.5:7b' }), + ).toBeInTheDocument(); + + rerender( + , + ); + + expect(screen.queryByRole('button', { name: 'qwen2.5:7b' })).toBeNull(); + }); + + it('closes the model picker popup when clicking outside the picker', () => { + render( + , + ); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect( + screen.getByRole('button', { name: 'qwen2.5:7b' }), + ).toBeInTheDocument(); + + fireEvent.mouseDown(document.body); + expect(screen.queryByRole('button', { name: 'qwen2.5:7b' })).toBeNull(); + }); + + it('keeps the popup open when a mousedown lands on the trigger itself', () => { + render( + , + ); + + const trigger = screen.getByRole('button', { name: 'Choose model' }); + fireEvent.click(trigger); + fireEvent.mouseDown(trigger); + + expect( + screen.getByRole('button', { name: 'qwen2.5:7b' }), + ).toBeInTheDocument(); + }); + + it('keeps the popup open when a mousedown lands inside a row before 
click', () => { + render( + , + ); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + const row = screen.getByRole('button', { name: 'qwen2.5:7b' }); + fireEvent.mouseDown(row); + expect( + screen.getByRole('button', { name: 'qwen2.5:7b' }), + ).toBeInTheDocument(); + }); + + it('hides the model picker trigger when no models are available', () => { + render( + , + ); + expect(screen.queryByRole('button', { name: 'Choose model' })).toBeNull(); }); it('displays selectedText when provided', () => { From 522b4e6bcd53220095c4a7328eb7c8243fa9d51b Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Thu, 23 Apr 2026 17:38:15 -0700 Subject: [PATCH 13/42] refactor: portal-render model picker menu via createPortal Signed-off-by: Logan Nguyen --- src/__tests__/App.test.tsx | 2 +- src/components/ModelPicker.tsx | 336 +++++++++++++----- src/components/__tests__/ModelPicker.test.tsx | 313 ++++++++++------ src/view/AskBarView.tsx | 83 +---- src/view/__tests__/AskBarView.test.tsx | 63 +--- 5 files changed, 478 insertions(+), 319 deletions(-) diff --git a/src/__tests__/App.test.tsx b/src/__tests__/App.test.tsx index bad7ce6f..a39eb00c 100644 --- a/src/__tests__/App.test.tsx +++ b/src/__tests__/App.test.tsx @@ -83,7 +83,7 @@ describe('App', () => { fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); await act(async () => { - fireEvent.click(screen.getByRole('button', { name: 'qwen2.5:7b' })); + fireEvent.click(screen.getByRole('menuitem', { name: 'qwen2.5:7b' })); }); const textarea = screen.getByPlaceholderText('Ask Thuki anything...'); diff --git a/src/components/ModelPicker.tsx b/src/components/ModelPicker.tsx index 7e4f5034..6e25dc8d 100644 --- a/src/components/ModelPicker.tsx +++ b/src/components/ModelPicker.tsx @@ -1,5 +1,12 @@ import { AnimatePresence, motion } from 'framer-motion'; -import type { RefObject } from 'react'; +import { + useCallback, + useEffect, + useLayoutEffect, + useRef, + useState, +} from 'react'; +import { 
createPortal } from 'react-dom'; /** * Hoisted static SVG - chip-style trigger icon for the model picker. @@ -34,112 +41,259 @@ const CHIP_ICON = ( ); -/** Props for the {@link ModelPickerTrigger} component. */ -export interface ModelPickerTriggerProps { - /** Ref forwarded to the underlying button so callers can manage focus / outside-click. */ - triggerRef?: RefObject; - /** True while the associated popup is visible - drives `aria-expanded`. */ - isOpen: boolean; - /** When true the trigger is inert (e.g. during generation). */ - disabled: boolean; - /** Fires on click to toggle the popup open/closed. */ - onToggle: () => void; -} +/** Hoisted static checkmark path used on the active row. */ +const CHECK_ICON_PATH = ( + +); -/** - * Right-side chip button that opens the model picker popup. - * - * The popup itself lives in {@link ModelPickerList} and is rendered in the - * ask bar's upper DOM-flow slot so the morphing container can grow the - * native window to reveal it without being clipped by `overflow-hidden`. - */ -export function ModelPickerTrigger({ - triggerRef, - isOpen, - disabled, - onToggle, -}: ModelPickerTriggerProps) { - return ( - - ); +/** Fixed target width for the portal menu in pixels. */ +const MENU_WIDTH = 220; +/** Viewport-edge padding used when clamping the left position. */ +const EDGE_PADDING = 8; +/** Vertical gap between the trigger and the menu. */ +const MENU_GAP = 8; + +/** Screen position for the portal menu, computed from the trigger rect. */ +interface MenuPosition { + top: number; + left: number; } -/** Props for the {@link ModelPickerList} component. */ -export interface ModelPickerListProps { - /** Ref forwarded to the outer list container for outside-click detection. */ - listRef?: RefObject; - /** Currently active model slug; highlighted in the popup. */ +/** Props for the {@link ModelPicker} component. */ +export interface ModelPickerProps { + /** Currently active model slug; the matching row renders an orange tick. 
*/ activeModel: string; /** Full list of available model slugs from Ollama's tags endpoint. */ models: string[]; - /** True when the list should be visible. */ - isOpen: boolean; + /** When true the trigger is inert (e.g. during generation) and any open menu closes. */ + disabled: boolean; /** Called with the chosen slug when the user picks a row. */ onSelect: (model: string) => void; } /** - * Animated popup rendered inline above the ask bar input row. + * Single self-contained model picker rendered as a portal menu. + * + * The menu escapes the ask bar's morphing container (which sets + * `overflow-hidden`) by rendering into `document.body` via + * {@link createPortal}. That keeps the Thuki window size stable while the + * menu floats above it like a native macOS NSMenu. * - * Uses a height animation inside `AnimatePresence` so the morphing - * container's `ResizeObserver` can smoothly grow the native window as - * the list mounts. Renders nothing when `isOpen` is false or the - * `models` list is empty. + * Positioning algorithm: + * 1. Read the trigger's `getBoundingClientRect` on open. + * 2. Right-align the menu to the trigger, clamped to 8px from the left edge. + * 3. Prefer opening above the trigger. If that would clip above the + * viewport, open below instead. Uses a two-phase rAF measurement: + * render once to measure the menu height, then adjust `top`. + * 4. Re-run on every scroll / resize / window blur while the menu is open. + * + * All listeners (scroll, resize, mousedown, keydown) are attached in a single + * effect gated on {@link showMenu} and removed on close or unmount. 
*/ -export function ModelPickerList({ - listRef, +export function ModelPicker({ activeModel, models, - isOpen, + disabled, onSelect, -}: ModelPickerListProps) { +}: ModelPickerProps) { + const [isOpen, setIsOpen] = useState(false); + const [position, setPosition] = useState(null); + + const triggerRef = useRef(null); + const menuRef = useRef(null); + + /** + * Combined open gate: hides the menu if the picker becomes disabled or + * empty while the user intent (`isOpen`) is still true. The underlying + * `isOpen` state is still reset to false by the disabled-sync effect + * below so re-enabling does not reopen a stale menu. + */ + const showMenu = isOpen && !disabled && models.length > 0; + + /** Recomputes the menu position from the current trigger rect. */ + const updatePosition = useCallback(() => { + const trigger = triggerRef.current; + /* v8 ignore start -- trigger ref is always set while the menu can be open; + guard is defensive for concurrent unmount. */ + if (!trigger) return; + /* v8 ignore stop */ + const rect = trigger.getBoundingClientRect(); + const left = Math.max(EDGE_PADDING, rect.right - MENU_WIDTH); + + const menuEl = menuRef.current; + const menuHeight = menuEl?.offsetHeight ?? 0; + let top = rect.top - menuHeight - MENU_GAP; + if (top < 0) { + top = rect.bottom + MENU_GAP; + } + setPosition({ top, left }); + }, []); + + /** + * First-frame position: read the rect synchronously so the menu mounts + * at an approximate spot, then the effect below re-measures height and + * flips above/below on the following frame. + */ + /* eslint-disable @eslint-react/set-state-in-effect -- intentional: seeding + the initial menu position from the trigger rect is exactly what a layout + effect is for. The rAF inside the next effect corrects the top coordinate + once the menu has laid out and a real height is available. 
*/ + useLayoutEffect(() => { + if (!showMenu) { + setPosition(null); + return; + } + const trigger = triggerRef.current; + /* v8 ignore start -- showMenu implies trigger is mounted; defensive guard. */ + if (!trigger) return; + /* v8 ignore stop */ + const rect = trigger.getBoundingClientRect(); + const left = Math.max(EDGE_PADDING, rect.right - MENU_WIDTH); + // Start above the trigger by an estimated offset so the first paint + // is close to final. The rAF below corrects based on real height. + setPosition({ top: rect.top - MENU_GAP, left }); + }, [showMenu]); + /* eslint-enable @eslint-react/set-state-in-effect */ + + /** + * Attaches all live listeners for the open menu and re-measures once the + * menu has laid out so the above/below flip uses the real height. + */ + useEffect(() => { + if (!showMenu) return; + + // Re-measure after the portal has rendered once. + const rafId = requestAnimationFrame(updatePosition); + + const handleScroll = () => { + requestAnimationFrame(updatePosition); + }; + const handleResize = () => { + requestAnimationFrame(updatePosition); + }; + const handleMouseDown = (event: MouseEvent) => { + const target = event.target as Node; + if (triggerRef.current?.contains(target)) return; + if (menuRef.current?.contains(target)) return; + setIsOpen(false); + }; + const handleKeyDown = (event: KeyboardEvent) => { + if (event.key === 'Escape') { + setIsOpen(false); + } + }; + + window.addEventListener('scroll', handleScroll, { passive: true }); + window.addEventListener('resize', handleResize, { passive: true }); + document.addEventListener('mousedown', handleMouseDown); + document.addEventListener('keydown', handleKeyDown); + + return () => { + cancelAnimationFrame(rafId); + window.removeEventListener('scroll', handleScroll); + window.removeEventListener('resize', handleResize); + document.removeEventListener('mousedown', handleMouseDown); + document.removeEventListener('keydown', handleKeyDown); + }; + }, [showMenu, updatePosition]); + + 
/** + * When the picker becomes disabled (e.g. generation starts), collapse + * the open intent so re-enabling does not reopen a stale menu. + */ + /* eslint-disable @eslint-react/set-state-in-effect -- intentional: mirror the + disabled prop into the local open state so a mid-open disable cleanly + closes. No secondary effects are triggered by this reset. */ + useEffect(() => { + if (disabled && isOpen) setIsOpen(false); + }, [disabled, isOpen]); + /* eslint-enable @eslint-react/set-state-in-effect */ + + const handleToggle = useCallback(() => { + setIsOpen((prev) => !prev); + }, []); + + const handleRowClick = useCallback( + (model: string) => { + onSelect(model); + setIsOpen(false); + }, + [onSelect], + ); + + if (models.length === 0) return null; + + /* v8 ignore next 2 -- SSR guard; Tauri + happy-dom always provide document. */ + const portalTarget = typeof document !== 'undefined' ? document.body : null; + return ( - - {isOpen && models.length > 0 && ( - -
-
- {models.map((model) => ( - - ))} -
-
-
- )} -
+ <> + + {portalTarget && + createPortal( + + {showMenu && position && ( + + {models.map((model) => { + const active = model === activeModel; + return ( + + ); + })} + + )} + , + portalTarget, + )} + ); } diff --git a/src/components/__tests__/ModelPicker.test.tsx b/src/components/__tests__/ModelPicker.test.tsx index 3bfbde13..b1e56fd4 100644 --- a/src/components/__tests__/ModelPicker.test.tsx +++ b/src/components/__tests__/ModelPicker.test.tsx @@ -1,124 +1,235 @@ -import { render, screen, fireEvent } from '@testing-library/react'; +import { render, screen, fireEvent, act } from '@testing-library/react'; import { describe, it, expect, vi } from 'vitest'; -import { ModelPickerList, ModelPickerTrigger } from '../ModelPicker'; +import { ModelPicker } from '../ModelPicker'; -describe('ModelPickerTrigger', () => { - it('exposes a Choose model button with aria-expanded reflecting open state', () => { - const { rerender } = render( - , - ); +/** Renders a ModelPicker with a small default model list. */ +function renderPicker( + overrides: Partial> = {}, +) { + const props: React.ComponentProps = { + activeModel: 'gemma4:e2b', + models: ['gemma4:e2b', 'qwen2.5:7b'], + disabled: false, + onSelect: vi.fn(), + ...overrides, + }; + return { props, ...render() }; +} +describe('ModelPicker', () => { + it('does not render when models list is empty', () => { + const { container } = renderPicker({ models: [], activeModel: '' }); + expect(container.firstChild).toBeNull(); + expect(screen.queryByRole('button', { name: 'Choose model' })).toBeNull(); + }); + + it('renders the Choose model trigger with chip icon', () => { + const { container } = renderPicker(); const trigger = screen.getByRole('button', { name: 'Choose model' }); + expect(trigger).toBeInTheDocument(); expect(trigger).toHaveAttribute('aria-expanded', 'false'); + expect(trigger).toHaveAttribute('aria-haspopup', 'menu'); + // The chip icon is rendered inside the trigger. 
+ expect(container.querySelector('svg')).not.toBeNull(); + }); - rerender( - , - ); - expect( - screen.getByRole('button', { name: 'Choose model' }), - ).toHaveAttribute('aria-expanded', 'true'); + it('opens menu on click with aria-expanded true', () => { + renderPicker(); + const trigger = screen.getByRole('button', { name: 'Choose model' }); + fireEvent.click(trigger); + expect(trigger).toHaveAttribute('aria-expanded', 'true'); + expect(screen.getByRole('menu')).toBeInTheDocument(); + }); + + it('portals the menu to document.body (not inside trigger DOM)', () => { + const { container } = renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + const menu = screen.getByRole('menu'); + // Menu is NOT inside the component container. + expect(container.contains(menu)).toBe(false); + // Menu IS a descendant of document.body. + expect(document.body.contains(menu)).toBe(true); + }); + + it('lists each model slug LEFT of the check icon', () => { + renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + const firstRow = screen.getByRole('menuitem', { name: 'gemma4:e2b' }); + const slug = firstRow.querySelector('span'); + const check = firstRow.querySelector('svg'); + expect(slug).not.toBeNull(); + expect(check).not.toBeNull(); + expect(slug!.textContent).toBe('gemma4:e2b'); + // DOM order: slug precedes the check svg. 
+ const children = Array.from(firstRow.children); + expect(children.indexOf(slug!)).toBeLessThan(children.indexOf(check!)); }); - it('fires onToggle when clicked', () => { - const onToggle = vi.fn(); - render( - , - ); + it('marks only the active row with visible check (opacity 1 via inline style)', () => { + renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + const activeRow = screen.getByRole('menuitem', { name: 'gemma4:e2b' }); + const inactiveRow = screen.getByRole('menuitem', { name: 'qwen2.5:7b' }); + expect(activeRow).toHaveAttribute('aria-current', 'true'); + expect(inactiveRow).not.toHaveAttribute('aria-current'); + const activeCheck = activeRow.querySelector('svg')!; + const inactiveCheck = inactiveRow.querySelector('svg')!; + expect((activeCheck as SVGElement).style.opacity).toBe('1'); + expect((inactiveCheck as SVGElement).style.opacity).toBe('0'); + }); + it('calls onSelect and closes when row clicked', () => { + const onSelect = vi.fn(); + renderPicker({ onSelect }); fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - expect(onToggle).toHaveBeenCalledTimes(1); + fireEvent.click(screen.getByRole('menuitem', { name: 'qwen2.5:7b' })); + expect(onSelect).toHaveBeenCalledWith('qwen2.5:7b'); + expect(screen.queryByRole('menu')).toBeNull(); }); - it('does not fire onToggle when disabled', () => { - const onToggle = vi.fn(); - render( - , - ); + it('closes on outside click (document mousedown)', () => { + renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect(screen.getByRole('menu')).toBeInTheDocument(); + fireEvent.mouseDown(document.body); + expect(screen.queryByRole('menu')).toBeNull(); + }); + it('keeps the menu open on mousedown inside the menu', () => { + renderPicker(); fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - expect(onToggle).not.toHaveBeenCalled(); + const row = screen.getByRole('menuitem', { name: 'qwen2.5:7b' }); + 
fireEvent.mouseDown(row); + expect(screen.getByRole('menu')).toBeInTheDocument(); }); -}); -describe('ModelPickerList', () => { - it('renders nothing when closed', () => { - const { container } = render( - , - ); - expect(container.firstChild).toBeNull(); + it('keeps the menu open on mousedown on the trigger itself', () => { + renderPicker(); + const trigger = screen.getByRole('button', { name: 'Choose model' }); + fireEvent.click(trigger); + fireEvent.mouseDown(trigger); + expect(screen.getByRole('menu')).toBeInTheDocument(); }); - it('renders nothing when models list is empty', () => { - const { container } = render( - , - ); - expect(container.firstChild).toBeNull(); + it('closes on Escape key', () => { + renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect(screen.getByRole('menu')).toBeInTheDocument(); + fireEvent.keyDown(document, { key: 'Escape' }); + expect(screen.queryByRole('menu')).toBeNull(); }); - it('renders a slug-only row per model when open and highlights the active row', () => { - render( - , - ); - - expect(screen.getByRole('button', { name: 'gemma4:e2b' })).toHaveClass( - 'bg-primary/10', - ); - expect( - screen.getByRole('button', { name: 'qwen2.5:7b' }), - ).toBeInTheDocument(); - expect(screen.queryByText(/fast|vision|recent/i)).toBeNull(); - }); - - it('marks the active row with aria-current', () => { - render( - , - ); - - expect(screen.getByRole('button', { name: 'gemma4:e2b' })).toHaveAttribute( - 'aria-current', - 'true', - ); - expect( - screen.getByRole('button', { name: 'qwen2.5:7b' }), - ).not.toHaveAttribute('aria-current'); - }); - - it('calls onSelect with the chosen slug when a row is clicked', () => { + it('ignores non-Escape document keydown events', () => { + renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + fireEvent.keyDown(document, { key: 'ArrowDown' }); + expect(screen.getByRole('menu')).toBeInTheDocument(); + }); + + it('closes when 
disabled flips true mid-open', () => { + const { rerender, props } = renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect(screen.getByRole('menu')).toBeInTheDocument(); + rerender(); + expect(screen.queryByRole('menu')).toBeNull(); + }); + + it('does not fire onSelect when trigger is disabled (click ignored)', () => { const onSelect = vi.fn(); - render( - , - ); - - fireEvent.click(screen.getByRole('button', { name: 'qwen2.5:7b' })); - expect(onSelect).toHaveBeenCalledWith('qwen2.5:7b'); + renderPicker({ disabled: true, onSelect }); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect(screen.queryByRole('menu')).toBeNull(); + expect(onSelect).not.toHaveBeenCalled(); + }); + + it('repositions on window resize without throwing', () => { + renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect(() => { + act(() => { + window.dispatchEvent(new Event('resize')); + }); + }).not.toThrow(); + expect(screen.getByRole('menu')).toBeInTheDocument(); + }); + + it('repositions on window scroll without throwing', () => { + renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect(() => { + act(() => { + window.dispatchEvent(new Event('scroll')); + }); + }).not.toThrow(); + expect(screen.getByRole('menu')).toBeInTheDocument(); + }); + + it('clicking the trigger while open toggles closed', () => { + renderPicker(); + const trigger = screen.getByRole('button', { name: 'Choose model' }); + fireEvent.click(trigger); + expect(screen.getByRole('menu')).toBeInTheDocument(); + fireEvent.click(trigger); + expect(screen.queryByRole('menu')).toBeNull(); + }); + + it('opens below the trigger when there is no room above', () => { + // Force the trigger rect to read as being at the very top of the viewport + // so the above-trigger math would go negative and the menu must flip below. 
+ const originalGetRect = Element.prototype.getBoundingClientRect; + Element.prototype.getBoundingClientRect = function () { + return { + top: 0, + left: 100, + right: 128, + bottom: 28, + width: 28, + height: 28, + x: 100, + y: 0, + toJSON() { + return {}; + }, + } as DOMRect; + }; + try { + renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + const menu = screen.getByRole('menu'); + // Top coordinate should be non-negative since we flipped below. + const topPx = parseFloat((menu as HTMLElement).style.top); + expect(topPx).toBeGreaterThanOrEqual(0); + } finally { + Element.prototype.getBoundingClientRect = originalGetRect; + } + }); + + it('clamps left to the 8px edge when the trigger is near the left edge', () => { + // Trigger far to the left so right-align math would produce a negative left. + const originalGetRect = Element.prototype.getBoundingClientRect; + Element.prototype.getBoundingClientRect = function () { + return { + top: 500, + left: 0, + right: 28, + bottom: 528, + width: 28, + height: 28, + x: 0, + y: 500, + toJSON() { + return {}; + }, + } as DOMRect; + }; + try { + renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + const menu = screen.getByRole('menu'); + const leftPx = parseFloat((menu as HTMLElement).style.left); + expect(leftPx).toBe(8); + } finally { + Element.prototype.getBoundingClientRect = originalGetRect; + } }); }); diff --git a/src/view/AskBarView.tsx b/src/view/AskBarView.tsx index c1ca73f4..a2c1d6bd 100644 --- a/src/view/AskBarView.tsx +++ b/src/view/AskBarView.tsx @@ -5,7 +5,7 @@ import { formatQuotedText } from '../utils/formatQuote'; import { useConfig } from '../contexts/ConfigContext'; import { ImageThumbnails } from '../components/ImageThumbnails'; import { CommandSuggestion } from '../components/CommandSuggestion'; -import { ModelPickerList, ModelPickerTrigger } from '../components/ModelPicker'; +import { ModelPicker } from 
'../components/ModelPicker'; import { Tooltip } from '../components/Tooltip'; import type { AttachedImage } from '../types/image'; import { MAX_IMAGE_SIZE_BYTES } from '../types/image'; @@ -293,60 +293,15 @@ export function AskBarView({ return () => clearTimeout(timer); }, [pasteMaxError]); - // ─── Model picker state ─────────────────────────────────────────────────── + // ─── Model picker availability gate ─────────────────────────────────────── - /** True while the model picker popup is visible above the input row. */ - const [isModelPickerOpen, setIsModelPickerOpen] = useState(false); - const modelPickerTriggerRef = useRef(null); - const modelPickerListRef = useRef(null); - - /** Gate combining open intent with prerequisites (not busy, models loaded). */ + /** Prerequisites for rendering the model picker chip. */ const modelPickerAvailable = Boolean( activeModel && availableModels && availableModels.length > 0 && onModelSelect, ); - const showModelPicker = isModelPickerOpen && !isBusy && modelPickerAvailable; - - /* eslint-disable @eslint-react/set-state-in-effect -- intentional: when - generation starts while the picker is open we must reset the user's - open intent so it does not reappear when generation ends. No secondary - effects are triggered by this reset. */ - useEffect(() => { - if (isBusy && isModelPickerOpen) setIsModelPickerOpen(false); - }, [isBusy, isModelPickerOpen]); - /* eslint-enable @eslint-react/set-state-in-effect */ - - /** Outside-click closes the popup. Skips when closed to avoid listener leaks. 
*/ - useEffect(() => { - if (!showModelPicker) return; - const handleMouseDown = (event: MouseEvent) => { - const target = event.target as Node; - if (modelPickerTriggerRef.current?.contains(target)) return; - if (modelPickerListRef.current?.contains(target)) return; - setIsModelPickerOpen(false); - }; - document.addEventListener('mousedown', handleMouseDown); - return () => document.removeEventListener('mousedown', handleMouseDown); - }, [showModelPicker]); - - const toggleModelPicker = useCallback(() => { - setIsModelPickerOpen((open) => !open); - }, []); - - /** - * Forwards the picked slug to the parent and collapses the popup. The - * parent's `onModelSelect` already handles backend persistence; this - * closure just handles the UI state transition. - */ - const handleModelRowSelect = useCallback( - (model: string) => { - onModelSelect?.(model); - setIsModelPickerOpen(false); - }, - [onModelSelect], - ); // ─── Command suggestion state ───────────────────────────────────────────── @@ -647,19 +602,6 @@ export function AskBarView({ )} - {/* Model picker list renders above the input row in the normal DOM - flow so the morphing container's ResizeObserver can grow the - native window upward as the list mounts, avoiding the clipping - that an absolute popup would suffer inside `overflow-hidden`. */} - {modelPickerAvailable && activeModel && availableModels && ( - - )}
)} - {modelPickerAvailable && ( - - )} + {modelPickerAvailable && + activeModel && + availableModels && + onModelSelect && ( + + )} { />, ); fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - fireEvent.click(screen.getByRole('button', { name: 'qwen2.5:7b' })); + fireEvent.click(screen.getByRole('menuitem', { name: 'qwen2.5:7b' })); expect(onModelSelect).toHaveBeenCalledWith('qwen2.5:7b'); - expect(screen.queryByRole('button', { name: 'qwen2.5:7b' })).toBeNull(); + expect(screen.queryByRole('menuitem', { name: 'qwen2.5:7b' })).toBeNull(); }); it('closes the model picker popup when generation starts', () => { @@ -276,7 +276,7 @@ describe('AskBarView', () => { fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); expect( - screen.getByRole('button', { name: 'qwen2.5:7b' }), + screen.getByRole('menuitem', { name: 'qwen2.5:7b' }), ).toBeInTheDocument(); rerender( @@ -295,7 +295,7 @@ describe('AskBarView', () => { />, ); - expect(screen.queryByRole('button', { name: 'qwen2.5:7b' })).toBeNull(); + expect(screen.queryByRole('menuitem', { name: 'qwen2.5:7b' })).toBeNull(); }); it('closes the model picker popup when clicking outside the picker', () => { @@ -317,62 +317,11 @@ describe('AskBarView', () => { fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); expect( - screen.getByRole('button', { name: 'qwen2.5:7b' }), + screen.getByRole('menuitem', { name: 'qwen2.5:7b' }), ).toBeInTheDocument(); fireEvent.mouseDown(document.body); - expect(screen.queryByRole('button', { name: 'qwen2.5:7b' })).toBeNull(); - }); - - it('keeps the popup open when a mousedown lands on the trigger itself', () => { - render( - , - ); - - const trigger = screen.getByRole('button', { name: 'Choose model' }); - fireEvent.click(trigger); - fireEvent.mouseDown(trigger); - - expect( - screen.getByRole('button', { name: 'qwen2.5:7b' }), - ).toBeInTheDocument(); - }); - - it('keeps the popup open when a mousedown lands inside a row before click', () 
=> { - render( - , - ); - - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - const row = screen.getByRole('button', { name: 'qwen2.5:7b' }); - fireEvent.mouseDown(row); - expect( - screen.getByRole('button', { name: 'qwen2.5:7b' }), - ).toBeInTheDocument(); + expect(screen.queryByRole('menuitem', { name: 'qwen2.5:7b' })).toBeNull(); }); it('hides the model picker trigger when no models are available', () => { From e121c8394179704b722de5c3569ea96c08a94772 Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Thu, 23 Apr 2026 17:43:59 -0700 Subject: [PATCH 14/42] feat: show model attribution chip under assistant messages Signed-off-by: Logan Nguyen --- src/components/ChatBubble.tsx | 48 +++++++++++++ src/components/__tests__/ChatBubble.test.tsx | 73 ++++++++++++++++++++ src/hooks/useOllama.ts | 2 + 3 files changed, 123 insertions(+) diff --git a/src/components/ChatBubble.tsx b/src/components/ChatBubble.tsx index b7c7b3b3..64306e47 100644 --- a/src/components/ChatBubble.tsx +++ b/src/components/ChatBubble.tsx @@ -76,6 +76,38 @@ function avatarColor(domain: string): string { /** Regex matching inline `[N]` citation markers in plain text. Captures the N. */ const CITATION_RE = /\[(\d+)\]/g; +/** + * Hoisted static SVG glyph for the model attribution chip. Mirrors the + * chip icon used by the model picker so the attribution visually couples + * to the picker UI. Rendered as a child of a color-controlled span. + */ +const ATTRIB_CHIP_ICON = ( + +); + /** * Walks the rendered answer DOM and replaces every plain-text `[N]` occurrence * with an anchor element that links to the matching source URL. Called inside @@ -228,6 +260,8 @@ interface ChatBubbleProps { /** Whether the search pipeline is currently running. When true, renders a * `SearchTraceBlock` in loading state even before any traces arrive. 
*/ isSearching?: boolean; + /** When set on an assistant message, renders a chip-style attribution badge beside the CopyButton so the user sees which model produced this response. */ + modelName?: string; } /** @@ -277,6 +311,7 @@ export function ChatBubble({ sandboxUnavailable = false, searchTraces, isSearching = false, + modelName, }: ChatBubbleProps) { const isUser = role === 'user'; const [sourcesOpen, setSourcesOpen] = useState(false); @@ -534,6 +569,19 @@ export function ChatBubble({ )} + {/* Model attribution chip: visually couples the response to the + model-picker UI so users can see which model produced it. */} + {modelName && ( + + + {ATTRIB_CHIP_ICON} + + {modelName} + + )}
)}
diff --git a/src/components/__tests__/ChatBubble.test.tsx b/src/components/__tests__/ChatBubble.test.tsx index 7858e637..3b723e86 100644 --- a/src/components/__tests__/ChatBubble.test.tsx +++ b/src/components/__tests__/ChatBubble.test.tsx @@ -1111,4 +1111,77 @@ describe('ChatBubble', () => { ).toBeTruthy(); }); }); + + describe('model attribution', () => { + it('renders the attribution chip when modelName is provided on assistant messages', () => { + render( + , + ); + const chip = screen.getByTestId('model-attribution'); + expect(chip).toBeInTheDocument(); + expect(chip).toHaveTextContent('gemma4:e2b'); + }); + + it('does not render the attribution chip when modelName is absent', () => { + render(); + expect(screen.queryByTestId('model-attribution')).toBeNull(); + }); + + it('does not render the attribution chip on user messages even with modelName', () => { + render( + , + ); + expect(screen.queryByTestId('model-attribution')).toBeNull(); + }); + + it('does not render the attribution chip when the message is an error callout', () => { + render( + , + ); + expect(screen.queryByTestId('model-attribution')).toBeNull(); + }); + + it('does not render the attribution chip when sandbox is unavailable', () => { + render( + , + ); + expect(screen.queryByTestId('model-attribution')).toBeNull(); + }); + + it('does not render the attribution chip while the assistant is still streaming', () => { + render( + , + ); + // Footer row including the attribution is hidden during streaming. + expect(screen.queryByTestId('model-attribution')).toBeNull(); + }); + }); }); diff --git a/src/hooks/useOllama.ts b/src/hooks/useOllama.ts index 3e86e0c2..e71a7847 100644 --- a/src/hooks/useOllama.ts +++ b/src/hooks/useOllama.ts @@ -18,6 +18,8 @@ export interface Message { id: string; role: 'user' | 'assistant'; content: string; + /** Ollama model slug that produced this message. Present on assistant messages once the stream completes. 
*/ + modelName?: string; /** Selected text from the host app that was quoted with this message, if any. */ quotedText?: string; /** Absolute file paths of images attached to this message, if any. */ From 49807f037ec66829e7bb0bf5e33993b65f8318c5 Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Thu, 23 Apr 2026 21:20:18 -0700 Subject: [PATCH 15/42] feat: persist model_name per assistant message Signed-off-by: Logan Nguyen --- src-tauri/src/database.rs | 175 +++++++++++++++++++++++++++++++++++--- src-tauri/src/history.rs | 12 +++ 2 files changed, 173 insertions(+), 14 deletions(-) diff --git a/src-tauri/src/database.rs b/src-tauri/src/database.rs index d875891f..92ba415e 100644 --- a/src-tauri/src/database.rs +++ b/src-tauri/src/database.rs @@ -14,7 +14,7 @@ use serde::Serialize; /// Tuple representing a message for batch insertion: /// (role, content, quoted_text, image_paths, thinking_content, search_sources, -/// search_warnings, search_metadata). +/// search_warnings, search_metadata, model_name). pub type MessageBatchRow = ( String, String, @@ -24,6 +24,7 @@ pub type MessageBatchRow = ( Option, Option, Option, + Option, ); /// Summary of a conversation for the history dropdown list. @@ -54,6 +55,9 @@ pub struct PersistedMessage { /// JSON-serialized `SearchMetadata` (iteration traces, timing) for this /// search turn. `None` for non-search messages and pre-Task-17 rows. pub search_metadata: Option, + /// Slug of the Ollama model that produced this assistant message. `None` + /// for user messages and rows written before the model_name migration. + pub model_name: Option, pub created_at: i64, } @@ -176,6 +180,10 @@ fn run_migrations(conn: &Connection) -> SqlResult<()> { // JSON-encoded Vec and SearchMetadata (Task 17). ensure_column(conn, "messages", "search_warnings", "TEXT")?; ensure_column(conn, "messages", "search_metadata", "TEXT")?; + // Per-message model attribution (slug of the Ollama model that produced + // the assistant response). 
NULL for user messages and rows written before + // this migration. + ensure_column(conn, "messages", "model_name", "TEXT")?; Ok(()) } @@ -297,6 +305,7 @@ pub fn insert_message( search_sources: Option<&str>, search_warnings: Option<&str>, search_metadata: Option<&str>, + model_name: Option<&str>, ) -> SqlResult { let id = uuid::Uuid::new_v4().to_string(); let now = now_millis(); @@ -304,8 +313,8 @@ pub fn insert_message( "INSERT INTO messages \ (id, conversation_id, role, content, quoted_text, image_paths, \ thinking_content, search_sources, search_warnings, search_metadata, \ - created_at) \ - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11)", + model_name, created_at) \ + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)", params![ id, conversation_id, @@ -317,6 +326,7 @@ pub fn insert_message( search_sources, search_warnings, search_metadata, + model_name, now ], )?; @@ -341,8 +351,8 @@ pub fn insert_messages_batch( "INSERT INTO messages \ (id, conversation_id, role, content, quoted_text, image_paths, \ thinking_content, search_sources, search_warnings, search_metadata, \ - created_at) \ - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11)", + model_name, created_at) \ + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)", )?; for ( role, @@ -353,6 +363,7 @@ pub fn insert_messages_batch( search_sources, search_warnings, search_metadata, + model_name, ) in messages { let id = uuid::Uuid::new_v4().to_string(); @@ -367,6 +378,7 @@ pub fn insert_messages_batch( search_sources.as_deref(), search_warnings.as_deref(), search_metadata.as_deref(), + model_name.as_deref(), now ])?; } @@ -383,7 +395,7 @@ pub fn insert_messages_batch( pub fn load_messages(conn: &Connection, conversation_id: &str) -> SqlResult> { let mut stmt = conn.prepare( "SELECT id, role, content, quoted_text, image_paths, thinking_content, \ - search_sources, search_warnings, search_metadata, created_at + search_sources, search_warnings, search_metadata, model_name, created_at 
FROM messages WHERE conversation_id = ?1 ORDER BY created_at ASC", @@ -399,7 +411,8 @@ pub fn load_messages(conn: &Connection, conversation_id: &str) -> SqlResult = conn + .prepare("PRAGMA table_info(messages)") + .unwrap() + .query_map([], |row| row.get::<_, String>(1)) + .unwrap() + .filter_map(|r| r.ok()) + .collect(); + assert!(cols.contains(&"model_name".to_string())); + } + + #[test] + fn insert_message_with_model_name_round_trips() { + let conn = open_in_memory().unwrap(); + let id = create_conversation(&conn, None, "gemma4:e2b").unwrap(); + + insert_message( + &conn, + &id, + "assistant", + "Hello from gemma.", + None, + None, + None, + None, + None, + None, + Some("gemma4:e2b"), + ) + .unwrap(); + + let msgs = load_messages(&conn, &id).unwrap(); + assert_eq!(msgs.len(), 1); + assert_eq!(msgs[0].model_name.as_deref(), Some("gemma4:e2b")); + } + + #[test] + fn insert_message_with_null_model_name() { + let conn = open_in_memory().unwrap(); + let id = create_conversation(&conn, None, "gemma4:e2b").unwrap(); + + insert_message( + &conn, &id, "user", "hi there", None, None, None, None, None, None, None, + ) + .unwrap(); + + let msgs = load_messages(&conn, &id).unwrap(); + assert_eq!(msgs.len(), 1); + assert!(msgs[0].model_name.is_none()); + } + + #[test] + fn insert_messages_batch_includes_model_name() { + let conn = open_in_memory().unwrap(); + let id = create_conversation(&conn, None, "gemma4:e2b").unwrap(); + + let batch = vec![ + ( + "assistant".to_string(), + "answer from gemma".to_string(), + None, + None, + None, + None, + None, + None, + Some("gemma4:e2b".to_string()), + ), + ( + "assistant".to_string(), + "answer from qwen".to_string(), + None, + None, + None, + None, + None, + None, + Some("qwen2.5:7b".to_string()), + ), + ]; + insert_messages_batch(&conn, &id, &batch).unwrap(); + + let msgs = load_messages(&conn, &id).unwrap(); + assert_eq!(msgs.len(), 2); + assert_eq!(msgs[0].model_name.as_deref(), Some("gemma4:e2b")); + 
assert_eq!(msgs[1].model_name.as_deref(), Some("qwen2.5:7b")); + } + + #[test] + fn load_messages_handles_null_model_name_for_legacy_rows() { + let conn = open_in_memory().unwrap(); + let id = create_conversation(&conn, None, "gemma4:e2b").unwrap(); + + // Simulate a row written before the model_name migration by inserting + // with an explicit column list that omits model_name entirely. + conn.execute( + "INSERT INTO messages (id, conversation_id, role, content, created_at) \ + VALUES (?1, ?2, ?3, ?4, ?5)", + params![ + uuid::Uuid::new_v4().to_string(), + &id, + "assistant", + "legacy row", + now_millis(), + ], + ) + .unwrap(); + + let msgs = load_messages(&conn, &id).unwrap(); + assert_eq!(msgs.len(), 1); + assert!(msgs[0].model_name.is_none()); + } } diff --git a/src-tauri/src/history.rs b/src-tauri/src/history.rs index 47d41f5c..a9941aec 100644 --- a/src-tauri/src/history.rs +++ b/src-tauri/src/history.rs @@ -47,6 +47,10 @@ pub struct SaveMessagePayload { /// Already-serialised `SearchMetadata` JSON string for search turns. /// Passed through verbatim to `messages.search_metadata`. pub search_metadata: Option, + /// Slug of the Ollama model that produced this response. Frontend stamps + /// assistant payloads with the active model at generation time; `None` + /// for user payloads. Accepted as missing via serde Option default. + pub model_name: Option, } /// Response returned when saving a conversation. 
@@ -111,6 +115,7 @@ pub fn save_conversation( sources_json, m.search_warnings, m.search_metadata, + m.model_name, ) }) .collect(); @@ -134,6 +139,7 @@ pub fn persist_message( search_sources: Option>, search_warnings: Option, search_metadata: Option, + model_name: Option, db: State<'_, Database>, ) -> Result<(), String> { let conn = db.0.lock().map_err(|e| e.to_string())?; @@ -154,6 +160,7 @@ pub fn persist_message( sources_json.as_deref(), search_warnings.as_deref(), search_metadata.as_deref(), + model_name.as_deref(), ) .map_err(|e| e.to_string())?; Ok(()) @@ -343,6 +350,7 @@ mod tests { search_sources: None, search_warnings: None, search_metadata: None, + model_name: None, }, SaveMessagePayload { role: "assistant".to_string(), @@ -364,6 +372,7 @@ mod tests { search_metadata: Some( r#"{"iterations":[],"total_duration_ms":10,"retries_performed":0}"#.to_string(), ), + model_name: Some("gemma4:e2b".to_string()), }, ]; @@ -399,6 +408,7 @@ mod tests { sources_json, m.search_warnings, m.search_metadata, + m.model_name, ) }) .collect(); @@ -436,6 +446,8 @@ mod tests { .contains("total_duration_ms")); assert!(loaded[0].search_warnings.is_none()); assert!(loaded[0].search_metadata.is_none()); + assert!(loaded[0].model_name.is_none()); + assert_eq!(loaded[1].model_name.as_deref(), Some("gemma4:e2b")); } #[test] From f97354a451056a916bb8571982b374f258a07b4a Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Thu, 23 Apr 2026 21:29:27 -0700 Subject: [PATCH 16/42] feat: thread model_name from picker to bubble and back Signed-off-by: Logan Nguyen --- src/App.tsx | 5 +- .../__tests__/useConversationHistory.test.tsx | 132 ++++++++++ src/hooks/__tests__/useOllama.test.tsx | 225 ++++++++++++------ src/hooks/useConversationHistory.ts | 4 + src/hooks/useOllama.ts | 15 +- src/types/history.ts | 6 + src/view/ConversationView.tsx | 1 + 7 files changed, 308 insertions(+), 80 deletions(-) diff --git a/src/App.tsx b/src/App.tsx index ebd784ef..9c14e87a 100644 --- a/src/App.tsx +++ 
b/src/App.tsx @@ -128,6 +128,9 @@ function App() { */ const morphingContainerNodeRef = useRef(null); + const { activeModel, availableModels, refreshModels, setActiveModel } = + useModelSelection(); + const { conversationId, isSaved, @@ -163,7 +166,7 @@ function App() { searchStage, reset, loadMessages, - } = useOllama(handleTurnComplete); + } = useOllama(activeModel, handleTurnComplete); /** * Sticky flag: once the user invokes `/search`, subsequent submits in the diff --git a/src/hooks/__tests__/useConversationHistory.test.tsx b/src/hooks/__tests__/useConversationHistory.test.tsx index e3ddab9c..0a8ca5ff 100644 --- a/src/hooks/__tests__/useConversationHistory.test.tsx +++ b/src/hooks/__tests__/useConversationHistory.test.tsx @@ -46,6 +46,7 @@ describe('useConversationHistory', () => { search_sources: null, search_warnings: null, search_metadata: null, + model_name: null, }, { role: 'assistant', @@ -56,6 +57,7 @@ describe('useConversationHistory', () => { search_sources: null, search_warnings: null, search_metadata: null, + model_name: null, }, ], }); @@ -97,6 +99,7 @@ describe('useConversationHistory', () => { search_sources: null, search_warnings: null, search_metadata: null, + model_name: null, }, { role: 'assistant', @@ -107,6 +110,7 @@ describe('useConversationHistory', () => { search_sources: null, search_warnings: null, search_metadata: null, + model_name: null, }, ], model: MODEL, @@ -180,6 +184,7 @@ describe('useConversationHistory', () => { searchSources: null, searchWarnings: null, searchMetadata: null, + modelName: null, }); expect(invoke).toHaveBeenCalledWith('persist_message', { conversationId: 'conv-123', @@ -191,6 +196,7 @@ describe('useConversationHistory', () => { searchSources: null, searchWarnings: null, searchMetadata: null, + modelName: null, }); }); @@ -281,12 +287,14 @@ describe('useConversationHistory', () => { role: 'user', content: 'Saved question', quotedText: undefined, + modelName: undefined, }, { id: 'm2', role: 'assistant', content: 
'Saved answer', quotedText: 'ctx', + modelName: undefined, }, ]); }); @@ -417,6 +425,7 @@ describe('useConversationHistory', () => { searchSources: null, searchWarnings: null, searchMetadata: null, + modelName: null, }); }); @@ -451,6 +460,7 @@ describe('useConversationHistory', () => { search_sources: null, search_warnings: null, search_metadata: null, + model_name: null, }, { role: 'assistant', @@ -461,6 +471,7 @@ describe('useConversationHistory', () => { search_sources: null, search_warnings: JSON.stringify(['reader_unavailable']), search_metadata: null, + model_name: null, }, ], }); @@ -497,6 +508,7 @@ describe('useConversationHistory', () => { search_sources: null, search_warnings: null, search_metadata: null, + model_name: null, }, { role: 'assistant', @@ -507,6 +519,7 @@ describe('useConversationHistory', () => { search_sources: null, search_warnings: null, search_metadata: null, + model_name: null, }, ], }); @@ -717,6 +730,7 @@ describe('useConversationHistory', () => { searchSources: null, searchWarnings: JSON.stringify(['reader_unavailable']), searchMetadata: null, + modelName: null, }); }); @@ -928,6 +942,7 @@ describe('useConversationHistory', () => { searchSources: null, searchWarnings: null, searchMetadata: JSON.stringify(metadata), + modelName: null, }); }); @@ -974,6 +989,7 @@ describe('useConversationHistory', () => { searchSources: null, searchWarnings: null, searchMetadata: JSON.stringify(traces), + modelName: null, }); }); @@ -1321,4 +1337,120 @@ describe('useConversationHistory', () => { ]), ); }); + + // ─── model_name round trip ─────────────────────────────────────────────────── + + it('save() stamps model_name on payloads when Message has modelName', async () => { + invoke.mockResolvedValueOnce({ conversation_id: 'conv-model-save' }); + invoke.mockResolvedValue(undefined); + + const messagesWithModel: Message[] = [ + { id: 'u1', role: 'user', content: 'Hi' }, + { + id: 'a1', + role: 'assistant', + content: 'Hello', + modelName: 
'gemma4:e2b', + }, + ]; + + const { result } = renderHook(() => useConversationHistory()); + + await act(async () => { + await result.current.save(messagesWithModel, MODEL); + }); + + expect(invoke).toHaveBeenCalledWith( + 'save_conversation', + expect.objectContaining({ + messages: [ + expect.objectContaining({ role: 'user', model_name: null }), + expect.objectContaining({ + role: 'assistant', + model_name: 'gemma4:e2b', + }), + ], + }), + ); + }); + + it('persistTurn() sends modelName for assistant, null for user', async () => { + invoke.mockResolvedValueOnce({ conversation_id: 'conv-model-persist' }); + invoke.mockResolvedValue(undefined); + + const { result } = renderHook(() => useConversationHistory()); + + await act(async () => { + await result.current.save(MESSAGES, MODEL); + }); + invoke.mockClear(); + + const userMsg: Message = { id: 'u-m', role: 'user', content: 'q' }; + const assistantMsg: Message = { + id: 'a-m', + role: 'assistant', + content: 'answer', + modelName: 'qwen2.5:7b', + }; + + await act(async () => { + await result.current.persistTurn(userMsg, assistantMsg); + }); + + expect(invoke).toHaveBeenCalledWith( + 'persist_message', + expect.objectContaining({ + role: 'user', + modelName: null, + }), + ); + expect(invoke).toHaveBeenCalledWith( + 'persist_message', + expect.objectContaining({ + role: 'assistant', + modelName: 'qwen2.5:7b', + }), + ); + }); + + it('loadConversation() maps model_name back to modelName on restore', async () => { + invoke.mockResolvedValueOnce([ + { + id: 'u1', + role: 'user', + content: 'Hi', + quoted_text: null, + image_paths: null, + thinking_content: null, + search_sources: null, + search_warnings: null, + search_metadata: null, + model_name: null, + created_at: 1, + }, + { + id: 'a1', + role: 'assistant', + content: 'Hello', + quoted_text: null, + image_paths: null, + thinking_content: null, + search_sources: null, + search_warnings: null, + search_metadata: null, + model_name: 'gemma4:e2b', + created_at: 2, + }, + 
]); + + const { result } = renderHook(() => useConversationHistory()); + let loaded: Message[] = []; + + await act(async () => { + loaded = await result.current.loadConversation('conv-model-load'); + }); + + expect(loaded[0].modelName).toBeUndefined(); + expect(loaded[1].modelName).toBe('gemma4:e2b'); + }); }); diff --git a/src/hooks/__tests__/useOllama.test.tsx b/src/hooks/__tests__/useOllama.test.tsx index 388de991..5dcb930a 100644 --- a/src/hooks/__tests__/useOllama.test.tsx +++ b/src/hooks/__tests__/useOllama.test.tsx @@ -25,7 +25,7 @@ describe('useOllama', () => { describe('ask()', () => { it('sends message via invoke with correct command name and args', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('hello world'); @@ -55,7 +55,7 @@ describe('useOllama', () => { }, ); - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); // Start ask but don't await so we can read state while in-flight act(() => { @@ -72,7 +72,7 @@ describe('useOllama', () => { }); it('adds user message and empty assistant placeholder immediately on ask', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('my question'); @@ -95,7 +95,7 @@ describe('useOllama', () => { }); it('stores quotedText on user message when provided', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('what is this?', 'code snippet'); @@ -111,7 +111,7 @@ describe('useOllama', () => { }); it('sends quotedText to invoke when provided', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('summarize', 
'selected text'); @@ -127,7 +127,7 @@ describe('useOllama', () => { }); it('accumulates streaming tokens into the assistant message', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('hello'); @@ -148,7 +148,7 @@ describe('useOllama', () => { }); it('keeps assistant message in place on Done chunk', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('hello'); @@ -172,7 +172,7 @@ describe('useOllama', () => { }); it('does nothing for empty prompt', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask(''); @@ -183,7 +183,7 @@ describe('useOllama', () => { }); it('does nothing for whitespace-only prompt', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask(' '); @@ -205,7 +205,7 @@ describe('useOllama', () => { }, ); - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); // Start the first ask (stalls) act(() => { @@ -230,7 +230,7 @@ describe('useOllama', () => { }); it('sends promptOverride as message to backend when provided', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask( @@ -255,7 +255,7 @@ describe('useOllama', () => { }); it('sends displayContent as message when no promptOverride provided', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('hello world'); @@ -270,7 +270,7 @@ describe('useOllama', () => { }); 
it('sends displayContent when promptOverride is undefined', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask( @@ -295,7 +295,7 @@ describe('useOllama', () => { describe('imagePaths handling', () => { it('allows ask() with empty text but valid imagePaths', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('', undefined, ['/tmp/img1.jpg']); @@ -320,7 +320,7 @@ describe('useOllama', () => { }); it('returns early for empty text AND no imagePaths', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('', undefined, undefined); @@ -331,7 +331,7 @@ describe('useOllama', () => { }); it('returns early for empty text AND empty imagePaths array', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('', undefined, []); @@ -342,7 +342,7 @@ describe('useOllama', () => { }); it('includes imagePaths in message and invoke when text AND imagePaths are provided', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('describe this', undefined, [ @@ -368,7 +368,7 @@ describe('useOllama', () => { }); it('sets message.imagePaths to undefined and invoke imagePaths to null when no imagePaths', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('hello'); @@ -388,7 +388,7 @@ describe('useOllama', () => { describe('error handling', () => { it('Error chunk sets isGenerating to false', async () => { - 
const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('test'); @@ -413,7 +413,7 @@ describe('useOllama', () => { it('invoke rejection sets isGenerating to false', async () => { invoke.mockRejectedValueOnce(new Error('connection refused')); - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('test'); @@ -423,7 +423,7 @@ describe('useOllama', () => { }); it('Error chunk updates assistant placeholder with errorKind', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('test'); @@ -452,7 +452,7 @@ describe('useOllama', () => { }); it('Error chunk with partial tokens replaces content with error', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('test'); @@ -479,7 +479,7 @@ describe('useOllama', () => { it('invoke rejection creates assistant message with Other errorKind', async () => { invoke.mockRejectedValueOnce(new Error('network error')); - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('test'); @@ -497,7 +497,7 @@ describe('useOllama', () => { describe('streaming edge cases', () => { it('handles Token with empty string', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('hello'); @@ -518,7 +518,7 @@ describe('useOllama', () => { }); it('drops the placeholder when only an empty ThinkingToken arrives before cancellation', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = 
renderHook(() => useOllama('')); await act(async () => { await result.current.ask('hello', undefined, undefined, true); @@ -567,7 +567,7 @@ describe('useOllama', () => { } }); - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let secondAsk!: Promise; let thirdAsk!: Promise; @@ -621,7 +621,7 @@ describe('useOllama', () => { } }); - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); act(() => { void result.current.ask('late failure'); @@ -663,7 +663,7 @@ describe('useOllama', () => { }, ); - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); act(() => { void result.current.ask('hello'); @@ -708,7 +708,7 @@ describe('useOllama', () => { }, ); - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); act(() => { void result.current.askSearch('rust'); @@ -752,7 +752,7 @@ describe('useOllama', () => { }); it('does nothing when not generating', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.cancel(); @@ -767,7 +767,7 @@ describe('useOllama', () => { describe('Cancelled chunk', () => { it('keeps partial content as assistant message on Cancelled', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('hello'); @@ -792,7 +792,7 @@ describe('useOllama', () => { }); it('removes assistant placeholder when cancelled with no tokens', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('hello'); @@ -816,7 +816,7 @@ describe('useOllama', () => { describe('reset()', () => { it('clears all state', async () => { - const { result } = 
renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); // Build up some state await act(async () => { @@ -847,7 +847,7 @@ describe('useOllama', () => { describe('onTurnComplete callback', () => { it('is called with user and assistant messages on Done', async () => { const onTurnComplete = vi.fn(); - const { result } = renderHook(() => useOllama(onTurnComplete)); + const { result } = renderHook(() => useOllama('', onTurnComplete)); await act(async () => { await result.current.ask('ping'); @@ -870,7 +870,7 @@ describe('useOllama', () => { it('is not called when Cancelled', async () => { const onTurnComplete = vi.fn(); - const { result } = renderHook(() => useOllama(onTurnComplete)); + const { result } = renderHook(() => useOllama('', onTurnComplete)); await act(async () => { await result.current.ask('ping'); @@ -887,7 +887,7 @@ describe('useOllama', () => { it('is not called when an Error chunk is received', async () => { const onTurnComplete = vi.fn(); - const { result } = renderHook(() => useOllama(onTurnComplete)); + const { result } = renderHook(() => useOllama('', onTurnComplete)); await act(async () => { await result.current.ask('ping'); @@ -905,11 +905,82 @@ describe('useOllama', () => { }); }); + // ─── modelName attribution ─────────────────────────────────────────────────── + + describe('modelName attribution', () => { + it('stamps the assistant message with activeModel on ask() completion', async () => { + const onTurnComplete = vi.fn(); + const { result } = renderHook(() => + useOllama('gemma4:e2b', onTurnComplete), + ); + + await act(async () => { + await result.current.ask('hi'); + }); + + const channel = getChannel(); + act(() => { + channel!.simulateMessage({ type: 'Token', data: 'hello' }); + channel!.simulateMessage({ type: 'Done' }); + }); + + const [, assistantMsg] = onTurnComplete.mock.calls[0]; + expect(assistantMsg.modelName).toBe('gemma4:e2b'); + expect(result.current.messages[1]).toMatchObject({ + role: 
'assistant', + modelName: 'gemma4:e2b', + }); + }); + + it('leaves modelName undefined when activeModel is an empty string', async () => { + const onTurnComplete = vi.fn(); + const { result } = renderHook(() => useOllama('', onTurnComplete)); + + await act(async () => { + await result.current.ask('hi'); + }); + + const channel = getChannel(); + act(() => { + channel!.simulateMessage({ type: 'Token', data: 'hello' }); + channel!.simulateMessage({ type: 'Done' }); + }); + + const [, assistantMsg] = onTurnComplete.mock.calls[0]; + expect(assistantMsg.modelName).toBeUndefined(); + }); + + it('stamps the assistant message with activeModel on askSearch() turns', async () => { + const onTurnComplete = vi.fn(); + const { result } = renderHook(() => + useOllama('qwen2.5:7b', onTurnComplete), + ); + + let pending: Promise | undefined; + await act(async () => { + pending = result.current.askSearch('rust async'); + }); + + const channel = getChannel(); + act(() => { + channel!.simulateMessage({ type: 'Token', content: 'answer' }); + channel!.simulateMessage({ type: 'Done' }); + }); + + await act(async () => { + await pending; + }); + + const [, assistantMsg] = onTurnComplete.mock.calls[0]; + expect(assistantMsg.modelName).toBe('qwen2.5:7b'); + }); + }); + // ─── loadMessages() ────────────────────────────────────────────────────────── describe('loadMessages()', () => { it('replaces messages state with provided array', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('original question'); @@ -934,7 +1005,7 @@ describe('useOllama', () => { it('clears generating state when loading messages', async () => { invoke.mockRejectedValueOnce(new Error('boom')); - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('fail'); @@ -953,7 +1024,7 @@ describe('useOllama', () => 
{ describe('ThinkingToken handling', () => { it('marks the assistant placeholder as a /think turn when think is true', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('hello', undefined, undefined, true); @@ -966,7 +1037,7 @@ describe('useOllama', () => { }); it('accumulates ThinkingTokens into thinkingContent', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('hello', undefined, undefined, true); @@ -987,7 +1058,7 @@ describe('useOllama', () => { }); it('passes think parameter to invoke', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('hello', undefined, undefined, true); @@ -1002,7 +1073,7 @@ describe('useOllama', () => { }); it('passes think as false by default', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('hello'); @@ -1018,7 +1089,7 @@ describe('useOllama', () => { it('includes thinkingContent in onTurnComplete on Done', async () => { const onTurnComplete = vi.fn(); - const { result } = renderHook(() => useOllama(onTurnComplete)); + const { result } = renderHook(() => useOllama('', onTurnComplete)); await act(async () => { await result.current.ask('hello', undefined, undefined, true); @@ -1043,7 +1114,7 @@ describe('useOllama', () => { it('does not set thinkingContent when no thinking happened', async () => { const onTurnComplete = vi.fn(); - const { result } = renderHook(() => useOllama(onTurnComplete)); + const { result } = renderHook(() => useOllama('', onTurnComplete)); await act(async () => { await result.current.ask('hello'); @@ -1061,7 +1132,7 @@ describe('useOllama', () => 
{ }); it('preserves thinking content when cancelled with thinking but no regular tokens', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); await act(async () => { await result.current.ask('hello', undefined, undefined, true); @@ -1091,7 +1162,7 @@ describe('useOllama', () => { describe('history', () => { it('maintains message history across multiple sequential asks', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); // First ask + response await act(async () => { @@ -1133,7 +1204,7 @@ describe('useOllama', () => { describe('askSearch()', () => { it('invokes search_pipeline with the trimmed query', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch(' rust async '); @@ -1153,7 +1224,7 @@ describe('useOllama', () => { }); it('stores quotedText on the /search user message when provided', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch( @@ -1180,7 +1251,7 @@ describe('useOllama', () => { }); it('resolves immediately with final=true on empty query', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let outcome: { final: boolean } | undefined; await act(async () => { outcome = await result.current.askSearch(' '); @@ -1190,7 +1261,7 @@ describe('useOllama', () => { }); it('resolves with final=true when a token is received followed by Done', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); const metadata = { iterations: [ { @@ -1233,7 +1304,7 @@ 
describe('useOllama', () => { it('resolves with final=false when a clarify trace is followed by question tokens and Done', async () => { const onTurnComplete = vi.fn(); - const { result } = renderHook(() => useOllama(onTurnComplete)); + const { result } = renderHook(() => useOllama('', onTurnComplete)); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch('who is him'); @@ -1270,7 +1341,7 @@ describe('useOllama', () => { }); it('updates searchStage through the pipeline phases', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch('q'); @@ -1321,7 +1392,7 @@ describe('useOllama', () => { }); it('handles FetchingUrl, finalizes traces on IterationComplete, and ignores empty tokens', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let pending!: Promise<{ final: boolean }>; await act(async () => { @@ -1383,7 +1454,7 @@ describe('useOllama', () => { }); it('ignores IterationComplete events when no trace steps have started', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let pending!: Promise<{ final: boolean }>; await act(async () => { @@ -1418,7 +1489,7 @@ describe('useOllama', () => { }); it('drops the empty placeholder on Cancelled with no content', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch('q'); @@ -1438,7 +1509,7 @@ describe('useOllama', () => { }); it('keeps partial content on Cancelled after tokens arrived', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let 
pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch('q'); @@ -1459,7 +1530,7 @@ describe('useOllama', () => { it('renders an Error event as an error bubble', async () => { const onTurnComplete = vi.fn(); - const { result } = renderHook(() => useOllama(onTurnComplete)); + const { result } = renderHook(() => useOllama('', onTurnComplete)); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch('q'); @@ -1483,7 +1554,7 @@ describe('useOllama', () => { }); it('guards against concurrent invocations', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let firstPending!: Promise<{ final: boolean }>; await act(async () => { firstPending = result.current.askSearch('first'); @@ -1532,7 +1603,7 @@ describe('useOllama', () => { } }); - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let firstPending!: Promise<{ final: boolean }>; let secondPending!: Promise<{ final: boolean }>; @@ -1578,7 +1649,7 @@ describe('useOllama', () => { invoke.mockImplementationOnce(async () => { throw new Error('ipc failed'); }); - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let outcome: { final: boolean } | undefined; await act(async () => { outcome = await result.current.askSearch('q'); @@ -1611,7 +1682,7 @@ describe('useOllama', () => { } }); - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let pending!: Promise<{ final: boolean }>; act(() => { @@ -1642,7 +1713,7 @@ describe('useOllama', () => { it('does not persist an empty turn on Done', async () => { const onTurnComplete = vi.fn(); - const { result } = renderHook(() => useOllama(onTurnComplete)); + const { result } = renderHook(() => useOllama('', onTurnComplete)); let pending!: Promise<{ final: boolean }>; await 
act(async () => { pending = result.current.askSearch('q'); @@ -1660,7 +1731,7 @@ describe('useOllama', () => { it('persists searchSources to the assistant message on Sources + Token + Done', async () => { const onTurnComplete = vi.fn(); - const { result } = renderHook(() => useOllama(onTurnComplete)); + const { result } = renderHook(() => useOllama('', onTurnComplete)); const metadata = { iterations: [ { @@ -1706,7 +1777,7 @@ describe('useOllama', () => { }); it('Warning event accumulates into message.searchWarnings while streaming continues', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch('q'); @@ -1731,7 +1802,7 @@ describe('useOllama', () => { it('askSearch accumulates warnings from Warning events into the persisted turn', async () => { const onTurnComplete = vi.fn(); - const { result } = renderHook(() => useOllama(onTurnComplete)); + const { result } = renderHook(() => useOllama('', onTurnComplete)); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch('q'); @@ -1763,7 +1834,7 @@ describe('useOllama', () => { it('askSearch passes multiple warnings through in order', async () => { const onTurnComplete = vi.fn(); - const { result } = renderHook(() => useOllama(onTurnComplete)); + const { result } = renderHook(() => useOllama('', onTurnComplete)); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch('q'); @@ -1793,7 +1864,7 @@ describe('useOllama', () => { }); it('Trace events accumulate steps on the assistant message', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch('q'); @@ -1840,7 +1911,7 @@ describe('useOllama', () => { 
}); it('Trace updates replace earlier steps with the same id', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch('q'); @@ -1889,7 +1960,7 @@ describe('useOllama', () => { it('Trace events are passed to onTurnComplete', async () => { const onTurnComplete = vi.fn(); - const { result } = renderHook(() => useOllama(onTurnComplete)); + const { result } = renderHook(() => useOllama('', onTurnComplete)); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch('q'); @@ -1924,7 +1995,7 @@ describe('useOllama', () => { it('preserves completed traces on Done when no running steps need finalization', async () => { const onTurnComplete = vi.fn(); - const { result } = renderHook(() => useOllama(onTurnComplete)); + const { result } = renderHook(() => useOllama('', onTurnComplete)); let pending!: Promise<{ final: boolean }>; await act(async () => { @@ -1966,7 +2037,7 @@ describe('useOllama', () => { it('searchTraces is undefined when no Trace event is received', async () => { const onTurnComplete = vi.fn(); - const { result } = renderHook(() => useOllama(onTurnComplete)); + const { result } = renderHook(() => useOllama('', onTurnComplete)); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch('q'); @@ -1988,7 +2059,7 @@ describe('useOllama', () => { describe('search state cleanup', () => { it('reset clears the search stage indicator', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch('q'); @@ -2011,7 +2082,7 @@ describe('useOllama', () => { }); it('loadMessages clears the search stage indicator', async () => { - const { result } = renderHook(() => 
useOllama()); + const { result } = renderHook(() => useOllama('')); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch('q'); @@ -2034,7 +2105,7 @@ describe('useOllama', () => { }); it('Searching after RefiningSearch sets gap:true stage', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch('q'); @@ -2061,7 +2132,7 @@ describe('useOllama', () => { }); it('ReadingSources after RefiningSearch sets gap:true stage', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch('q'); @@ -2089,7 +2160,7 @@ describe('useOllama', () => { it('SandboxUnavailable event sets sandboxUnavailable on assistant message', async () => { const onTurnComplete = vi.fn(); - const { result } = renderHook(() => useOllama(onTurnComplete)); + const { result } = renderHook(() => useOllama('', onTurnComplete)); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch('q'); @@ -2110,7 +2181,7 @@ describe('useOllama', () => { }); it('SandboxUnavailable event does not set errorKind', async () => { - const { result } = renderHook(() => useOllama()); + const { result } = renderHook(() => useOllama('')); let pending!: Promise<{ final: boolean }>; await act(async () => { pending = result.current.askSearch('q'); diff --git a/src/hooks/useConversationHistory.ts b/src/hooks/useConversationHistory.ts index c1b53b15..4b5a9c91 100644 --- a/src/hooks/useConversationHistory.ts +++ b/src/hooks/useConversationHistory.ts @@ -41,6 +41,7 @@ function toPayload(msg: Message): SaveMessagePayload { : msg.searchTraces && msg.searchTraces.length > 0 ? 
JSON.stringify(msg.searchTraces) : null, + model_name: msg.modelName ?? null, }; } @@ -241,6 +242,7 @@ function fromPersisted(msg: PersistedMessage): Message { searchMetadata, searchTraces: searchTraces && searchTraces.length > 0 ? searchTraces : undefined, + modelName: msg.model_name ?? undefined, }; } @@ -325,6 +327,7 @@ export function useConversationHistory() { searchSources: null, searchWarnings: null, searchMetadata: null, + modelName: null, }), invoke('persist_message', { conversationId, @@ -346,6 +349,7 @@ export function useConversationHistory() { assistantMsg.searchTraces.length > 0 ? JSON.stringify(assistantMsg.searchTraces) : null, + modelName: assistantMsg.modelName ?? null, }), ]); }, diff --git a/src/hooks/useOllama.ts b/src/hooks/useOllama.ts index e71a7847..9a2fae03 100644 --- a/src/hooks/useOllama.ts +++ b/src/hooks/useOllama.ts @@ -122,8 +122,17 @@ function finalizeSearchTraceSteps( * * Manages message history, streaming state, and the Tauri IPC channels used by * both the normal chat path and the `/search` pipeline. + * + * @param activeModel Ollama model slug that should be attributed to each + * assistant message produced by this hook. Passed as a hook parameter (not + * a per-call argument) so the latest App-level selection is captured via + * closure on every render. An empty string (briefly possible on startup, + * before the model list resolves) is coerced to `undefined` on the emitted + * `Message`, so no attribution chip is rendered rather than a blank one. + * @param onTurnComplete Optional callback invoked after each completed turn. */ export function useOllama( + activeModel: string, onTurnComplete?: (userMsg: Message, assistantMsg: Message) => void, ) { const [messages, setMessages] = useState([]); @@ -222,6 +231,7 @@ export function useOllama( role: 'assistant', content: '', fromThink: think ? 
true : undefined, + modelName: activeModel || undefined, }; setMessages((prev) => [...prev, userMsg, assistantMsg]); @@ -338,7 +348,7 @@ export function useOllama( setSearchStage(null); } }, - [onTurnComplete], + [onTurnComplete, activeModel], ); /** @@ -376,6 +386,7 @@ export function useOllama( role: 'assistant', content: '', fromSearch: true, + modelName: activeModel || undefined, }; setMessages((prev) => [...prev, userMsg, assistantMsg]); @@ -566,7 +577,7 @@ export function useOllama( }); }); }, - [onTurnComplete], + [onTurnComplete, activeModel], ); /** Cancels the currently active generation. */ diff --git a/src/types/history.ts b/src/types/history.ts index fd8360f2..182aa8c7 100644 --- a/src/types/history.ts +++ b/src/types/history.ts @@ -45,6 +45,9 @@ export interface PersistedMessage { * `SearchTraceStep[]` or legacy iteration traces. Null for non-search * messages. */ search_metadata: string | null; + /** Ollama model slug attributed to this message. Null for user messages + * and legacy messages written before the model_name migration. */ + model_name: string | null; /** Unix timestamp (seconds) the message was created. */ created_at: number; } @@ -71,4 +74,7 @@ export interface SaveMessagePayload { search_warnings: string | null; /** Pre-serialized JSON string of SearchMetadata or a legacy trace payload. */ search_metadata: string | null; + /** Ollama model slug that produced this response. Null for user messages + * and messages from pre-migration conversations. 
*/ + model_name: string | null; } diff --git a/src/view/ConversationView.tsx b/src/view/ConversationView.tsx index 15bf40b2..9e6dd2fd 100644 --- a/src/view/ConversationView.tsx +++ b/src/view/ConversationView.tsx @@ -252,6 +252,7 @@ export function ConversationView({ searchWarnings={msg.searchWarnings} sandboxUnavailable={msg.sandboxUnavailable} searchTraces={msg.searchTraces} + modelName={msg.modelName} isSearching={ isGenerating && msg.fromSearch === true && From 4adac7f51d8533b38473f696c540a1bb2e33aafb Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Thu, 23 Apr 2026 21:32:43 -0700 Subject: [PATCH 17/42] docs: clarify Message.modelName semantics Stamped at creation time, stable across mid-stream model switches. Review feedback from TG4 quality pass. Signed-off-by: Logan Nguyen --- src/hooks/useOllama.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/hooks/useOllama.ts b/src/hooks/useOllama.ts index 9a2fae03..f292864e 100644 --- a/src/hooks/useOllama.ts +++ b/src/hooks/useOllama.ts @@ -18,7 +18,9 @@ export interface Message { id: string; role: 'user' | 'assistant'; content: string; - /** Ollama model slug that produced this message. Present on assistant messages once the stream completes. */ + /** Ollama model slug attributed to this assistant message at creation time. + * Remains stable even if the user switches models mid-stream. Undefined for + * user messages and for legacy conversations loaded from pre-migration rows. */ modelName?: string; /** Selected text from the host app that was quoted with this message, if any. 
*/ quotedText?: string; From e0323010ff6b1876429e857d7ba5a35538ed5a9f Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Thu, 23 Apr 2026 21:48:14 -0700 Subject: [PATCH 18/42] fix: full-width inline model picker list with tooltip trigger Signed-off-by: Logan Nguyen --- src/components/ModelPicker.tsx | 376 ++++++------------ src/components/__tests__/ModelPicker.test.tsx | 295 +++++--------- src/view/AskBarView.tsx | 74 +++- src/view/__tests__/AskBarView.test.tsx | 70 ++++ 4 files changed, 359 insertions(+), 456 deletions(-) diff --git a/src/components/ModelPicker.tsx b/src/components/ModelPicker.tsx index 6e25dc8d..0a6c4a31 100644 --- a/src/components/ModelPicker.tsx +++ b/src/components/ModelPicker.tsx @@ -1,12 +1,23 @@ import { AnimatePresence, motion } from 'framer-motion'; -import { - useCallback, - useEffect, - useLayoutEffect, - useRef, - useState, -} from 'react'; -import { createPortal } from 'react-dom'; +import type { RefObject } from 'react'; + +/** + * Model picker is split into two exports so that the opening/closing state and + * outside-click lifecycle live in {@link AskBarView}, which also owns the Tauri + * window sizing via a morphing-container ResizeObserver. The list renders + * **inline in the DOM flow** (not via a portal) so the ResizeObserver detects + * the added height and grows the native window upward to reveal the menu. + * + * - {@link ModelPickerTrigger} - stateless chip button; wrapped in a `Tooltip` + * at the call site rather than internally. + * - {@link ModelPickerList} - animated, full-width inline list shown above the + * ask bar input row, following the same pattern as `CommandSuggestion`. + * + * An earlier implementation rendered the menu via `createPortal(document.body)` + * to escape the ask bar's `overflow-hidden` chrome, but the portal was still + * bounded by the Tauri web view which is only ~80px tall in ask-bar mode, so + * menus of 50-160px clipped. DOM-flow rendering grows the window naturally. 
+ */ /** * Hoisted static SVG - chip-style trigger icon for the model picker. @@ -41,259 +52,132 @@ const CHIP_ICON = ( ); -/** Hoisted static checkmark path used on the active row. */ -const CHECK_ICON_PATH = ( - -); - -/** Fixed target width for the portal menu in pixels. */ -const MENU_WIDTH = 220; -/** Viewport-edge padding used when clamping the left position. */ -const EDGE_PADDING = 8; -/** Vertical gap between the trigger and the menu. */ -const MENU_GAP = 8; +/** Props for {@link ModelPickerTrigger}. */ +export interface ModelPickerTriggerProps { + /** Ref forwarded to the trigger button for outside-click discrimination. */ + triggerRef?: RefObject; + /** Whether the associated list is currently expanded. Drives `aria-expanded`. */ + isOpen: boolean; + /** True while generation is active or another busy state gates the picker. */ + disabled: boolean; + /** Called when the user toggles the menu open or closed via the chip. */ + onToggle: () => void; +} -/** Screen position for the portal menu, computed from the trigger rect. */ -interface MenuPosition { - top: number; - left: number; +/** + * Chip-style button that toggles the model picker list. Stateless: the + * parent owns `isOpen` so it can coordinate with the outside-click listener + * and the inline {@link ModelPickerList} that renders above the input row. + */ +export function ModelPickerTrigger({ + triggerRef, + isOpen, + disabled, + onToggle, +}: ModelPickerTriggerProps) { + return ( + + ); } -/** Props for the {@link ModelPicker} component. */ -export interface ModelPickerProps { - /** Currently active model slug; the matching row renders an orange tick. */ +/** Props for {@link ModelPickerList}. */ +export interface ModelPickerListProps { + /** Ref forwarded to the list wrapper for outside-click discrimination. */ + listRef?: RefObject; + /** Currently active model slug; the matching row renders an orange check. 
*/ activeModel: string; /** Full list of available model slugs from Ollama's tags endpoint. */ models: string[]; - /** When true the trigger is inert (e.g. during generation) and any open menu closes. */ - disabled: boolean; - /** Called with the chosen slug when the user picks a row. */ + /** When true the list animates in; when false it animates out. */ + isOpen: boolean; + /** Called with the chosen slug when the user clicks a row. */ onSelect: (model: string) => void; } /** - * Single self-contained model picker rendered as a portal menu. - * - * The menu escapes the ask bar's morphing container (which sets - * `overflow-hidden`) by rendering into `document.body` via - * {@link createPortal}. That keeps the Thuki window size stable while the - * menu floats above it like a native macOS NSMenu. + * Animated full-width list rendered above the ask bar input row. Sits inside + * the morphing container (not a portal), so the existing ResizeObserver picks + * up the added height and grows the Tauri window upward to reveal it. * - * Positioning algorithm: - * 1. Read the trigger's `getBoundingClientRect` on open. - * 2. Right-align the menu to the trigger, clamped to 8px from the left edge. - * 3. Prefer opening above the trigger. If that would clip above the - * viewport, open below instead. Uses a two-phase rAF measurement: - * render once to measure the menu height, then adjust `top`. - * 4. Re-run on every scroll / resize / window blur while the menu is open. - * - * All listeners (scroll, resize, mousedown, keydown) are attached in a single - * effect gated on {@link showMenu} and removed on close or unmount. + * Visual layout: + * - Outer wrapper is full-width with no right alignment so the window grows + * cleanly and there is no blank "void" on the left side. + * - Inner `px-3 pt-2 pb-1` padding matches the ask bar's horizontal chrome. 
+ * - The card (`rounded-xl border bg-surface-elevated/40`) fills the full + * width between the padding, reading as a slight elevation on the main + * surface-base background. */ -export function ModelPicker({ +export function ModelPickerList({ + listRef, activeModel, models, - disabled, + isOpen, onSelect, -}: ModelPickerProps) { - const [isOpen, setIsOpen] = useState(false); - const [position, setPosition] = useState(null); - - const triggerRef = useRef(null); - const menuRef = useRef(null); - - /** - * Combined open gate: hides the menu if the picker becomes disabled or - * empty while the user intent (`isOpen`) is still true. The underlying - * `isOpen` state is still reset to false by the disabled-sync effect - * below so re-enabling does not reopen a stale menu. - */ - const showMenu = isOpen && !disabled && models.length > 0; - - /** Recomputes the menu position from the current trigger rect. */ - const updatePosition = useCallback(() => { - const trigger = triggerRef.current; - /* v8 ignore start -- trigger ref is always set while the menu can be open; - guard is defensive for concurrent unmount. */ - if (!trigger) return; - /* v8 ignore stop */ - const rect = trigger.getBoundingClientRect(); - const left = Math.max(EDGE_PADDING, rect.right - MENU_WIDTH); - - const menuEl = menuRef.current; - const menuHeight = menuEl?.offsetHeight ?? 0; - let top = rect.top - menuHeight - MENU_GAP; - if (top < 0) { - top = rect.bottom + MENU_GAP; - } - setPosition({ top, left }); - }, []); - - /** - * First-frame position: read the rect synchronously so the menu mounts - * at an approximate spot, then the effect below re-measures height and - * flips above/below on the following frame. - */ - /* eslint-disable @eslint-react/set-state-in-effect -- intentional: seeding - the initial menu position from the trigger rect is exactly what a layout - effect is for. 
The rAF inside the next effect corrects the top coordinate - once the menu has laid out and a real height is available. */ - useLayoutEffect(() => { - if (!showMenu) { - setPosition(null); - return; - } - const trigger = triggerRef.current; - /* v8 ignore start -- showMenu implies trigger is mounted; defensive guard. */ - if (!trigger) return; - /* v8 ignore stop */ - const rect = trigger.getBoundingClientRect(); - const left = Math.max(EDGE_PADDING, rect.right - MENU_WIDTH); - // Start above the trigger by an estimated offset so the first paint - // is close to final. The rAF below corrects based on real height. - setPosition({ top: rect.top - MENU_GAP, left }); - }, [showMenu]); - /* eslint-enable @eslint-react/set-state-in-effect */ - - /** - * Attaches all live listeners for the open menu and re-measures once the - * menu has laid out so the above/below flip uses the real height. - */ - useEffect(() => { - if (!showMenu) return; - - // Re-measure after the portal has rendered once. - const rafId = requestAnimationFrame(updatePosition); - - const handleScroll = () => { - requestAnimationFrame(updatePosition); - }; - const handleResize = () => { - requestAnimationFrame(updatePosition); - }; - const handleMouseDown = (event: MouseEvent) => { - const target = event.target as Node; - if (triggerRef.current?.contains(target)) return; - if (menuRef.current?.contains(target)) return; - setIsOpen(false); - }; - const handleKeyDown = (event: KeyboardEvent) => { - if (event.key === 'Escape') { - setIsOpen(false); - } - }; - - window.addEventListener('scroll', handleScroll, { passive: true }); - window.addEventListener('resize', handleResize, { passive: true }); - document.addEventListener('mousedown', handleMouseDown); - document.addEventListener('keydown', handleKeyDown); - - return () => { - cancelAnimationFrame(rafId); - window.removeEventListener('scroll', handleScroll); - window.removeEventListener('resize', handleResize); - document.removeEventListener('mousedown', 
handleMouseDown); - document.removeEventListener('keydown', handleKeyDown); - }; - }, [showMenu, updatePosition]); - - /** - * When the picker becomes disabled (e.g. generation starts), collapse - * the open intent so re-enabling does not reopen a stale menu. - */ - /* eslint-disable @eslint-react/set-state-in-effect -- intentional: mirror the - disabled prop into the local open state so a mid-open disable cleanly - closes. No secondary effects are triggered by this reset. */ - useEffect(() => { - if (disabled && isOpen) setIsOpen(false); - }, [disabled, isOpen]); - /* eslint-enable @eslint-react/set-state-in-effect */ - - const handleToggle = useCallback(() => { - setIsOpen((prev) => !prev); - }, []); - - const handleRowClick = useCallback( - (model: string) => { - onSelect(model); - setIsOpen(false); - }, - [onSelect], - ); - - if (models.length === 0) return null; - - /* v8 ignore next 2 -- SSR guard; Tauri + happy-dom always provide document. */ - const portalTarget = typeof document !== 'undefined' ? document.body : null; - +}: ModelPickerListProps) { return ( - <> - - {portalTarget && - createPortal( - - {showMenu && position && ( - - {models.map((model) => { - const active = model === activeModel; - return ( - - ); - })} - - )} - , - portalTarget, - )} - + + {isOpen && models.length > 0 && ( + +
+
+ {models.map((model) => ( + + ))} +
+
+
+ )} +
); } diff --git a/src/components/__tests__/ModelPicker.test.tsx b/src/components/__tests__/ModelPicker.test.tsx index b1e56fd4..c33baa47 100644 --- a/src/components/__tests__/ModelPicker.test.tsx +++ b/src/components/__tests__/ModelPicker.test.tsx @@ -1,235 +1,130 @@ -import { render, screen, fireEvent, act } from '@testing-library/react'; +import { render, screen, fireEvent } from '@testing-library/react'; import { describe, it, expect, vi } from 'vitest'; -import { ModelPicker } from '../ModelPicker'; +import { ModelPickerList, ModelPickerTrigger } from '../ModelPicker'; -/** Renders a ModelPicker with a small default model list. */ -function renderPicker( - overrides: Partial> = {}, -) { - const props: React.ComponentProps = { - activeModel: 'gemma4:e2b', - models: ['gemma4:e2b', 'qwen2.5:7b'], - disabled: false, - onSelect: vi.fn(), - ...overrides, - }; - return { props, ...render() }; -} +// ─── ModelPickerTrigger ───────────────────────────────────────────────────── -describe('ModelPicker', () => { - it('does not render when models list is empty', () => { - const { container } = renderPicker({ models: [], activeModel: '' }); - expect(container.firstChild).toBeNull(); - expect(screen.queryByRole('button', { name: 'Choose model' })).toBeNull(); - }); - - it('renders the Choose model trigger with chip icon', () => { - const { container } = renderPicker(); +describe('ModelPickerTrigger', () => { + it('renders the Choose model button with aria-expanded=false when closed', () => { + render( + , + ); const trigger = screen.getByRole('button', { name: 'Choose model' }); expect(trigger).toBeInTheDocument(); expect(trigger).toHaveAttribute('aria-expanded', 'false'); expect(trigger).toHaveAttribute('aria-haspopup', 'menu'); - // The chip icon is rendered inside the trigger. 
- expect(container.querySelector('svg')).not.toBeNull(); }); - it('opens menu on click with aria-expanded true', () => { - renderPicker(); - const trigger = screen.getByRole('button', { name: 'Choose model' }); - fireEvent.click(trigger); - expect(trigger).toHaveAttribute('aria-expanded', 'true'); - expect(screen.getByRole('menu')).toBeInTheDocument(); + it('renders with aria-expanded=true when open', () => { + render( + , + ); + expect( + screen.getByRole('button', { name: 'Choose model' }), + ).toHaveAttribute('aria-expanded', 'true'); + }); + + it('fires onToggle on click', () => { + const onToggle = vi.fn(); + render( + , + ); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect(onToggle).toHaveBeenCalledTimes(1); }); - it('portals the menu to document.body (not inside trigger DOM)', () => { - const { container } = renderPicker(); + it('does not fire onToggle when disabled', () => { + const onToggle = vi.fn(); + render( + , + ); fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - const menu = screen.getByRole('menu'); - // Menu is NOT inside the component container. - expect(container.contains(menu)).toBe(false); - // Menu IS a descendant of document.body. 
- expect(document.body.contains(menu)).toBe(true); + expect(onToggle).not.toHaveBeenCalled(); }); +}); - it('lists each model slug LEFT of the check icon', () => { - renderPicker(); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); +// ─── ModelPickerList ──────────────────────────────────────────────────────── + +describe('ModelPickerList', () => { + const DEFAULT_MODELS = ['gemma4:e2b', 'qwen2.5:7b']; + + it('renders nothing when closed', () => { + render( + , + ); + expect(screen.queryByRole('menu')).toBeNull(); + }); + + it('renders nothing when models list is empty', () => { + render( + , + ); + expect(screen.queryByRole('menu')).toBeNull(); + }); + + it('renders rows with slug on the left and check on the right when open', () => { + render( + , + ); const firstRow = screen.getByRole('menuitem', { name: 'gemma4:e2b' }); const slug = firstRow.querySelector('span'); const check = firstRow.querySelector('svg'); expect(slug).not.toBeNull(); expect(check).not.toBeNull(); expect(slug!.textContent).toBe('gemma4:e2b'); - // DOM order: slug precedes the check svg. 
const children = Array.from(firstRow.children); expect(children.indexOf(slug!)).toBeLessThan(children.indexOf(check!)); }); - it('marks only the active row with visible check (opacity 1 via inline style)', () => { - renderPicker(); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + it('marks only the active row with visible check (opacity 1)', () => { + render( + , + ); const activeRow = screen.getByRole('menuitem', { name: 'gemma4:e2b' }); const inactiveRow = screen.getByRole('menuitem', { name: 'qwen2.5:7b' }); expect(activeRow).toHaveAttribute('aria-current', 'true'); expect(inactiveRow).not.toHaveAttribute('aria-current'); - const activeCheck = activeRow.querySelector('svg')!; - const inactiveCheck = inactiveRow.querySelector('svg')!; - expect((activeCheck as SVGElement).style.opacity).toBe('1'); - expect((inactiveCheck as SVGElement).style.opacity).toBe('0'); + const activeCheck = activeRow.querySelector('svg') as SVGElement; + const inactiveCheck = inactiveRow.querySelector('svg') as SVGElement; + expect(activeCheck.style.opacity).toBe('1'); + expect(inactiveCheck.style.opacity).toBe('0'); }); - it('calls onSelect and closes when row clicked', () => { + it('fires onSelect with the chosen slug when a row is clicked', () => { const onSelect = vi.fn(); - renderPicker({ onSelect }); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + render( + , + ); fireEvent.click(screen.getByRole('menuitem', { name: 'qwen2.5:7b' })); expect(onSelect).toHaveBeenCalledWith('qwen2.5:7b'); - expect(screen.queryByRole('menu')).toBeNull(); - }); - - it('closes on outside click (document mousedown)', () => { - renderPicker(); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - expect(screen.getByRole('menu')).toBeInTheDocument(); - fireEvent.mouseDown(document.body); - expect(screen.queryByRole('menu')).toBeNull(); - }); - - it('keeps the menu open on mousedown inside the menu', () => { - renderPicker(); - 
fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - const row = screen.getByRole('menuitem', { name: 'qwen2.5:7b' }); - fireEvent.mouseDown(row); - expect(screen.getByRole('menu')).toBeInTheDocument(); - }); - - it('keeps the menu open on mousedown on the trigger itself', () => { - renderPicker(); - const trigger = screen.getByRole('button', { name: 'Choose model' }); - fireEvent.click(trigger); - fireEvent.mouseDown(trigger); - expect(screen.getByRole('menu')).toBeInTheDocument(); - }); - - it('closes on Escape key', () => { - renderPicker(); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - expect(screen.getByRole('menu')).toBeInTheDocument(); - fireEvent.keyDown(document, { key: 'Escape' }); - expect(screen.queryByRole('menu')).toBeNull(); - }); - - it('ignores non-Escape document keydown events', () => { - renderPicker(); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - fireEvent.keyDown(document, { key: 'ArrowDown' }); - expect(screen.getByRole('menu')).toBeInTheDocument(); - }); - - it('closes when disabled flips true mid-open', () => { - const { rerender, props } = renderPicker(); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - expect(screen.getByRole('menu')).toBeInTheDocument(); - rerender(); - expect(screen.queryByRole('menu')).toBeNull(); - }); - - it('does not fire onSelect when trigger is disabled (click ignored)', () => { - const onSelect = vi.fn(); - renderPicker({ disabled: true, onSelect }); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - expect(screen.queryByRole('menu')).toBeNull(); - expect(onSelect).not.toHaveBeenCalled(); - }); - - it('repositions on window resize without throwing', () => { - renderPicker(); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - expect(() => { - act(() => { - window.dispatchEvent(new Event('resize')); - }); - }).not.toThrow(); - 
expect(screen.getByRole('menu')).toBeInTheDocument(); - }); - - it('repositions on window scroll without throwing', () => { - renderPicker(); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - expect(() => { - act(() => { - window.dispatchEvent(new Event('scroll')); - }); - }).not.toThrow(); - expect(screen.getByRole('menu')).toBeInTheDocument(); - }); - - it('clicking the trigger while open toggles closed', () => { - renderPicker(); - const trigger = screen.getByRole('button', { name: 'Choose model' }); - fireEvent.click(trigger); - expect(screen.getByRole('menu')).toBeInTheDocument(); - fireEvent.click(trigger); - expect(screen.queryByRole('menu')).toBeNull(); - }); - - it('opens below the trigger when there is no room above', () => { - // Force the trigger rect to read as being at the very top of the viewport - // so the above-trigger math would go negative and the menu must flip below. - const originalGetRect = Element.prototype.getBoundingClientRect; - Element.prototype.getBoundingClientRect = function () { - return { - top: 0, - left: 100, - right: 128, - bottom: 28, - width: 28, - height: 28, - x: 100, - y: 0, - toJSON() { - return {}; - }, - } as DOMRect; - }; - try { - renderPicker(); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - const menu = screen.getByRole('menu'); - // Top coordinate should be non-negative since we flipped below. - const topPx = parseFloat((menu as HTMLElement).style.top); - expect(topPx).toBeGreaterThanOrEqual(0); - } finally { - Element.prototype.getBoundingClientRect = originalGetRect; - } - }); - - it('clamps left to the 8px edge when the trigger is near the left edge', () => { - // Trigger far to the left so right-align math would produce a negative left. 
- const originalGetRect = Element.prototype.getBoundingClientRect; - Element.prototype.getBoundingClientRect = function () { - return { - top: 500, - left: 0, - right: 28, - bottom: 528, - width: 28, - height: 28, - x: 0, - y: 500, - toJSON() { - return {}; - }, - } as DOMRect; - }; - try { - renderPicker(); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - const menu = screen.getByRole('menu'); - const leftPx = parseFloat((menu as HTMLElement).style.left); - expect(leftPx).toBe(8); - } finally { - Element.prototype.getBoundingClientRect = originalGetRect; - } }); }); diff --git a/src/view/AskBarView.tsx b/src/view/AskBarView.tsx index a2c1d6bd..2311cad9 100644 --- a/src/view/AskBarView.tsx +++ b/src/view/AskBarView.tsx @@ -5,7 +5,7 @@ import { formatQuotedText } from '../utils/formatQuote'; import { useConfig } from '../contexts/ConfigContext'; import { ImageThumbnails } from '../components/ImageThumbnails'; import { CommandSuggestion } from '../components/CommandSuggestion'; -import { ModelPicker } from '../components/ModelPicker'; +import { ModelPickerList, ModelPickerTrigger } from '../components/ModelPicker'; import { Tooltip } from '../components/Tooltip'; import type { AttachedImage } from '../types/image'; import { MAX_IMAGE_SIZE_BYTES } from '../types/image'; @@ -303,6 +303,48 @@ export function AskBarView({ onModelSelect, ); + /** Whether the inline model picker list is currently expanded. */ + const [isModelPickerOpen, setIsModelPickerOpen] = useState(false); + const modelPickerTriggerRef = useRef(null); + const modelPickerListRef = useRef(null); + + /** Combined gate: don't show the list while busy or without data. */ + const showModelPicker = isModelPickerOpen && !isBusy && modelPickerAvailable; + + /* Auto-close the picker when generation starts. */ + /* eslint-disable @eslint-react/set-state-in-effect -- intentional: mirror the + busy prop into the local open state so a mid-open busy flip cleanly closes + the list. 
No secondary effects are triggered by this reset. */ + useEffect(() => { + if (isBusy && isModelPickerOpen) setIsModelPickerOpen(false); + }, [isBusy, isModelPickerOpen]); + /* eslint-enable @eslint-react/set-state-in-effect */ + + /* Outside-click closes the picker. Listener attached only while open. */ + useEffect(() => { + if (!showModelPicker) return; + const handleMouseDown = (event: MouseEvent) => { + const target = event.target as Node; + if (modelPickerTriggerRef.current?.contains(target)) return; + if (modelPickerListRef.current?.contains(target)) return; + setIsModelPickerOpen(false); + }; + document.addEventListener('mousedown', handleMouseDown); + return () => document.removeEventListener('mousedown', handleMouseDown); + }, [showModelPicker]); + + const toggleModelPicker = useCallback(() => { + setIsModelPickerOpen((open) => !open); + }, []); + + const handleModelRowSelect = useCallback( + (model: string) => { + onModelSelect?.(model); + setIsModelPickerOpen(false); + }, + [onModelSelect], + ); + // ─── Command suggestion state ───────────────────────────────────────────── /** @@ -602,6 +644,19 @@ export function AskBarView({ )} + {/* Model picker list renders inline above the input row in the same + DOM-flow pattern as CommandSuggestion. The morphing-container + ResizeObserver picks up the added height and grows the Tauri + window upward to reveal the menu without clipping. */} + {modelPickerAvailable && activeModel && availableModels && ( + + )}
)} - {modelPickerAvailable && - activeModel && - availableModels && - onModelSelect && ( - + - )} + + )} { expect(screen.queryByRole('menuitem', { name: 'qwen2.5:7b' })).toBeNull(); }); + it('keeps the popup open when a mousedown lands inside a row before click', () => { + render( + , + ); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + const row = screen.getByRole('menuitem', { name: 'qwen2.5:7b' }); + fireEvent.mouseDown(row); + expect( + screen.getByRole('menuitem', { name: 'qwen2.5:7b' }), + ).toBeInTheDocument(); + }); + + it('keeps the popup open when a mousedown lands on the trigger itself', () => { + render( + , + ); + const trigger = screen.getByRole('button', { name: 'Choose model' }); + fireEvent.click(trigger); + fireEvent.mouseDown(trigger); + expect( + screen.getByRole('menuitem', { name: 'qwen2.5:7b' }), + ).toBeInTheDocument(); + }); + + it('renders a Choose model tooltip wrapper around the trigger', () => { + render( + , + ); + const trigger = screen.getByRole('button', { name: 'Choose model' }); + // Hovering the Tooltip wrapper reveals the label text via portal. 
+ fireEvent.mouseEnter(trigger.parentElement!); + expect(screen.getByText('Choose model')).toBeInTheDocument(); + }); + it('hides the model picker trigger when no models are available', () => { render( Date: Thu, 23 Apr 2026 22:08:00 -0700 Subject: [PATCH 19/42] fix: reserve window space for picker menu and wire Tooltip Signed-off-by: Logan Nguyen --- src/components/ModelPicker.tsx | 419 ++++++++++++------ src/components/Tooltip.tsx | 7 + src/components/__tests__/ModelPicker.test.tsx | 392 ++++++++++++---- src/components/__tests__/Tooltip.test.tsx | 15 + src/view/AskBarView.tsx | 119 +++-- src/view/__tests__/AskBarView.test.tsx | 156 +++++-- 6 files changed, 788 insertions(+), 320 deletions(-) diff --git a/src/components/ModelPicker.tsx b/src/components/ModelPicker.tsx index 0a6c4a31..548cd286 100644 --- a/src/components/ModelPicker.tsx +++ b/src/components/ModelPicker.tsx @@ -1,23 +1,12 @@ import { AnimatePresence, motion } from 'framer-motion'; -import type { RefObject } from 'react'; - -/** - * Model picker is split into two exports so that the opening/closing state and - * outside-click lifecycle live in {@link AskBarView}, which also owns the Tauri - * window sizing via a morphing-container ResizeObserver. The list renders - * **inline in the DOM flow** (not via a portal) so the ResizeObserver detects - * the added height and grows the native window upward to reveal the menu. - * - * - {@link ModelPickerTrigger} - stateless chip button; wrapped in a `Tooltip` - * at the call site rather than internally. - * - {@link ModelPickerList} - animated, full-width inline list shown above the - * ask bar input row, following the same pattern as `CommandSuggestion`. - * - * An earlier implementation rendered the menu via `createPortal(document.body)` - * to escape the ask bar's `overflow-hidden` chrome, but the portal was still - * bounded by the Tauri web view which is only ~80px tall in ask-bar mode, so - * menus of 50-160px clipped. 
DOM-flow rendering grows the window naturally. - */ +import { + useCallback, + useEffect, + useLayoutEffect, + useRef, + useState, +} from 'react'; +import { createPortal } from 'react-dom'; /** * Hoisted static SVG - chip-style trigger icon for the model picker. @@ -52,132 +41,308 @@ const CHIP_ICON = ( ); -/** Props for {@link ModelPickerTrigger}. */ -export interface ModelPickerTriggerProps { - /** Ref forwarded to the trigger button for outside-click discrimination. */ - triggerRef?: RefObject; - /** Whether the associated list is currently expanded. Drives `aria-expanded`. */ - isOpen: boolean; - /** True while generation is active or another busy state gates the picker. */ - disabled: boolean; - /** Called when the user toggles the menu open or closed via the chip. */ - onToggle: () => void; +/** Hoisted static checkmark path used on the active row. */ +const CHECK_ICON_PATH = ( + +); + +/** Fixed target width for the portal menu in pixels. */ +const MENU_WIDTH = 220; +/** Viewport-edge padding used when clamping the left position. */ +const EDGE_PADDING = 8; +/** Vertical gap between the trigger and the menu. */ +const MENU_GAP = 8; + +/** Screen position for the portal menu, computed from the trigger rect. */ +interface MenuPosition { + top: number; + left: number; } /** - * Chip-style button that toggles the model picker list. Stateless: the - * parent owns `isOpen` so it can coordinate with the outside-click listener - * and the inline {@link ModelPickerList} that renders above the input row. + * State snapshot emitted by {@link ModelPicker} via `onMenuChange`. + * + * `openDirection` reflects the flip decision taken in `updatePosition`: + * "above" when the menu fits above the trigger, "below" when it flipped + * downward because there was no room above. `height` is the measured + * `offsetHeight` of the menu (0 before the first rAF measurement and after + * close). 
Consumers use these fields to reserve native window space below + * the trigger when the menu opens downward. */ -export function ModelPickerTrigger({ - triggerRef, - isOpen, - disabled, - onToggle, -}: ModelPickerTriggerProps) { - return ( - - ); +export interface ModelPickerMenuState { + /** True while the portal menu is mounted and visible. */ + isOpen: boolean; + /** Measured menu height in pixels, or 0 before first measurement / after close. */ + height: number; + /** Resolved placement: above the trigger, or flipped below it. */ + openDirection: 'above' | 'below'; } -/** Props for {@link ModelPickerList}. */ -export interface ModelPickerListProps { - /** Ref forwarded to the list wrapper for outside-click discrimination. */ - listRef?: RefObject; - /** Currently active model slug; the matching row renders an orange check. */ +/** Props for the {@link ModelPicker} component. */ +export interface ModelPickerProps { + /** Currently active model slug; the matching row renders an orange tick. */ activeModel: string; /** Full list of available model slugs from Ollama's tags endpoint. */ models: string[]; - /** When true the list animates in; when false it animates out. */ - isOpen: boolean; - /** Called with the chosen slug when the user clicks a row. */ + /** When true the trigger is inert (e.g. during generation) and any open menu closes. */ + disabled: boolean; + /** Called with the chosen slug when the user picks a row. */ onSelect: (model: string) => void; + /** + * Emitted when the portal menu opens, resizes, or closes. Consumers use + * this to reserve native window space below the trigger when the menu + * flips downward (ask-bar-only mode), since the NSPanel frame clips + * content that extends past the ask-bar row. + */ + onMenuChange?: (state: ModelPickerMenuState) => void; } /** - * Animated full-width list rendered above the ask bar input row. 
Sits inside - * the morphing container (not a portal), so the existing ResizeObserver picks - * up the added height and grows the Tauri window upward to reveal it. + * Single self-contained model picker rendered as a portal menu. + * + * The menu escapes the ask bar's morphing container (which sets + * `overflow-hidden`) by rendering into `document.body` via + * {@link createPortal}. That keeps the Thuki window size stable while the + * menu floats above it like a native macOS NSMenu. + * + * Positioning algorithm: + * 1. Read the trigger's `getBoundingClientRect` on open. + * 2. Right-align the menu to the trigger, clamped to 8px from the left edge. + * 3. Prefer opening above the trigger. If that would clip above the + * viewport, open below instead. Uses a two-phase rAF measurement: + * render once to measure the menu height, then adjust `top`. + * 4. Re-run on every scroll / resize / window blur while the menu is open. * - * Visual layout: - * - Outer wrapper is full-width with no right alignment so the window grows - * cleanly and there is no blank "void" on the left side. - * - Inner `px-3 pt-2 pb-1` padding matches the ask bar's horizontal chrome. - * - The card (`rounded-xl border bg-surface-elevated/40`) fills the full - * width between the padding, reading as a slight elevation on the main - * surface-base background. + * All listeners (scroll, resize, mousedown, keydown) are attached in a single + * effect gated on {@link showMenu} and removed on close or unmount. 
*/ -export function ModelPickerList({ - listRef, +export function ModelPicker({ activeModel, models, - isOpen, + disabled, onSelect, -}: ModelPickerListProps) { + onMenuChange, +}: ModelPickerProps) { + const [isOpen, setIsOpen] = useState(false); + const [position, setPosition] = useState(null); + + const triggerRef = useRef(null); + const menuRef = useRef(null); + + /** + * Combined open gate: hides the menu if the picker becomes disabled or + * empty while the user intent (`isOpen`) is still true. The underlying + * `isOpen` state is still reset to false by the disabled-sync effect + * below so re-enabling does not reopen a stale menu. + */ + const showMenu = isOpen && !disabled && models.length > 0; + + /** Recomputes the menu position from the current trigger rect. */ + const updatePosition = useCallback(() => { + const trigger = triggerRef.current; + /* v8 ignore start -- trigger ref is always set while the menu can be open; + guard is defensive for concurrent unmount. */ + if (!trigger) return; + /* v8 ignore stop */ + const rect = trigger.getBoundingClientRect(); + const left = Math.max(EDGE_PADDING, rect.right - MENU_WIDTH); + + const menuEl = menuRef.current; + const menuHeight = menuEl?.offsetHeight ?? 0; + let top = rect.top - menuHeight - MENU_GAP; + if (top < 0) { + top = rect.bottom + MENU_GAP; + } + // `top < rect.top` means we kept the above placement; otherwise we flipped + // below the trigger. Emitted so the host can reserve window space below + // the trigger when the menu flipped downward. + const openDirection: 'above' | 'below' = top < rect.top ? 'above' : 'below'; + setPosition({ top, left }); + onMenuChange?.({ isOpen: true, height: menuHeight, openDirection }); + }, [onMenuChange]); + + /** + * First-frame position: read the rect synchronously so the menu mounts + * at an approximate spot, then the effect below re-measures height and + * flips above/below on the following frame. 
+ */ + /* eslint-disable @eslint-react/set-state-in-effect -- intentional: seeding + the initial menu position from the trigger rect is exactly what a layout + effect is for. The rAF inside the next effect corrects the top coordinate + once the menu has laid out and a real height is available. */ + useLayoutEffect(() => { + if (!showMenu) { + setPosition(null); + return; + } + const trigger = triggerRef.current; + /* v8 ignore start -- showMenu implies trigger is mounted; defensive guard. */ + if (!trigger) return; + /* v8 ignore stop */ + const rect = trigger.getBoundingClientRect(); + const left = Math.max(EDGE_PADDING, rect.right - MENU_WIDTH); + // Start above the trigger by an estimated offset so the first paint + // is close to final. The rAF below corrects based on real height. + setPosition({ top: rect.top - MENU_GAP, left }); + }, [showMenu]); + /* eslint-enable @eslint-react/set-state-in-effect */ + + /** + * Attaches all live listeners for the open menu and re-measures once the + * menu has laid out so the above/below flip uses the real height. + */ + useEffect(() => { + if (!showMenu) return; + + // Re-measure after the portal has rendered once. 
+ const rafId = requestAnimationFrame(updatePosition); + + const handleScroll = () => { + requestAnimationFrame(updatePosition); + }; + const handleResize = () => { + requestAnimationFrame(updatePosition); + }; + const handleMouseDown = (event: MouseEvent) => { + const target = event.target as Node; + if (triggerRef.current?.contains(target)) return; + if (menuRef.current?.contains(target)) return; + setIsOpen(false); + }; + const handleKeyDown = (event: KeyboardEvent) => { + if (event.key === 'Escape') { + setIsOpen(false); + } + }; + + window.addEventListener('scroll', handleScroll, { passive: true }); + window.addEventListener('resize', handleResize, { passive: true }); + document.addEventListener('mousedown', handleMouseDown); + document.addEventListener('keydown', handleKeyDown); + + return () => { + cancelAnimationFrame(rafId); + window.removeEventListener('scroll', handleScroll); + window.removeEventListener('resize', handleResize); + document.removeEventListener('mousedown', handleMouseDown); + document.removeEventListener('keydown', handleKeyDown); + }; + }, [showMenu, updatePosition]); + + /** + * Notify the host when the menu transitions to closed, including on + * unmount while the menu is still open, so any reserved native window + * space can collapse. Open-state emissions happen from inside + * {@link updatePosition} where the real measured height and flip + * direction are known. + */ + useEffect(() => { + if (!showMenu) { + onMenuChange?.({ isOpen: false, height: 0, openDirection: 'below' }); + return; + } + return () => { + onMenuChange?.({ isOpen: false, height: 0, openDirection: 'below' }); + }; + }, [showMenu, onMenuChange]); + + /** + * When the picker becomes disabled (e.g. generation starts), collapse + * the open intent so re-enabling does not reopen a stale menu. + */ + /* eslint-disable @eslint-react/set-state-in-effect -- intentional: mirror the + disabled prop into the local open state so a mid-open disable cleanly + closes. 
No secondary effects are triggered by this reset. */ + useEffect(() => { + if (disabled && isOpen) setIsOpen(false); + }, [disabled, isOpen]); + /* eslint-enable @eslint-react/set-state-in-effect */ + + const handleToggle = useCallback(() => { + setIsOpen((prev) => !prev); + }, []); + + const handleRowClick = useCallback( + (model: string) => { + onSelect(model); + setIsOpen(false); + }, + [onSelect], + ); + + if (models.length === 0) return null; + + /* v8 ignore next 2 -- SSR guard; Tauri + happy-dom always provide document. */ + const portalTarget = typeof document !== 'undefined' ? document.body : null; + return ( - - {isOpen && models.length > 0 && ( - -
-
- {models.map((model) => ( - - ))} -
-
-
- )} -
+ <> + + {portalTarget && + createPortal( + + {showMenu && position && ( + + {models.map((model) => { + const active = model === activeModel; + return ( + + ); + })} + + )} + , + portalTarget, + )} + ); } diff --git a/src/components/Tooltip.tsx b/src/components/Tooltip.tsx index c5dc26e1..5df2e8e4 100644 --- a/src/components/Tooltip.tsx +++ b/src/components/Tooltip.tsx @@ -71,6 +71,12 @@ export function Tooltip({ setIsVisible(true); }; + /** + * Hides the tooltip. Fired on both `mouseleave` and `mousedown` so a click + * on a tooltipped trigger that opens a popup (e.g. the model picker) + * dismisses the tooltip instead of letting it overlap the popup. The + * tooltip reappears normally on the next fresh hover. + */ const handleMouseLeave = () => { setIsVisible(false); }; @@ -86,6 +92,7 @@ export function Tooltip({ ref={triggerRef} onMouseEnter={handleMouseEnter} onMouseLeave={handleMouseLeave} + onMouseDown={handleMouseLeave} className={`inline-flex${className ? ` ${className}` : ''}`} > {children} diff --git a/src/components/__tests__/ModelPicker.test.tsx b/src/components/__tests__/ModelPicker.test.tsx index c33baa47..c386234c 100644 --- a/src/components/__tests__/ModelPicker.test.tsx +++ b/src/components/__tests__/ModelPicker.test.tsx @@ -1,130 +1,332 @@ -import { render, screen, fireEvent } from '@testing-library/react'; +import { + render, + screen, + fireEvent, + act, + waitFor, +} from '@testing-library/react'; import { describe, it, expect, vi } from 'vitest'; -import { ModelPickerList, ModelPickerTrigger } from '../ModelPicker'; +import { ModelPicker } from '../ModelPicker'; -// ─── ModelPickerTrigger ───────────────────────────────────────────────────── +/** Renders a ModelPicker with a small default model list. 
*/ +function renderPicker( + overrides: Partial> = {}, +) { + const props: React.ComponentProps = { + activeModel: 'gemma4:e2b', + models: ['gemma4:e2b', 'qwen2.5:7b'], + disabled: false, + onSelect: vi.fn(), + ...overrides, + }; + return { props, ...render() }; +} -describe('ModelPickerTrigger', () => { - it('renders the Choose model button with aria-expanded=false when closed', () => { - render( - , - ); +describe('ModelPicker', () => { + it('does not render when models list is empty', () => { + const { container } = renderPicker({ models: [], activeModel: '' }); + expect(container.firstChild).toBeNull(); + expect(screen.queryByRole('button', { name: 'Choose model' })).toBeNull(); + }); + + it('renders the Choose model trigger with chip icon', () => { + const { container } = renderPicker(); const trigger = screen.getByRole('button', { name: 'Choose model' }); expect(trigger).toBeInTheDocument(); expect(trigger).toHaveAttribute('aria-expanded', 'false'); expect(trigger).toHaveAttribute('aria-haspopup', 'menu'); + // The chip icon is rendered inside the trigger. 
+ expect(container.querySelector('svg')).not.toBeNull(); }); - it('renders with aria-expanded=true when open', () => { - render( - , - ); - expect( - screen.getByRole('button', { name: 'Choose model' }), - ).toHaveAttribute('aria-expanded', 'true'); - }); - - it('fires onToggle on click', () => { - const onToggle = vi.fn(); - render( - , - ); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - expect(onToggle).toHaveBeenCalledTimes(1); + it('opens menu on click with aria-expanded true', () => { + renderPicker(); + const trigger = screen.getByRole('button', { name: 'Choose model' }); + fireEvent.click(trigger); + expect(trigger).toHaveAttribute('aria-expanded', 'true'); + expect(screen.getByRole('menu')).toBeInTheDocument(); }); - it('does not fire onToggle when disabled', () => { - const onToggle = vi.fn(); - render( - , - ); + it('portals the menu to document.body (not inside trigger DOM)', () => { + const { container } = renderPicker(); fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - expect(onToggle).not.toHaveBeenCalled(); + const menu = screen.getByRole('menu'); + // Menu is NOT inside the component container. + expect(container.contains(menu)).toBe(false); + // Menu IS a descendant of document.body. 
+ expect(document.body.contains(menu)).toBe(true); }); -}); - -// ─── ModelPickerList ──────────────────────────────────────────────────────── - -describe('ModelPickerList', () => { - const DEFAULT_MODELS = ['gemma4:e2b', 'qwen2.5:7b']; - it('renders nothing when closed', () => { - render( - , - ); - expect(screen.queryByRole('menu')).toBeNull(); - }); - - it('renders nothing when models list is empty', () => { - render( - , - ); - expect(screen.queryByRole('menu')).toBeNull(); - }); - - it('renders rows with slug on the left and check on the right when open', () => { - render( - , - ); + it('lists each model slug LEFT of the check icon', () => { + renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); const firstRow = screen.getByRole('menuitem', { name: 'gemma4:e2b' }); const slug = firstRow.querySelector('span'); const check = firstRow.querySelector('svg'); expect(slug).not.toBeNull(); expect(check).not.toBeNull(); expect(slug!.textContent).toBe('gemma4:e2b'); + // DOM order: slug precedes the check svg. 
const children = Array.from(firstRow.children); expect(children.indexOf(slug!)).toBeLessThan(children.indexOf(check!)); }); - it('marks only the active row with visible check (opacity 1)', () => { - render( - , - ); + it('marks only the active row with visible check (opacity 1 via inline style)', () => { + renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); const activeRow = screen.getByRole('menuitem', { name: 'gemma4:e2b' }); const inactiveRow = screen.getByRole('menuitem', { name: 'qwen2.5:7b' }); expect(activeRow).toHaveAttribute('aria-current', 'true'); expect(inactiveRow).not.toHaveAttribute('aria-current'); - const activeCheck = activeRow.querySelector('svg') as SVGElement; - const inactiveCheck = inactiveRow.querySelector('svg') as SVGElement; - expect(activeCheck.style.opacity).toBe('1'); - expect(inactiveCheck.style.opacity).toBe('0'); + const activeCheck = activeRow.querySelector('svg')!; + const inactiveCheck = inactiveRow.querySelector('svg')!; + expect((activeCheck as SVGElement).style.opacity).toBe('1'); + expect((inactiveCheck as SVGElement).style.opacity).toBe('0'); }); - it('fires onSelect with the chosen slug when a row is clicked', () => { + it('calls onSelect and closes when row clicked', () => { const onSelect = vi.fn(); - render( - , - ); + renderPicker({ onSelect }); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); fireEvent.click(screen.getByRole('menuitem', { name: 'qwen2.5:7b' })); expect(onSelect).toHaveBeenCalledWith('qwen2.5:7b'); + expect(screen.queryByRole('menu')).toBeNull(); + }); + + it('closes on outside click (document mousedown)', () => { + renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect(screen.getByRole('menu')).toBeInTheDocument(); + fireEvent.mouseDown(document.body); + expect(screen.queryByRole('menu')).toBeNull(); + }); + + it('keeps the menu open on mousedown inside the menu', () => { + renderPicker(); + 
fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + const row = screen.getByRole('menuitem', { name: 'qwen2.5:7b' }); + fireEvent.mouseDown(row); + expect(screen.getByRole('menu')).toBeInTheDocument(); + }); + + it('keeps the menu open on mousedown on the trigger itself', () => { + renderPicker(); + const trigger = screen.getByRole('button', { name: 'Choose model' }); + fireEvent.click(trigger); + fireEvent.mouseDown(trigger); + expect(screen.getByRole('menu')).toBeInTheDocument(); + }); + + it('closes on Escape key', () => { + renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect(screen.getByRole('menu')).toBeInTheDocument(); + fireEvent.keyDown(document, { key: 'Escape' }); + expect(screen.queryByRole('menu')).toBeNull(); + }); + + it('ignores non-Escape document keydown events', () => { + renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + fireEvent.keyDown(document, { key: 'ArrowDown' }); + expect(screen.getByRole('menu')).toBeInTheDocument(); + }); + + it('closes when disabled flips true mid-open', () => { + const { rerender, props } = renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect(screen.getByRole('menu')).toBeInTheDocument(); + rerender(); + expect(screen.queryByRole('menu')).toBeNull(); + }); + + it('does not fire onSelect when trigger is disabled (click ignored)', () => { + const onSelect = vi.fn(); + renderPicker({ disabled: true, onSelect }); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect(screen.queryByRole('menu')).toBeNull(); + expect(onSelect).not.toHaveBeenCalled(); + }); + + it('repositions on window resize without throwing', () => { + renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect(() => { + act(() => { + window.dispatchEvent(new Event('resize')); + }); + }).not.toThrow(); + 
expect(screen.getByRole('menu')).toBeInTheDocument(); + }); + + it('repositions on window scroll without throwing', () => { + renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + expect(() => { + act(() => { + window.dispatchEvent(new Event('scroll')); + }); + }).not.toThrow(); + expect(screen.getByRole('menu')).toBeInTheDocument(); + }); + + it('clicking the trigger while open toggles closed', () => { + renderPicker(); + const trigger = screen.getByRole('button', { name: 'Choose model' }); + fireEvent.click(trigger); + expect(screen.getByRole('menu')).toBeInTheDocument(); + fireEvent.click(trigger); + expect(screen.queryByRole('menu')).toBeNull(); + }); + + it('opens below the trigger when there is no room above', () => { + // Force the trigger rect to read as being at the very top of the viewport + // so the above-trigger math would go negative and the menu must flip below. + const originalGetRect = Element.prototype.getBoundingClientRect; + Element.prototype.getBoundingClientRect = function () { + return { + top: 0, + left: 100, + right: 128, + bottom: 28, + width: 28, + height: 28, + x: 100, + y: 0, + toJSON() { + return {}; + }, + } as DOMRect; + }; + try { + renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + const menu = screen.getByRole('menu'); + // Top coordinate should be non-negative since we flipped below. 
+ const topPx = parseFloat((menu as HTMLElement).style.top); + expect(topPx).toBeGreaterThanOrEqual(0); + } finally { + Element.prototype.getBoundingClientRect = originalGetRect; + } + }); + + it('emits onMenuChange with openDirection "below" when the menu flips downward', async () => { + const originalGetRect = Element.prototype.getBoundingClientRect; + Element.prototype.getBoundingClientRect = function () { + return { + top: 0, + left: 100, + right: 128, + bottom: 28, + width: 28, + height: 28, + x: 100, + y: 0, + toJSON() { + return {}; + }, + } as DOMRect; + }; + try { + const onMenuChange = vi.fn(); + renderPicker({ onMenuChange }); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + await waitFor(() => { + expect(onMenuChange).toHaveBeenCalledWith( + expect.objectContaining({ isOpen: true, openDirection: 'below' }), + ); + }); + } finally { + Element.prototype.getBoundingClientRect = originalGetRect; + } + }); + + it('emits onMenuChange with openDirection "above" when the menu fits above the trigger', async () => { + const originalGetRect = Element.prototype.getBoundingClientRect; + Element.prototype.getBoundingClientRect = function () { + return { + top: 600, + left: 100, + right: 128, + bottom: 628, + width: 28, + height: 28, + x: 100, + y: 600, + toJSON() { + return {}; + }, + } as DOMRect; + }; + try { + const onMenuChange = vi.fn(); + renderPicker({ onMenuChange }); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + await waitFor(() => { + expect(onMenuChange).toHaveBeenCalledWith( + expect.objectContaining({ isOpen: true, openDirection: 'above' }), + ); + }); + } finally { + Element.prototype.getBoundingClientRect = originalGetRect; + } + }); + + it('emits onMenuChange with isOpen: false when the menu closes via Escape', () => { + const onMenuChange = vi.fn(); + renderPicker({ onMenuChange }); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + onMenuChange.mockClear(); + 
fireEvent.keyDown(document, { key: 'Escape' }); + expect(onMenuChange).toHaveBeenCalledWith( + expect.objectContaining({ isOpen: false, height: 0 }), + ); + }); + + it('emits onMenuChange with isOpen: false when the picker unmounts while open', () => { + const onMenuChange = vi.fn(); + const { unmount } = renderPicker({ onMenuChange }); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + onMenuChange.mockClear(); + unmount(); + expect(onMenuChange).toHaveBeenCalledWith( + expect.objectContaining({ isOpen: false }), + ); + }); + + it('does not require onMenuChange (optional prop)', () => { + renderPicker(); + const trigger = screen.getByRole('button', { name: 'Choose model' }); + expect(() => fireEvent.click(trigger)).not.toThrow(); + expect(screen.getByRole('menu')).toBeInTheDocument(); + }); + + it('clamps left to the 8px edge when the trigger is near the left edge', () => { + // Trigger far to the left so right-align math would produce a negative left. + const originalGetRect = Element.prototype.getBoundingClientRect; + Element.prototype.getBoundingClientRect = function () { + return { + top: 500, + left: 0, + right: 28, + bottom: 528, + width: 28, + height: 28, + x: 0, + y: 500, + toJSON() { + return {}; + }, + } as DOMRect; + }; + try { + renderPicker(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + const menu = screen.getByRole('menu'); + const leftPx = parseFloat((menu as HTMLElement).style.left); + expect(leftPx).toBe(8); + } finally { + Element.prototype.getBoundingClientRect = originalGetRect; + } }); }); diff --git a/src/components/__tests__/Tooltip.test.tsx b/src/components/__tests__/Tooltip.test.tsx index f2c861bb..a119d744 100644 --- a/src/components/__tests__/Tooltip.test.tsx +++ b/src/components/__tests__/Tooltip.test.tsx @@ -110,6 +110,21 @@ describe('Tooltip', () => { expect(wrapper?.classList.contains('inline-flex')).toBe(true); }); + it('hides on mouseDown so the tooltip does not overlap a popup 
the click opens', () => { + render( + + + , + ); + const wrapper = screen.getByRole('button', { + name: 'Trigger', + }).parentElement!; + fireEvent.mouseEnter(wrapper); + expect(screen.getByText('Choose model')).toBeInTheDocument(); + fireEvent.mouseDown(wrapper); + expect(screen.queryByText('Choose model')).not.toBeInTheDocument(); + }); + it('applies extra className to the wrapper div', () => { const { container } = render( diff --git a/src/view/AskBarView.tsx b/src/view/AskBarView.tsx index 2311cad9..13ce59da 100644 --- a/src/view/AskBarView.tsx +++ b/src/view/AskBarView.tsx @@ -5,7 +5,10 @@ import { formatQuotedText } from '../utils/formatQuote'; import { useConfig } from '../contexts/ConfigContext'; import { ImageThumbnails } from '../components/ImageThumbnails'; import { CommandSuggestion } from '../components/CommandSuggestion'; -import { ModelPickerList, ModelPickerTrigger } from '../components/ModelPicker'; +import { + ModelPicker, + type ModelPickerMenuState, +} from '../components/ModelPicker'; import { Tooltip } from '../components/Tooltip'; import type { AttachedImage } from '../types/image'; import { MAX_IMAGE_SIZE_BYTES } from '../types/image'; @@ -287,6 +290,29 @@ export function AskBarView({ /** True briefly after a paste attempt is rejected because max images reached. */ const [pasteMaxError, setPasteMaxError] = useState(false); + /** + * Height reserved below the ask-bar row so the portal model-picker menu is + * not clipped by the Thuki NSPanel frame. Non-zero only when the menu is + * open and flips downward (ask-bar-only mode). The App-level + * ResizeObserver watches the morphing container and grows the native + * window to match, which makes room for the downward-opening popup. + */ + const [modelMenuPadBottom, setModelMenuPadBottom] = useState(0); + + /** + * Syncs reserved space with the picker's flip decision. Only an + * "opened below" state actually needs space: above-flips overlay the + * chat area which is already inside the window. 
+ */ + const handleModelMenuChange = useCallback((state: ModelPickerMenuState) => { + if (state.isOpen && state.openDirection === 'below' && state.height > 0) { + // 8px matches the MENU_GAP used by ModelPicker when placing the popup. + setModelMenuPadBottom(state.height + 8); + } else { + setModelMenuPadBottom(0); + } + }, []); + useEffect(() => { if (!pasteMaxError) return; const timer = setTimeout(() => setPasteMaxError(false), 2000); @@ -303,48 +329,6 @@ export function AskBarView({ onModelSelect, ); - /** Whether the inline model picker list is currently expanded. */ - const [isModelPickerOpen, setIsModelPickerOpen] = useState(false); - const modelPickerTriggerRef = useRef(null); - const modelPickerListRef = useRef(null); - - /** Combined gate: don't show the list while busy or without data. */ - const showModelPicker = isModelPickerOpen && !isBusy && modelPickerAvailable; - - /* Auto-close the picker when generation starts. */ - /* eslint-disable @eslint-react/set-state-in-effect -- intentional: mirror the - busy prop into the local open state so a mid-open busy flip cleanly closes - the list. No secondary effects are triggered by this reset. */ - useEffect(() => { - if (isBusy && isModelPickerOpen) setIsModelPickerOpen(false); - }, [isBusy, isModelPickerOpen]); - /* eslint-enable @eslint-react/set-state-in-effect */ - - /* Outside-click closes the picker. Listener attached only while open. 
*/ - useEffect(() => { - if (!showModelPicker) return; - const handleMouseDown = (event: MouseEvent) => { - const target = event.target as Node; - if (modelPickerTriggerRef.current?.contains(target)) return; - if (modelPickerListRef.current?.contains(target)) return; - setIsModelPickerOpen(false); - }; - document.addEventListener('mousedown', handleMouseDown); - return () => document.removeEventListener('mousedown', handleMouseDown); - }, [showModelPicker]); - - const toggleModelPicker = useCallback(() => { - setIsModelPickerOpen((open) => !open); - }, []); - - const handleModelRowSelect = useCallback( - (model: string) => { - onModelSelect?.(model); - setIsModelPickerOpen(false); - }, - [onModelSelect], - ); - // ─── Command suggestion state ───────────────────────────────────────────── /** @@ -644,19 +628,6 @@ export function AskBarView({ )} - {/* Model picker list renders inline above the input row in the same - DOM-flow pattern as CommandSuggestion. The morphing-container - ResizeObserver picks up the added height and grows the Tauri - window upward to reveal the menu without clipping. */} - {modelPickerAvailable && activeModel && availableModels && ( - - )}
)} - {modelPickerAvailable && ( - - - - )} + {modelPickerAvailable && + activeModel && + availableModels && + onModelSelect && ( + + + + )}
+ {/* Invisible in-flow spacer that reserves space below the ask-bar row + for the model picker's portal menu when it flips downward. The + App-level ResizeObserver watches the morphing container and resizes + the native NSPanel to match, preventing the popup from being + clipped at the window bottom. Height animates to match the 160ms + menu fade-in for visual coherence. */} + ); } diff --git a/src/view/__tests__/AskBarView.test.tsx b/src/view/__tests__/AskBarView.test.tsx index 236da676..18970e85 100644 --- a/src/view/__tests__/AskBarView.test.tsx +++ b/src/view/__tests__/AskBarView.test.tsx @@ -324,37 +324,84 @@ describe('AskBarView', () => { expect(screen.queryByRole('menuitem', { name: 'qwen2.5:7b' })).toBeNull(); }); - it('keeps the popup open when a mousedown lands inside a row before click', () => { - render( - , + it('reserves space below the ask-bar row when the picker opens downward', async () => { + // Force the trigger to read as top:0 so the above-math goes negative + // and ModelPicker flips the menu below. Also override offsetHeight so + // the menu reports a positive height on rAF measurement. 
+ const originalGetRect = Element.prototype.getBoundingClientRect; + Element.prototype.getBoundingClientRect = function () { + return { + top: 0, + left: 100, + right: 128, + bottom: 28, + width: 28, + height: 28, + x: 100, + y: 0, + toJSON() { + return {}; + }, + } as DOMRect; + }; + const originalOffsetHeight = Object.getOwnPropertyDescriptor( + HTMLElement.prototype, + 'offsetHeight', ); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - const row = screen.getByRole('menuitem', { name: 'qwen2.5:7b' }); - fireEvent.mouseDown(row); - expect( - screen.getByRole('menuitem', { name: 'qwen2.5:7b' }), - ).toBeInTheDocument(); + Object.defineProperty(HTMLElement.prototype, 'offsetHeight', { + configurable: true, + get() { + return 120; + }, + }); + try { + const { getByTestId } = render( + , + ); + expect(getByTestId('model-menu-spacer').style.height).toBe('0px'); + + // act-wrap to flush all state updates scheduled by the sync rAF fire. + await act(async () => { + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + }); + // Trigger a resize to force another updatePosition pass with the + // menuRef attached and offsetHeight overridden. + await act(async () => { + window.dispatchEvent(new Event('resize')); + }); + // Measured 120px menu + 8px MENU_GAP = 128px spacer height. 
+ expect(getByTestId('model-menu-spacer').style.height).toBe('128px'); + } finally { + Element.prototype.getBoundingClientRect = originalGetRect; + if (originalOffsetHeight) { + Object.defineProperty( + HTMLElement.prototype, + 'offsetHeight', + originalOffsetHeight, + ); + } + } }); - it('keeps the popup open when a mousedown lands on the trigger itself', () => { + it('collapses reserved space when the picker closes', async () => { render( { onModelSelect={vi.fn()} />, ); - const trigger = screen.getByRole('button', { name: 'Choose model' }); - fireEvent.click(trigger); - fireEvent.mouseDown(trigger); - expect( - screen.getByRole('menuitem', { name: 'qwen2.5:7b' }), - ).toBeInTheDocument(); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + fireEvent.keyDown(document, { key: 'Escape' }); + const spacer = screen.getByTestId('model-menu-spacer'); + expect(spacer.style.height).toBe('0px'); }); - it('renders a Choose model tooltip wrapper around the trigger', () => { + it('does not reserve space when the picker opens above the trigger', async () => { + const originalGetRect = Element.prototype.getBoundingClientRect; + Element.prototype.getBoundingClientRect = function () { + return { + top: 600, + left: 100, + right: 128, + bottom: 628, + width: 28, + height: 28, + x: 100, + y: 600, + toJSON() { + return {}; + }, + } as DOMRect; + }; + try { + render( + , + ); + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + await act(async () => { + await new Promise((resolve) => requestAnimationFrame(resolve)); + }); + const spacer = screen.getByTestId('model-menu-spacer'); + expect(spacer.style.height).toBe('0px'); + } finally { + Element.prototype.getBoundingClientRect = originalGetRect; + } + }); + + it('renders the model picker inside a Choose model tooltip wrapper', () => { render( { />, ); const trigger = screen.getByRole('button', { name: 'Choose model' }); - // Hovering the Tooltip wrapper reveals the label text via portal. 
fireEvent.mouseEnter(trigger.parentElement!); - expect(screen.getByText('Choose model')).toBeInTheDocument(); + // Two tooltip-labeled texts: the aria-label on the trigger AND the + // tooltip content. Filter by tooltip portal text (not the aria-label). + expect(screen.getAllByText('Choose model').length).toBeGreaterThanOrEqual( + 1, + ); }); it('hides the model picker trigger when no models are available', () => { From 04580a66f5f221448a166466521d549a230f3ac4 Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Fri, 24 Apr 2026 10:48:15 -0700 Subject: [PATCH 20/42] style: polish model picker chip styling in WindowControls and ChatBubble - WindowControls.tsx: 13x13 chip icon (matches icon sizes), text-secondary by default, orange on hover via named group/pill to prevent bleed from outer hover - ChatBubble.tsx: attribution chip border rounded-md instead of rounded-full, model slug truncated at max-w-[100px] Signed-off-by: Logan Nguyen --- src/components/ChatBubble.tsx | 4 +- src/components/WindowControls.tsx | 75 +++++++++++++++++++++++++++++-- 2 files changed, 74 insertions(+), 5 deletions(-) diff --git a/src/components/ChatBubble.tsx b/src/components/ChatBubble.tsx index 64306e47..b6e6aa73 100644 --- a/src/components/ChatBubble.tsx +++ b/src/components/ChatBubble.tsx @@ -574,12 +574,12 @@ export function ChatBubble({ {modelName && ( {ATTRIB_CHIP_ICON} - {modelName} + {modelName} )}
diff --git a/src/components/WindowControls.tsx b/src/components/WindowControls.tsx index a95f4aee..03ff502f 100644 --- a/src/components/WindowControls.tsx +++ b/src/components/WindowControls.tsx @@ -66,6 +66,24 @@ const NEW_CONVERSATION_ICON = ( ); +/** Hoisted chip icon for the active-model pill trigger. */ +const CHIP_ICON = ( + +); + /** Hoisted history (clock) icon. */ const HISTORY_ICON = ( void; + /** + * Currently active model slug displayed in the pill trigger. + * Requires `onModelPickerToggle` to be present; omit either to hide the pill. + */ + activeModel?: string; + /** + * Called when the user clicks the active-model pill to open/close the picker. + * Requires `activeModel` to be present; omit either to hide the pill. + */ + onModelPickerToggle?: () => void; + /** Drives `aria-expanded` on the pill button. */ + isModelPickerOpen?: boolean; } /** Decorative dot color for inactive buttons. */ @@ -124,6 +154,9 @@ export const WindowControls = memo(function WindowControls({ canSave = false, onHistoryOpen, onNewConversation, + activeModel, + onModelPickerToggle, + isModelPickerOpen = false, }: WindowControlsProps) { // Disabled only when there is nothing to save yet and the conversation hasn't // been saved. Once saved the button stays active so the user can unsave. @@ -173,8 +206,44 @@ export const WindowControls = memo(function WindowControls({ aria-hidden="true" /> - {/* Right-side header controls - save bookmark + history dropdown */} + {/* Right-side header controls */}
+ {/* Active model pill trigger — leftmost, before save */} + {activeModel !== undefined && onModelPickerToggle !== undefined && ( + + + + )} + {onSave !== undefined && ( {NEW_CONVERSATION_ICON} @@ -220,7 +289,7 @@ export const WindowControls = memo(function WindowControls({ onClick={onHistoryOpen} aria-label="Open history" data-history-toggle - className="w-7 h-7 flex items-center justify-center rounded-lg text-text-secondary hover:text-text-primary hover:bg-white/5 transition-colors duration-150 cursor-pointer" + className="w-7 h-7 flex items-center justify-center rounded-lg text-text-secondary hover:text-primary hover:bg-primary/8 transition-colors duration-150 cursor-pointer" > {HISTORY_ICON} From 3e29e96d72baa482bfa232f24d8d2c4d632a6d83 Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Fri, 24 Apr 2026 14:44:05 -0700 Subject: [PATCH 21/42] feat(model-picker): extract drawer panel + harden selection pipeline Extracts the model picker content into ModelPickerPanel so both the ask-bar drawer and the chat-mode floating dropdown share layout, filtering, and aria-selected semantics. Adds combobox-style keyboard nav (Arrow/Home/End to move the aria-activedescendant, Enter to commit, Escape to close) and unifies click-outside dismissal across both modes so the drawer closes from the ask bar too. Backend: - resolve_seed_active_model: startup seed trusts the persisted choice unconditionally so a valid user selection survives restart even before /api/tags reconciliation. - should_persist_resolved: get_model_picker_state now coalesces the DB read + conditional write into a single critical section and refuses to persist when Ollama reports an empty inventory, eliminating the TOCTOU / clobber-on-empty-list hazards. - validate_model_slug: length + charset guard in front of set_active_model so adversarial IPC inputs cannot reach the network or database layers. 
- fetch_installed_model_names: caps Content-Length and read-back body size at 4 MiB to guard against a misbehaving localhost Ollama. - MODEL_NOT_INSTALLED_ERR_PREFIX: exported stable prefix replaces the prose-matched error contract. - ModelConfig: drops the unused `all` field now that /api/tags is the authoritative installed list. Frontend: - useModelSelection: mounted ref + monotonic request token serialize rapid picks and drop resolutions that arrive after unmount or are superseded by a newer call. - handleModelSelect no longer swallows rejection; a failed set triggers a refreshModels() resync so the chip + list match reality. - ModelPicker chip carries data-model-picker-toggle so the unified click-outside handler ignores the ask-bar trigger too. Tests: - 100% line coverage preserved on both sides; adds coverage for keyboard nav, unmount/stale guards, TOCTOU skip, body-size caps, slug validation, and the no-swallow refresh path. Signed-off-by: Logan Nguyen --- src-tauri/src/lib.rs | 15 +- src-tauri/src/models.rs | 374 ++++++++++++++++-- src/App.tsx | 150 ++++++- src/__tests__/App.test.tsx | 329 ++++++++++++++- src/components/ModelPicker.tsx | 334 ++-------------- src/components/ModelPickerPanel.tsx | 196 +++++++++ src/components/WindowControls.tsx | 2 +- src/components/__tests__/ModelPicker.test.tsx | 329 ++------------- .../__tests__/ModelPickerPanel.test.tsx | 217 ++++++++++ .../__tests__/WindowControls.test.tsx | 86 ++++ .../__tests__/useModelSelection.test.tsx | 111 ++++++ src/hooks/useModelSelection.ts | 55 ++- src/view/AskBarView.tsx | 95 ++--- src/view/ConversationView.tsx | 12 + src/view/__tests__/AskBarView.test.tsx | 222 ++--------- 15 files changed, 1582 insertions(+), 945 deletions(-) create mode 100644 src/components/ModelPickerPanel.tsx create mode 100644 src/components/__tests__/ModelPickerPanel.test.tsx diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index 0cb7d87c..48b70924 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ 
-742,18 +742,15 @@ pub fn run() { // ── Active-model state: seed from SQLite app_config table ── // The installed list isn't queried here (no async runtime yet); - // get_model_picker_state reconciles against the live - // `/api/tags` list on first open and may replace this seed. + // get_model_picker_state reconciles against the live /api/tags + // inventory on first open and may replace this seed. + // resolve_seed_active_model trusts the persisted choice + // unconditionally, so a valid user selection survives restarts + // even before the first picker open reconciles. let persisted_active = database::get_config(&db_conn, models::ACTIVE_MODEL_KEY) .expect("failed to read active_model from app_config"); - // The live installed-model list isn't available at startup (no async - // reqwest here). Passing `&[]` forces `resolve_active_model` to fall - // through to the bootstrap default; the first call to - // `get_model_picker_state` from the frontend reconciles against the - // real list and may replace this seed. - let initial_active_model = models::resolve_active_model( + let initial_active_model = models::resolve_seed_active_model( persisted_active.as_deref(), - &[], &bootstrap_active, ); app.manage(models::ActiveModelState(std::sync::Mutex::new( diff --git a/src-tauri/src/models.rs b/src-tauri/src/models.rs index 472ad20f..0d31af8a 100644 --- a/src-tauri/src/models.rs +++ b/src-tauri/src/models.rs @@ -23,9 +23,25 @@ use crate::history::Database; /// `app_config` key used to persist the user's selected model slug. pub const ACTIVE_MODEL_KEY: &str = "active_model"; +/// Maximum accepted byte length for a model slug passed to `set_active_model`. +/// Real Ollama slugs are a handful of characters; 256 is generous while still +/// capping adversarial inputs long before any network or database work. +pub const MAX_MODEL_SLUG_LEN: usize = 256; + +/// Shared error-message prefix used when a requested slug is not present in +/// the live Ollama inventory. 
Exported so the frontend and tests can match +/// against a stable constant instead of a prose string. +pub const MODEL_NOT_INSTALLED_ERR_PREFIX: &str = "Model is not installed in Ollama: "; + +/// Maximum accepted body size for the `/api/tags` response. Guards against +/// a misbehaving or compromised localhost Ollama streaming an unbounded +/// response that would exhaust memory. 4 MiB comfortably fits thousands of +/// model entries. +const MAX_TAGS_BODY_BYTES: usize = 4 * 1024 * 1024; + /// In-memory cache of the currently active model slug. Written once at -/// startup (after `resolve_active_model`) and updated every time the user -/// picks a new model via `set_active_model`. +/// startup (after `resolve_seed_active_model`) and updated every time the +/// user picks a new model via `set_active_model`. #[derive(Default)] pub struct ActiveModelState(pub Mutex); @@ -51,6 +67,12 @@ struct TagsModel { /// 1. If `persisted` is `Some` and still appears in `installed`, use it. /// 2. Otherwise use the first entry in `installed`. /// 3. Otherwise fall back to `bootstrap` (the compiled-in / env default). +/// +/// This helper assumes `installed` reflects real Ollama ground truth. At +/// startup when no ground truth is available, use +/// [`resolve_seed_active_model`] instead so a valid persisted choice is +/// never overridden by the bootstrap default just because Ollama has not +/// been queried yet. pub fn resolve_active_model( persisted: Option<&str>, installed: &[String], @@ -67,17 +89,66 @@ pub fn resolve_active_model( bootstrap.to_string() } +/// Startup-time resolver that never cross-checks against an installed list. +/// +/// At process start we cannot call Ollama (no async runtime yet), so the +/// safe behavior is to trust the persisted value when present and only fall +/// back to the bootstrap default when nothing was ever persisted. 
The first +/// `get_model_picker_state` call from the frontend reconciles against the +/// real installed list and may replace this seed. +pub fn resolve_seed_active_model(persisted: Option<&str>, bootstrap: &str) -> String { + match persisted { + Some(slug) if !slug.is_empty() => slug.to_string(), + _ => bootstrap.to_string(), + } +} + +/// Returns true when the resolved slug should be written back to persistent +/// storage. Only writes when Ollama actually reported some inventory AND the +/// resolved slug differs from the currently-persisted value. This prevents a +/// partially-up Ollama returning `models:[]` from clobbering a valid +/// persisted user preference with the bootstrap fallback. +pub fn should_persist_resolved( + installed: &[String], + persisted: Option<&str>, + resolved: &str, +) -> bool { + !installed.is_empty() && persisted != Some(resolved) +} + /// Verifies that `model` is present in `installed`. Returns an `Err` with -/// the exact error copy the frontend surfaces when a user somehow requests -/// a slug that is not pulled locally. +/// a stable prefix (see [`MODEL_NOT_INSTALLED_ERR_PREFIX`]) so the frontend +/// can match against a constant rather than a verbatim prose string. pub fn validate_model_installed(model: &str, installed: &[String]) -> Result<(), String> { if installed.iter().any(|m| m == model) { Ok(()) } else { - Err(format!("Model is not installed in Ollama: {model}")) + Err(format!("{MODEL_NOT_INSTALLED_ERR_PREFIX}{model}")) } } +/// Validates shape of a model slug coming across the IPC boundary before any +/// network work. Rejects empty, over-length, and out-of-charset inputs. +/// Accepted charset covers everything real Ollama slugs use: +/// `A-Z a-z 0-9 : . _ / -`. 
+pub fn validate_model_slug(model: &str) -> Result<(), String> { + if model.is_empty() { + return Err("Model name cannot be empty".to_string()); + } + if model.len() > MAX_MODEL_SLUG_LEN { + return Err(format!( + "Model name exceeds maximum length of {MAX_MODEL_SLUG_LEN} bytes" + )); + } + if !model + .chars() + .all(|c| c.is_ascii_alphanumeric() || matches!(c, ':' | '.' | '_' | '/' | '-')) + { + return Err("Model name contains invalid characters".to_string()); + } + Ok(()) +} + /// Per-request timeout for the Ollama `/api/tags` GET. Guards the IPC /// boundary: if the daemon accepts the TCP connection but never responds /// (hung socket, stuck process, network partition), `get_model_picker_state` @@ -87,9 +158,9 @@ const TAGS_REQUEST_TIMEOUT: std::time::Duration = std::time::Duration::from_secs /// GETs `{base_url}/api/tags` and returns the list of installed model slugs. /// -/// Every failure mode (transport error, non-2xx status, JSON decode error) -/// is translated to `Err(String)` so the Tauri command layer can propagate -/// it verbatim to the frontend without panicking. +/// Every failure mode (transport error, non-2xx status, oversized body, +/// JSON decode error) is translated to `Err(String)` so the Tauri command +/// layer can propagate it verbatim to the frontend without panicking. pub async fn fetch_installed_model_names( client: &reqwest::Client, base_url: &str, @@ -104,6 +175,18 @@ async fn fetch_installed_model_names_with_timeout( client: &reqwest::Client, base_url: &str, timeout: std::time::Duration, +) -> Result, String> { + fetch_installed_model_names_inner(client, base_url, timeout, MAX_TAGS_BODY_BYTES).await +} + +/// Innermost implementation of the tags fetcher with both timeout and body +/// size cap configurable. Exists so the size-cap branches can be exercised +/// deterministically in tests without allocating production-scale buffers. 
+async fn fetch_installed_model_names_inner( + client: &reqwest::Client, + base_url: &str, + timeout: std::time::Duration, + max_body_bytes: usize, ) -> Result, String> { let url = format!("{}/api/tags", base_url.trim_end_matches('/')); let response = client @@ -120,9 +203,26 @@ async fn fetch_installed_model_names_with_timeout( )); } - let body: TagsResponse = response - .json() + if let Some(declared_len) = response.content_length() { + if declared_len as usize > max_body_bytes { + return Err(format!( + "/api/tags response exceeded {max_body_bytes} bytes" + )); + } + } + + let bytes = response + .bytes() .await + .map_err(|e| format!("failed to read /api/tags body: {e}"))?; + + if bytes.len() > max_body_bytes { + return Err(format!( + "/api/tags response exceeded {max_body_bytes} bytes" + )); + } + + let body: TagsResponse = serde_json::from_slice(&bytes) .map_err(|e| format!("failed to decode /api/tags response: {e}"))?; Ok(body.models.into_iter().map(|m| m.name).collect()) @@ -132,6 +232,11 @@ async fn fetch_installed_model_names_with_timeout( /// persisting the resolved active model so future launches see it. /// /// Shape: `{ "active": "", "all": ["", ...] }`. +/// +/// Coalesces the read + conditional write into a single database critical +/// section to avoid a TOCTOU window where a concurrent `set_active_model` +/// could be clobbered, and refuses to persist when Ollama reports an empty +/// inventory so a partially-up daemon cannot corrupt the persisted choice. #[cfg_attr(coverage_nightly, coverage(off))] #[cfg_attr(not(coverage), tauri::command)] pub async fn get_model_picker_state( @@ -142,18 +247,17 @@ pub async fn get_model_picker_state( ) -> Result { let installed = fetch_installed_model_names(&client, DEFAULT_OLLAMA_URL).await?; - let persisted = { + let resolved = { let conn = db.0.lock().map_err(|e| e.to_string())?; - get_config(&conn, ACTIVE_MODEL_KEY).map_err(|e| e.to_string())? 
+ let persisted = get_config(&conn, ACTIVE_MODEL_KEY).map_err(|e| e.to_string())?; + let resolved = + resolve_active_model(persisted.as_deref(), &installed, app_config.model.active()); + if should_persist_resolved(&installed, persisted.as_deref(), &resolved) { + set_config(&conn, ACTIVE_MODEL_KEY, &resolved).map_err(|e| e.to_string())?; + } + resolved }; - let resolved = resolve_active_model(persisted.as_deref(), &installed, app_config.model.active()); - - { - let conn = db.0.lock().map_err(|e| e.to_string())?; - set_config(&conn, ACTIVE_MODEL_KEY, &resolved).map_err(|e| e.to_string())?; - } - { let mut guard = active_model.0.lock().map_err(|e| e.to_string())?; *guard = resolved.clone(); @@ -162,9 +266,9 @@ pub async fn get_model_picker_state( Ok(serde_json::json!({ "active": resolved, "all": installed })) } -/// Persists `model` as the active model after validating that Ollama still -/// reports it as installed. Rejects uninstalled slugs with the exact error -/// copy `"Model is not installed in Ollama: {model}"`. +/// Persists `model` as the active model after validating its shape and +/// confirming Ollama still reports it as installed. Rejects uninstalled +/// slugs with an error that starts with [`MODEL_NOT_INSTALLED_ERR_PREFIX`]. #[cfg_attr(coverage_nightly, coverage(off))] #[cfg_attr(not(coverage), tauri::command)] pub async fn set_active_model( @@ -173,6 +277,8 @@ pub async fn set_active_model( db: tauri::State<'_, Database>, active_model: tauri::State<'_, ActiveModelState>, ) -> Result<(), String> { + validate_model_slug(&model)?; + let installed = fetch_installed_model_names(&client, DEFAULT_OLLAMA_URL).await?; validate_model_installed(&model, &installed)?; @@ -228,12 +334,64 @@ mod tests { #[test] fn resolve_with_empty_persisted_bootstrap_used_when_installed_empty() { let installed: Vec = vec![]; - // Persisted is present but installed list is empty: bootstrap wins - // because there's nothing to cross-check against. 
let result = resolve_active_model(Some("gemma4:e2b"), &installed, "fallback"); assert_eq!(result, "fallback"); } + // ── resolve_seed_active_model ──────────────────────────────────────────── + + #[test] + fn seed_resolve_prefers_persisted() { + let result = resolve_seed_active_model(Some("llama3:8b"), "bootstrap-model"); + assert_eq!(result, "llama3:8b"); + } + + #[test] + fn seed_resolve_falls_back_to_bootstrap_when_none() { + let result = resolve_seed_active_model(None, "bootstrap-model"); + assert_eq!(result, "bootstrap-model"); + } + + #[test] + fn seed_resolve_falls_back_to_bootstrap_when_empty_persisted() { + let result = resolve_seed_active_model(Some(""), "bootstrap-model"); + assert_eq!(result, "bootstrap-model"); + } + + // ── should_persist_resolved ───────────────────────────────────────────── + + #[test] + fn should_persist_true_when_resolved_differs_and_inventory_present() { + let installed = vec!["gemma4:e2b".to_string()]; + assert!(should_persist_resolved( + &installed, + Some("llama3:8b"), + "gemma4:e2b" + )); + } + + #[test] + fn should_persist_false_when_resolved_matches_persisted() { + let installed = vec!["gemma4:e2b".to_string()]; + assert!(!should_persist_resolved( + &installed, + Some("gemma4:e2b"), + "gemma4:e2b" + )); + } + + #[test] + fn should_persist_false_when_inventory_empty() { + let installed: Vec = vec![]; + assert!(!should_persist_resolved(&installed, None, "bootstrap")); + } + + #[test] + fn should_persist_true_when_nothing_previously_persisted_but_resolved_available() { + let installed = vec!["gemma4:e2b".to_string()]; + assert!(should_persist_resolved(&installed, None, "gemma4:e2b")); + } + // ── validate_model_installed ───────────────────────────────────────────── #[test] @@ -243,17 +401,65 @@ mod tests { } #[test] - fn validate_rejects_uninstalled_model_with_exact_message() { + fn validate_rejects_uninstalled_model_with_stable_prefix() { let installed = vec!["gemma4:e2b".to_string()]; let err = 
validate_model_installed("llama3:8b", &installed).unwrap_err(); - assert_eq!(err, "Model is not installed in Ollama: llama3:8b"); + assert!( + err.starts_with(MODEL_NOT_INSTALLED_ERR_PREFIX), + "expected stable prefix, got: {err}" + ); + assert!(err.ends_with("llama3:8b")); } #[test] fn validate_rejects_when_installed_list_empty() { let installed: Vec = vec![]; let err = validate_model_installed("gemma4:e2b", &installed).unwrap_err(); - assert_eq!(err, "Model is not installed in Ollama: gemma4:e2b"); + assert_eq!(err, format!("{MODEL_NOT_INSTALLED_ERR_PREFIX}gemma4:e2b")); + } + + // ── validate_model_slug ────────────────────────────────────────────────── + + #[test] + fn validate_slug_accepts_valid_forms() { + assert!(validate_model_slug("gemma4:e2b").is_ok()); + assert!(validate_model_slug("llama3.1:8b").is_ok()); + assert!(validate_model_slug("registry.example.com/user/model:tag").is_ok()); + assert!(validate_model_slug("my_model-v2").is_ok()); + } + + #[test] + fn validate_slug_rejects_empty() { + let err = validate_model_slug("").unwrap_err(); + assert!(err.contains("empty")); + } + + #[test] + fn validate_slug_rejects_oversized() { + let oversized = "a".repeat(MAX_MODEL_SLUG_LEN + 1); + let err = validate_model_slug(&oversized).unwrap_err(); + assert!(err.contains("maximum length")); + } + + #[test] + fn validate_slug_accepts_max_length() { + let at_limit = "a".repeat(MAX_MODEL_SLUG_LEN); + assert!(validate_model_slug(&at_limit).is_ok()); + } + + #[test] + fn validate_slug_rejects_shell_metacharacters() { + assert!(validate_model_slug("bad; rm -rf /").is_err()); + assert!(validate_model_slug("../etc/passwd").is_ok()); // `.` `/` `-` allowed individually + assert!(validate_model_slug("bad name").is_err()); // whitespace rejected + assert!(validate_model_slug("bad\nname").is_err()); + assert!(validate_model_slug("bad$(whoami)").is_err()); + assert!(validate_model_slug("bad`whoami`").is_err()); + } + + #[test] + fn validate_slug_rejects_non_ascii() { + 
assert!(validate_model_slug("gëmma").is_err()); } // ── fetch_installed_model_names ────────────────────────────────────────── @@ -361,17 +567,9 @@ mod tests { #[tokio::test] async fn fetch_installed_model_names_times_out_when_ollama_hangs() { - // Bind a TCP listener that accepts connections but never writes a - // response. reqwest will complete the TCP handshake, send the GET, - // then block waiting for bytes that never arrive. The per-request - // timeout is the only thing that lets us recover. Use a 100ms - // override so the test stays fast and deterministic. let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); let addr = listener.local_addr().unwrap(); - // Accept in a background thread but never read/write, so the socket - // stays open and idle until the test drops it. std::thread::spawn(move || { - // Hold the accepted stream to keep the connection half-open. let _held = listener.accept().ok(); std::thread::sleep(std::time::Duration::from_secs(10)); }); @@ -404,7 +602,6 @@ mod tests { .await; let client = reqwest::Client::new(); - // Pass the URL with a trailing slash; the helper must strip it. let url_with_slash = format!("{}/", server.url()); let result = fetch_installed_model_names(&client, &url_with_slash).await; @@ -412,6 +609,103 @@ mod tests { assert_eq!(result.unwrap(), vec!["x".to_string()]); } + #[tokio::test] + async fn fetch_rejects_body_exceeding_size_cap_via_content_length() { + let mut server = mockito::Server::new_async().await; + // Tight cap (32 bytes) + a declared Content-Length that matches a + // 100-byte payload; the pre-read guard on `content_length` must + // reject before the bytes() call is issued. 
+ let body = "x".repeat(100); + server + .mock("GET", "/api/tags") + .with_status(200) + .with_header("content-type", "application/json") + .with_body(body) + .create_async() + .await; + + let client = reqwest::Client::new(); + let result = fetch_installed_model_names_inner( + &client, + &server.url(), + std::time::Duration::from_secs(5), + 32, + ) + .await; + + let err = result.unwrap_err(); + assert!( + err.contains("exceeded"), + "expected size-cap error, got: {err}" + ); + } + + #[tokio::test] + async fn fetch_maps_body_read_error_to_err_string() { + // Headers advertise Content-Length but the server closes the socket + // before sending any body bytes. reqwest's bytes() surfaces this as + // a transport error; the helper must map it to the documented prose. + let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = listener.local_addr().unwrap(); + std::thread::spawn(move || { + let (mut stream, _) = listener.accept().unwrap(); + use std::io::{Read, Write}; + let mut buf = [0u8; 1024]; + let _ = stream.read(&mut buf); + // Promise 100 body bytes, then immediately hang up. + let _ = stream.write_all( + b"HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nContent-Length: 100\r\nConnection: close\r\n\r\n", + ); + }); + + let client = reqwest::Client::new(); + let base = format!("http://{addr}"); + let result = fetch_installed_model_names(&client, &base).await; + + let err = result.unwrap_err(); + assert!( + err.contains("failed to read /api/tags body"), + "expected body-read error, got: {err}" + ); + } + + #[tokio::test] + async fn fetch_rejects_body_exceeding_size_cap_when_no_content_length() { + // Chunked-encoding response (no Content-Length); the post-read guard + // on `bytes.len()` must still reject. 
+ let listener = std::net::TcpListener::bind("127.0.0.1:0").unwrap(); + let addr = listener.local_addr().unwrap(); + std::thread::spawn(move || { + let (mut stream, _) = listener.accept().unwrap(); + use std::io::{Read, Write}; + let mut buf = [0u8; 1024]; + let _ = stream.read(&mut buf); + let body = "x".repeat(200); + let response = format!( + "HTTP/1.1 200 OK\r\nContent-Type: application/json\r\nTransfer-Encoding: chunked\r\n\r\n{:x}\r\n{}\r\n0\r\n\r\n", + body.len(), + body + ); + let _ = stream.write_all(response.as_bytes()); + }); + + let client = reqwest::Client::new(); + let base = format!("http://{addr}"); + let result = fetch_installed_model_names_inner( + &client, + &base, + std::time::Duration::from_secs(5), + 32, + ) + .await; + + let err = result.unwrap_err(); + assert!( + err.contains("exceeded"), + "expected post-read size-cap error, got: {err}" + ); + } + // ── ActiveModelState ───────────────────────────────────────────────────── #[test] @@ -444,4 +738,12 @@ mod tests { fn active_model_key_constant_matches_expected_value() { assert_eq!(ACTIVE_MODEL_KEY, "active_model"); } + + #[test] + fn model_not_installed_err_prefix_is_stable() { + assert_eq!( + MODEL_NOT_INSTALLED_ERR_PREFIX, + "Model is not installed in Ollama: " + ); + } } diff --git a/src/App.tsx b/src/App.tsx index 9c14e87a..bec9ead6 100644 --- a/src/App.tsx +++ b/src/App.tsx @@ -20,6 +20,7 @@ import { AskBarView, MAX_IMAGES } from './view/AskBarView'; import { OnboardingView } from './view/onboarding/index'; import type { OnboardingStage } from './view/onboarding/index'; import { HistoryPanel } from './components/HistoryPanel'; +import { ModelPickerPanel } from './components/ModelPickerPanel'; import { ImagePreviewModal } from './components/ImagePreviewModal'; import type { AttachedImage } from './types/image'; import { MAX_IMAGE_SIZE_BYTES } from './types/image'; @@ -114,6 +115,9 @@ function App() { * but rendered differently based on `isChatMode`). 
*/ const [isHistoryOpen, setIsHistoryOpen] = useState(false); + + /** Whether the model picker panel is currently open. Mutually exclusive with `isHistoryOpen`. */ + const [isModelPickerOpen, setIsModelPickerOpen] = useState(false); /** * True when the user clicked + while an unsaved conversation is active. * Causes the history dropdown to show a SwitchConfirmation prompt instead @@ -372,6 +376,16 @@ function App() { } }, [isGenerating]); + /* eslint-disable @eslint-react/set-state-in-effect -- intentional: close + the picker when the user triggers generation so it can't stay open over + a streaming response. No secondary effects are triggered by this reset. */ + useEffect(() => { + if (isGenerating || isSubmitPending) { + setIsModelPickerOpen(false); + } + }, [isGenerating, isSubmitPending]); + /* eslint-enable @eslint-react/set-state-in-effect */ + /** * Replays the entrance sequence by transitioning the overlay to the visible state. * Clears conversation state for a fresh session each time the overlay appears. @@ -400,6 +414,7 @@ function App() { setQuery(''); setSelectedContext(context); setIsHistoryOpen(false); + setIsModelPickerOpen(false); setAttachedImages((prev) => { for (const img of prev) URL.revokeObjectURL(img.blobUrl); return []; @@ -448,9 +463,39 @@ function App() { /** Ref attached to the chat-mode history dropdown for click-outside detection. */ const historyDropdownRef = useRef(null); - /** Toggles the history panel open/closed. */ + /** Ref attached to the chat-mode model picker dropdown for click-outside detection. */ + const modelPickerDropdownRef = useRef(null); + /** Ref attached to the ask-bar mode model picker drawer for click-outside detection. */ + const modelPickerAskBarRef = useRef(null); + + /** + * Close the model picker when the user clicks outside it, in either mode. + * Clicks on any pill trigger (data-model-picker-toggle) are excluded so the + * trigger's own onClick can manage the toggle without a double-close race. 
+ */ + useEffect(() => { + if (!isModelPickerOpen) return; + + const handleMouseDown = (e: MouseEvent) => { + const target = e.target as Element; + if ( + modelPickerDropdownRef.current?.contains(target) || + modelPickerAskBarRef.current?.contains(target) || + target.closest?.('[data-model-picker-toggle]') + ) { + return; + } + setIsModelPickerOpen(false); + }; + + document.addEventListener('mousedown', handleMouseDown); + return () => document.removeEventListener('mousedown', handleMouseDown); + }, [isModelPickerOpen]); + + /** Toggles the history panel open/closed. Closes model picker (mutually exclusive). */ const handleHistoryToggle = useCallback(() => { setIsHistoryOpen((prev) => !prev); + setIsModelPickerOpen(false); }, []); /** @@ -1289,23 +1334,32 @@ function App() { }, [isSubmitPending, cancel, setSearchActive, setSelectedContext]); /** - * Persists the user's model choice via the backend. Silently no-ops on - * rejection: the only reject path is a race where the chosen model was - * uninstalled between the picker render and the click. The next - * `refreshModels` (fired on overlay show) will reconcile the list. + * Persists the user's model choice via the backend and closes the picker panel. + * On rejection (e.g. the chosen model was uninstalled between render and click), + * triggers a refresh so the picker list and the active chip resync with the + * actual backend state instead of silently drifting. */ const handleModelSelect = useCallback( (model: string) => { - void setActiveModel(model).catch( - /* v8 ignore next 3 -- rejection requires a mid-render uninstall race that cannot be triggered in jsdom */ - () => { - // Intentional swallow: see docblock above. - }, - ); + setIsModelPickerOpen(false); + void setActiveModel(model).catch(() => { + void refreshModels(); + }); }, - [setActiveModel], + [setActiveModel, refreshModels], ); + /** Closes the model picker panel. Wired to Escape key inside the panel. 
*/ + const handleModelPickerClose = useCallback(() => { + setIsModelPickerOpen(false); + }, []); + + /** Toggles the model picker panel. Closes history panel (mutually exclusive). */ + const handleModelPickerToggle = useCallback(() => { + setIsModelPickerOpen((prev) => !prev); + setIsHistoryOpen(false); + }, []); + /** * Synchronizes the React animation state with Tauri-driven overlay visibility * requests emitted from the Rust backend. @@ -1499,7 +1553,7 @@ function App() { : 'rounded-2xl shadow-bar' }`} > - {/* Chat Messages Area - morphs in when in chat mode */} + {/* Chat Messages Area - morphs in when in chat mode. */} {isChatMode ? ( ) : null} + {/* Ask-bar mode model picker drawer - above the input bar. + In chat mode the trigger and drawer move to the header area above. */} + {!isChatMode && ( + + {isModelPickerOpen && + activeModel && + availableModels && + availableModels.length > 0 ? ( + + + + ) : null} + + )} + {/* Ask-bar mode history panel - inline below the input bar. The !isChatMode gate lives OUTSIDE AnimatePresence so that when a conversation is loaded (isChatMode → true) the panel unmounts @@ -1592,10 +1684,40 @@ function App() { isDragOver={isDragOver ?? undefined} activeModel={activeModel} availableModels={availableModels} - onModelSelect={handleModelSelect} + onModelPickerToggle={handleModelPickerToggle} + isModelPickerOpen={isModelPickerOpen} />
+ {/* Chat-mode model picker dropdown - floating card identical in style + to the chat-history dropdown. Anchored absolute right-3 top-10 + so it appears just below the header pill trigger without pushing + the conversation content. Click-outside closes it. */} + + {isChatMode && + isModelPickerOpen && + activeModel && + availableModels && + availableModels.length > 0 ? ( + + + + ) : null} + + {/* Chat-mode history dropdown - sibling of the morphing container so it is never clipped by its overflow-hidden. Positioned absolutely within this relative wrapper (same coordinate space as the diff --git a/src/__tests__/App.test.tsx b/src/__tests__/App.test.tsx index a39eb00c..c6788941 100644 --- a/src/__tests__/App.test.tsx +++ b/src/__tests__/App.test.tsx @@ -83,7 +83,7 @@ describe('App', () => { fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); await act(async () => { - fireEvent.click(screen.getByRole('menuitem', { name: 'qwen2.5:7b' })); + fireEvent.click(screen.getByRole('option', { name: 'qwen2.5:7b' })); }); const textarea = screen.getByPlaceholderText('Ask Thuki anything...'); @@ -104,6 +104,333 @@ describe('App', () => { ); }); + it('opens model picker panel when trigger is clicked', async () => { + enableChannelCaptureWithResponses({ + get_model_picker_state: { + active: 'gemma4:e2b', + all: ['gemma4:e2b', 'qwen2.5:7b'], + }, + }); + render(); + await act(async () => {}); + await showOverlay(); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + await act(async () => {}); + + expect( + screen.getByRole('option', { name: 'gemma4:e2b' }), + ).toBeInTheDocument(); + expect( + screen.getByRole('option', { name: 'qwen2.5:7b' }), + ).toBeInTheDocument(); + }); + + it('closes model picker and opens history when history toggle clicked', async () => { + enableChannelCaptureWithResponses({ + get_model_picker_state: { + active: 'gemma4:e2b', + all: ['gemma4:e2b', 'qwen2.5:7b'], + }, + list_conversations: [], + }); + render(); + 
await act(async () => {}); + await showOverlay(); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + await act(async () => {}); + expect( + screen.getByRole('option', { name: 'gemma4:e2b' }), + ).toBeInTheDocument(); + + fireEvent.click(screen.getByRole('button', { name: 'Open history' })); + await act(async () => {}); + expect(screen.queryByRole('option', { name: 'gemma4:e2b' })).toBeNull(); + expect( + screen.getByPlaceholderText(/search past chats/i), + ).toBeInTheDocument(); + }); + + it('closes history and opens model picker when model picker trigger clicked', async () => { + enableChannelCaptureWithResponses({ + get_model_picker_state: { + active: 'gemma4:e2b', + all: ['gemma4:e2b', 'qwen2.5:7b'], + }, + list_conversations: [], + }); + render(); + await act(async () => {}); + await showOverlay(); + + fireEvent.click(screen.getByRole('button', { name: 'Open history' })); + await act(async () => {}); + expect( + screen.getByPlaceholderText(/search past chats/i), + ).toBeInTheDocument(); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + await act(async () => {}); + expect(screen.queryByPlaceholderText(/search past chats/i)).toBeNull(); + expect( + screen.getByRole('option', { name: 'gemma4:e2b' }), + ).toBeInTheDocument(); + }); + + it('closes model picker when a model is selected', async () => { + enableChannelCaptureWithResponses({ + get_model_picker_state: { + active: 'gemma4:e2b', + all: ['gemma4:e2b', 'qwen2.5:7b'], + }, + set_active_model: undefined, + }); + render(); + await act(async () => {}); + await showOverlay(); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + await act(async () => {}); + expect( + screen.getByRole('option', { name: 'qwen2.5:7b' }), + ).toBeInTheDocument(); + + fireEvent.click(screen.getByRole('option', { name: 'qwen2.5:7b' })); + await act(async () => {}); + expect(screen.queryByRole('option', { name: 'qwen2.5:7b' })).toBeNull(); + }); + + it('closes 
model picker when generation starts', async () => { + enableChannelCaptureWithResponses({ + get_model_picker_state: { + active: 'gemma4:e2b', + all: ['gemma4:e2b', 'qwen2.5:7b'], + }, + }); + render(); + await act(async () => {}); + await showOverlay(); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + await act(async () => {}); + expect( + screen.getByRole('option', { name: 'gemma4:e2b' }), + ).toBeInTheDocument(); + + const textarea = screen.getByPlaceholderText('Ask Thuki anything...'); + fireEvent.change(textarea, { target: { value: 'hi' } }); + fireEvent.keyDown(textarea, { key: 'Enter', shiftKey: false }); + await act(async () => {}); + + expect(screen.queryByRole('option', { name: 'gemma4:e2b' })).toBeNull(); + }); + + it('shows active model pill in chat mode header and opens picker from there', async () => { + enableChannelCaptureWithResponses({ + get_model_picker_state: { + active: 'gemma4:e2b', + all: ['gemma4:e2b', 'qwen2.5:7b'], + }, + }); + render(); + await act(async () => {}); + await showOverlay(); + + // Transition to chat mode by submitting a message + const textarea = screen.getByPlaceholderText('Ask Thuki anything...'); + fireEvent.change(textarea, { target: { value: 'hi' } }); + fireEvent.keyDown(textarea, { key: 'Enter', shiftKey: false }); + await act(async () => {}); + + act(() => { + getLastChannel()?.simulateMessage({ type: 'Token', data: 'Hello!' 
}); + getLastChannel()?.simulateMessage({ type: 'Done' }); + }); + await act(async () => {}); + + // Pill button should now be in the header (WindowControls), showing the model name + const pill = screen.getByRole('button', { name: 'Choose model' }); + expect(pill).toBeInTheDocument(); + expect(pill.textContent).toContain('gemma4:e2b'); + + // Click pill → model picker panel opens ABOVE the conversation + fireEvent.click(pill); + await act(async () => {}); + expect( + screen.getByRole('option', { name: 'gemma4:e2b' }), + ).toBeInTheDocument(); + expect( + screen.getByRole('option', { name: 'qwen2.5:7b' }), + ).toBeInTheDocument(); + }); + + it('closes chat-mode model picker when clicking outside the dropdown', async () => { + enableChannelCaptureWithResponses({ + get_model_picker_state: { + active: 'gemma4:e2b', + all: ['gemma4:e2b', 'qwen2.5:7b'], + }, + }); + render(); + await act(async () => {}); + await showOverlay(); + + const textarea = screen.getByPlaceholderText('Ask Thuki anything...'); + fireEvent.change(textarea, { target: { value: 'hi' } }); + fireEvent.keyDown(textarea, { key: 'Enter', shiftKey: false }); + await act(async () => {}); + act(() => { + getLastChannel()?.simulateMessage({ type: 'Token', data: 'Hello!' 
}); + getLastChannel()?.simulateMessage({ type: 'Done' }); + }); + await act(async () => {}); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + await act(async () => {}); + expect( + screen.getByRole('option', { name: 'gemma4:e2b' }), + ).toBeInTheDocument(); + + fireEvent.mouseDown(document.body); + await act(async () => {}); + expect(screen.queryByRole('option', { name: 'gemma4:e2b' })).toBeNull(); + }); + + it('chat-mode click-outside does NOT close when clicking inside the dropdown or on the pill', async () => { + enableChannelCaptureWithResponses({ + get_model_picker_state: { + active: 'gemma4:e2b', + all: ['gemma4:e2b', 'qwen2.5:7b'], + }, + }); + render(); + await act(async () => {}); + await showOverlay(); + + const textarea = screen.getByPlaceholderText('Ask Thuki anything...'); + fireEvent.change(textarea, { target: { value: 'hi' } }); + fireEvent.keyDown(textarea, { key: 'Enter', shiftKey: false }); + await act(async () => {}); + act(() => { + getLastChannel()?.simulateMessage({ type: 'Token', data: 'Hello!' 
}); + getLastChannel()?.simulateMessage({ type: 'Done' }); + }); + await act(async () => {}); + + const pill = screen.getByRole('button', { name: 'Choose model' }); + fireEvent.click(pill); + await act(async () => {}); + const option = screen.getByRole('option', { name: 'gemma4:e2b' }); + expect(option).toBeInTheDocument(); + + // mousedown inside the dropdown must not close the picker + fireEvent.mouseDown(option); + await act(async () => {}); + expect( + screen.getByRole('option', { name: 'gemma4:e2b' }), + ).toBeInTheDocument(); + + // mousedown on the pill trigger must not close the picker either + fireEvent.mouseDown(pill); + await act(async () => {}); + expect( + screen.getByRole('option', { name: 'gemma4:e2b' }), + ).toBeInTheDocument(); + }); + + it('ask-bar mode click-outside closes the model picker drawer', async () => { + enableChannelCaptureWithResponses({ + get_model_picker_state: { + active: 'gemma4:e2b', + all: ['gemma4:e2b', 'qwen2.5:7b'], + }, + }); + render(); + await act(async () => {}); + await showOverlay(); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + await act(async () => {}); + expect( + screen.getByRole('option', { name: 'gemma4:e2b' }), + ).toBeInTheDocument(); + + // Clicking inside the drawer must NOT close it + fireEvent.mouseDown(screen.getByRole('option', { name: 'gemma4:e2b' })); + await act(async () => {}); + expect( + screen.getByRole('option', { name: 'gemma4:e2b' }), + ).toBeInTheDocument(); + + // Clicking outside closes the drawer + fireEvent.mouseDown(document.body); + await act(async () => {}); + expect(screen.queryByRole('option', { name: 'gemma4:e2b' })).toBeNull(); + }); + + it('refreshes model list when set_active_model rejects', async () => { + let rejectionSeen = false; + let refreshesAfterRejection = 0; + invoke.mockImplementation(async (cmd: string) => { + if (cmd === 'get_model_picker_state') { + if (rejectionSeen) { + refreshesAfterRejection += 1; + return { active: 'gemma4:e2b', all: 
['gemma4:e2b'] }; + } + return { active: 'gemma4:e2b', all: ['gemma4:e2b', 'qwen2.5:7b'] }; + } + if (cmd === 'set_active_model') { + rejectionSeen = true; + throw new Error('Model is not installed in Ollama: qwen2.5:7b'); + } + return undefined; + }); + render(); + await act(async () => {}); + await showOverlay(); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + await act(async () => {}); + fireEvent.click(screen.getByRole('option', { name: 'qwen2.5:7b' })); + await act(async () => {}); + + // The rejection handler must have triggered at least one refresh fetch. + expect(refreshesAfterRejection).toBeGreaterThanOrEqual(1); + + // Reopen to confirm the list is the post-refresh one (qwen was removed). + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + await act(async () => {}); + expect( + screen.getByRole('option', { name: 'gemma4:e2b' }), + ).toBeInTheDocument(); + expect(screen.queryByRole('option', { name: 'qwen2.5:7b' })).toBeNull(); + }); + + it('closes the model picker drawer when Escape is pressed in the filter input', async () => { + enableChannelCaptureWithResponses({ + get_model_picker_state: { + active: 'gemma4:e2b', + all: ['gemma4:e2b', 'qwen2.5:7b'], + }, + }); + render(); + await act(async () => {}); + await showOverlay(); + + fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); + await act(async () => {}); + expect( + screen.getByRole('option', { name: 'gemma4:e2b' }), + ).toBeInTheDocument(); + + fireEvent.keyDown(screen.getByPlaceholderText(/filter models/i), { + key: 'Escape', + }); + await act(async () => {}); + expect(screen.queryByRole('option', { name: 'gemma4:e2b' })).toBeNull(); + }); + it('grows upward when near bottom screen edge', async () => { const { container } = render(); await act(async () => {}); diff --git a/src/components/ModelPicker.tsx b/src/components/ModelPicker.tsx index 548cd286..08f0b9d6 100644 --- a/src/components/ModelPicker.tsx +++ 
b/src/components/ModelPicker.tsx @@ -1,19 +1,3 @@ -import { AnimatePresence, motion } from 'framer-motion'; -import { - useCallback, - useEffect, - useLayoutEffect, - useRef, - useState, -} from 'react'; -import { createPortal } from 'react-dom'; - -/** - * Hoisted static SVG - chip-style trigger icon for the model picker. - * Redrawn to occupy ~88% of the 16x16 canvas so visual weight matches - * the adjacent camera and send icons in the ask bar. - * @see Vercel React Best Practices - Hoist Static JSX Elements - */ const CHIP_ICON = ( ); -/** Hoisted static checkmark path used on the active row. */ -const CHECK_ICON_PATH = ( - -); - -/** Fixed target width for the portal menu in pixels. */ -const MENU_WIDTH = 220; -/** Viewport-edge padding used when clamping the left position. */ -const EDGE_PADDING = 8; -/** Vertical gap between the trigger and the menu. */ -const MENU_GAP = 8; - -/** Screen position for the portal menu, computed from the trigger rect. */ -interface MenuPosition { - top: number; - left: number; -} - -/** - * State snapshot emitted by {@link ModelPicker} via `onMenuChange`. - * - * `openDirection` reflects the flip decision taken in `updatePosition`: - * "above" when the menu fits above the trigger, "below" when it flipped - * downward because there was no room above. `height` is the measured - * `offsetHeight` of the menu (0 before the first rAF measurement and after - * close). Consumers use these fields to reserve native window space below - * the trigger when the menu opens downward. - */ -export interface ModelPickerMenuState { - /** True while the portal menu is mounted and visible. */ - isOpen: boolean; - /** Measured menu height in pixels, or 0 before first measurement / after close. */ - height: number; - /** Resolved placement: above the trigger, or flipped below it. */ - openDirection: 'above' | 'below'; -} - -/** Props for the {@link ModelPicker} component. */ +/** Props for the {@link ModelPicker} trigger button. 
*/ export interface ModelPickerProps { - /** Currently active model slug; the matching row renders an orange tick. */ - activeModel: string; - /** Full list of available model slugs from Ollama's tags endpoint. */ - models: string[]; - /** When true the trigger is inert (e.g. during generation) and any open menu closes. */ + /** Called when the user clicks the trigger to toggle the picker panel. */ + onClick: () => void; + /** When true, the button is inert (e.g. during generation). */ disabled: boolean; - /** Called with the chosen slug when the user picks a row. */ - onSelect: (model: string) => void; - /** - * Emitted when the portal menu opens, resizes, or closes. Consumers use - * this to reserve native window space below the trigger when the menu - * flips downward (ask-bar-only mode), since the NSPanel frame clips - * content that extends past the ask-bar row. - */ - onMenuChange?: (state: ModelPickerMenuState) => void; + /** Reflects whether the picker panel is currently open (drives aria-expanded). */ + isOpen: boolean; } /** - * Single self-contained model picker rendered as a portal menu. - * - * The menu escapes the ask bar's morphing container (which sets - * `overflow-hidden`) by rendering into `document.body` via - * {@link createPortal}. That keeps the Thuki window size stable while the - * menu floats above it like a native macOS NSMenu. + * Chip-style trigger button that opens/closes the model picker panel. * - * Positioning algorithm: - * 1. Read the trigger's `getBoundingClientRect` on open. - * 2. Right-align the menu to the trigger, clamped to 8px from the left edge. - * 3. Prefer opening above the trigger. If that would clip above the - * viewport, open below instead. Uses a two-phase rAF measurement: - * render once to measure the menu height, then adjust `top`. - * 4. Re-run on every scroll / resize / window blur while the menu is open. 
- * - * All listeners (scroll, resize, mousedown, keydown) are attached in a single - * effect gated on {@link showMenu} and removed on close or unmount. + * The panel itself is rendered by App.tsx as an inline drawer (same + * grow/shrink animation as the history panel) so the ResizeObserver drives + * natural window growth without any portal or frame-manipulation logic. */ -export function ModelPicker({ - activeModel, - models, - disabled, - onSelect, - onMenuChange, -}: ModelPickerProps) { - const [isOpen, setIsOpen] = useState(false); - const [position, setPosition] = useState(null); - - const triggerRef = useRef(null); - const menuRef = useRef(null); - - /** - * Combined open gate: hides the menu if the picker becomes disabled or - * empty while the user intent (`isOpen`) is still true. The underlying - * `isOpen` state is still reset to false by the disabled-sync effect - * below so re-enabling does not reopen a stale menu. - */ - const showMenu = isOpen && !disabled && models.length > 0; - - /** Recomputes the menu position from the current trigger rect. */ - const updatePosition = useCallback(() => { - const trigger = triggerRef.current; - /* v8 ignore start -- trigger ref is always set while the menu can be open; - guard is defensive for concurrent unmount. */ - if (!trigger) return; - /* v8 ignore stop */ - const rect = trigger.getBoundingClientRect(); - const left = Math.max(EDGE_PADDING, rect.right - MENU_WIDTH); - - const menuEl = menuRef.current; - const menuHeight = menuEl?.offsetHeight ?? 0; - let top = rect.top - menuHeight - MENU_GAP; - if (top < 0) { - top = rect.bottom + MENU_GAP; - } - // `top < rect.top` means we kept the above placement; otherwise we flipped - // below the trigger. Emitted so the host can reserve window space below - // the trigger when the menu flipped downward. - const openDirection: 'above' | 'below' = top < rect.top ? 
'above' : 'below'; - setPosition({ top, left }); - onMenuChange?.({ isOpen: true, height: menuHeight, openDirection }); - }, [onMenuChange]); - - /** - * First-frame position: read the rect synchronously so the menu mounts - * at an approximate spot, then the effect below re-measures height and - * flips above/below on the following frame. - */ - /* eslint-disable @eslint-react/set-state-in-effect -- intentional: seeding - the initial menu position from the trigger rect is exactly what a layout - effect is for. The rAF inside the next effect corrects the top coordinate - once the menu has laid out and a real height is available. */ - useLayoutEffect(() => { - if (!showMenu) { - setPosition(null); - return; - } - const trigger = triggerRef.current; - /* v8 ignore start -- showMenu implies trigger is mounted; defensive guard. */ - if (!trigger) return; - /* v8 ignore stop */ - const rect = trigger.getBoundingClientRect(); - const left = Math.max(EDGE_PADDING, rect.right - MENU_WIDTH); - // Start above the trigger by an estimated offset so the first paint - // is close to final. The rAF below corrects based on real height. - setPosition({ top: rect.top - MENU_GAP, left }); - }, [showMenu]); - /* eslint-enable @eslint-react/set-state-in-effect */ - - /** - * Attaches all live listeners for the open menu and re-measures once the - * menu has laid out so the above/below flip uses the real height. - */ - useEffect(() => { - if (!showMenu) return; - - // Re-measure after the portal has rendered once. 
- const rafId = requestAnimationFrame(updatePosition); - - const handleScroll = () => { - requestAnimationFrame(updatePosition); - }; - const handleResize = () => { - requestAnimationFrame(updatePosition); - }; - const handleMouseDown = (event: MouseEvent) => { - const target = event.target as Node; - if (triggerRef.current?.contains(target)) return; - if (menuRef.current?.contains(target)) return; - setIsOpen(false); - }; - const handleKeyDown = (event: KeyboardEvent) => { - if (event.key === 'Escape') { - setIsOpen(false); - } - }; - - window.addEventListener('scroll', handleScroll, { passive: true }); - window.addEventListener('resize', handleResize, { passive: true }); - document.addEventListener('mousedown', handleMouseDown); - document.addEventListener('keydown', handleKeyDown); - - return () => { - cancelAnimationFrame(rafId); - window.removeEventListener('scroll', handleScroll); - window.removeEventListener('resize', handleResize); - document.removeEventListener('mousedown', handleMouseDown); - document.removeEventListener('keydown', handleKeyDown); - }; - }, [showMenu, updatePosition]); - - /** - * Notify the host when the menu transitions to closed, including on - * unmount while the menu is still open, so any reserved native window - * space can collapse. Open-state emissions happen from inside - * {@link updatePosition} where the real measured height and flip - * direction are known. - */ - useEffect(() => { - if (!showMenu) { - onMenuChange?.({ isOpen: false, height: 0, openDirection: 'below' }); - return; - } - return () => { - onMenuChange?.({ isOpen: false, height: 0, openDirection: 'below' }); - }; - }, [showMenu, onMenuChange]); - - /** - * When the picker becomes disabled (e.g. generation starts), collapse - * the open intent so re-enabling does not reopen a stale menu. - */ - /* eslint-disable @eslint-react/set-state-in-effect -- intentional: mirror the - disabled prop into the local open state so a mid-open disable cleanly - closes. 
No secondary effects are triggered by this reset. */ - useEffect(() => { - if (disabled && isOpen) setIsOpen(false); - }, [disabled, isOpen]); - /* eslint-enable @eslint-react/set-state-in-effect */ - - const handleToggle = useCallback(() => { - setIsOpen((prev) => !prev); - }, []); - - const handleRowClick = useCallback( - (model: string) => { - onSelect(model); - setIsOpen(false); - }, - [onSelect], - ); - - if (models.length === 0) return null; - - /* v8 ignore next 2 -- SSR guard; Tauri + happy-dom always provide document. */ - const portalTarget = typeof document !== 'undefined' ? document.body : null; - +export function ModelPicker({ onClick, disabled, isOpen }: ModelPickerProps) { return ( - <> - - {portalTarget && - createPortal( - - {showMenu && position && ( - - {models.map((model) => { - const active = model === activeModel; - return ( - - {model} - - - - ); - })} - - )} - , - portalTarget, - )} - + ); } diff --git a/src/components/ModelPickerPanel.tsx b/src/components/ModelPickerPanel.tsx new file mode 100644 index 00000000..c045bebe --- /dev/null +++ b/src/components/ModelPickerPanel.tsx @@ -0,0 +1,196 @@ +import { useEffect, useMemo, useRef, useState } from 'react'; + +const CHECK_ICON_PATH = ( + +); + +const LISTBOX_ID = 'thuki-model-picker-listbox'; + +/** Props for the {@link ModelPickerPanel} content panel. */ +export interface ModelPickerPanelProps { + /** Full list of available model slugs. */ + models: string[]; + /** Currently active model slug. */ + activeModel: string; + /** Called with the chosen slug when the user clicks or keyboard-selects a row. */ + onSelect: (model: string) => void; + /** + * Called when the user presses Escape inside the panel. The host is + * responsible for closing the drawer/dropdown in response. + */ + onClose?: () => void; +} + +/** + * Inline model picker panel rendered as a drawer above the ask bar or as a + * floating dropdown in chat mode. 
+ * + * Combobox-style keyboard model: focus stays in the filter input, ArrowDown/ + * ArrowUp move the `aria-activedescendant` marker through the visible rows, + * Enter commits the highlighted row, and Escape asks the host to close. + */ +export function ModelPickerPanel({ + models, + activeModel, + onSelect, + onClose, +}: ModelPickerPanelProps) { + const [filter, setFilter] = useState(''); + const [highlightedIndex, setHighlightedIndex] = useState(0); + const listboxRef = useRef(null); + + const filtered = useMemo(() => { + const trimmed = filter.trim(); + if (trimmed === '') return models; + const needle = trimmed.toLowerCase(); + return models.filter((m) => m.toLowerCase().includes(needle)); + }, [filter, models]); + + /* eslint-disable @eslint-react/set-state-in-effect -- canonical index-clamp + when the filtered list shrinks; drives no secondary effects and React + bails out of the rerender when the next state equals the previous. */ + useEffect(() => { + if (filtered.length === 0) { + setHighlightedIndex(0); + return; + } + if (highlightedIndex >= filtered.length) { + setHighlightedIndex(filtered.length - 1); + } + }, [filtered, highlightedIndex]); + /* eslint-enable @eslint-react/set-state-in-effect */ + + const activeId = + filtered.length > 0 && highlightedIndex < filtered.length + ? `${LISTBOX_ID}-option-${highlightedIndex}` + : undefined; + + // Keep the highlighted row visible when it scrolls off-view. scrollIntoView + // is absent in happy-dom/jsdom; the optional call becomes a no-op there. + useEffect(() => { + if (!activeId) return; + const el = listboxRef.current?.querySelector(`#${activeId}`); + /* v8 ignore next -- scrollIntoView is a host API not available in jsdom */ + el?.scrollIntoView?.({ block: 'nearest' }); + }, [activeId]); + + const commit = (index: number) => { + if (index < 0 || index >= filtered.length) return; + onSelect(filtered[index]); + }; + + return ( +
+
+ setFilter(e.target.value)} + onKeyDown={(e) => { + if (e.key === 'ArrowDown') { + e.preventDefault(); + if (filtered.length === 0) return; + setHighlightedIndex((i) => (i + 1) % filtered.length); + return; + } + if (e.key === 'ArrowUp') { + e.preventDefault(); + if (filtered.length === 0) return; + setHighlightedIndex( + (i) => (i - 1 + filtered.length) % filtered.length, + ); + return; + } + if (e.key === 'Home') { + e.preventDefault(); + if (filtered.length > 0) setHighlightedIndex(0); + return; + } + if (e.key === 'End') { + e.preventDefault(); + if (filtered.length > 0) setHighlightedIndex(filtered.length - 1); + return; + } + if (e.key === 'Enter') { + e.preventDefault(); + commit(highlightedIndex); + return; + } + if (e.key === 'Escape') { + e.preventDefault(); + onClose?.(); + return; + } + }} + placeholder="Filter models..." + autoFocus + className="w-full bg-transparent text-xs text-text-primary placeholder:text-text-secondary outline-none" + /> +
+ +
+ {models.length === 0 ? ( +

+ No models available. +

+ ) : filtered.length === 0 ? ( +

+ No models found. +

+ ) : ( + filtered.map((model, index) => { + const active = model === activeModel; + const highlighted = index === highlightedIndex; + return ( + + ); + }) + )} +
+
+ ); +} diff --git a/src/components/WindowControls.tsx b/src/components/WindowControls.tsx index 03ff502f..db706819 100644 --- a/src/components/WindowControls.tsx +++ b/src/components/WindowControls.tsx @@ -208,7 +208,7 @@ export const WindowControls = memo(function WindowControls({ {/* Right-side header controls */}
- {/* Active model pill trigger — leftmost, before save */} + {/* Active model pill trigger: leftmost, before save */} {activeModel !== undefined && onModelPickerToggle !== undefined && ( @@ -658,7 +642,7 @@ export function AskBarView({ @@ -673,7 +657,7 @@ export function AskBarView({ autoFocus rows={1} placeholder={isChatMode ? 'Reply...' : 'Ask Thuki anything...'} - className="relative w-full bg-transparent border-none outline-none text-transparent text-sm placeholder:text-text-secondary py-2 px-1 disabled:opacity-50 resize-none leading-relaxed" + className="relative w-full bg-transparent border-none outline-none text-transparent text-sm placeholder:text-text-secondary mt-1 py-2 disabled:opacity-50 resize-none leading-5" style={{ caretColor: 'var(--color-text-primary)' }} />
@@ -697,27 +681,22 @@ export function AskBarView({ onClick={onScreenshot} disabled={isBusy} aria-label="Take screenshot" - className="shrink-0 w-7 h-7 flex items-center justify-center rounded-lg text-text-secondary hover:text-text-primary hover:bg-white/8 transition-colors duration-150 disabled:opacity-40 disabled:cursor-default cursor-pointer" + className="shrink-0 w-7 h-7 flex items-center justify-center rounded-lg text-text-secondary hover:text-primary hover:bg-primary/10 transition-colors duration-150 disabled:opacity-40 disabled:cursor-default cursor-pointer" > {CAMERA_ICON} )} - {modelPickerAvailable && - activeModel && - availableModels && - onModelSelect && ( - - - - )} + {modelPickerAvailable && onModelPickerToggle && ( + + + + )}
- {/* Invisible in-flow spacer that reserves space below the ask-bar row - for the model picker's portal menu when it flips downward. The - App-level ResizeObserver watches the morphing container and resizes - the native NSPanel to match, preventing the popup from being - clipped at the window bottom. Height animates to match the 160ms - menu fade-in for visual coherence. */} - ); } diff --git a/src/view/ConversationView.tsx b/src/view/ConversationView.tsx index 9e6dd2fd..f3c5badc 100644 --- a/src/view/ConversationView.tsx +++ b/src/view/ConversationView.tsx @@ -74,6 +74,12 @@ interface ConversationViewProps { * of the typing indicator. */ searchStage?: SearchStage; + /** Currently active model slug forwarded to the WindowControls pill trigger. */ + activeModel?: string; + /** Toggles the model picker panel; forwarded to WindowControls. */ + onModelPickerToggle?: () => void; + /** Whether the model picker panel is open; drives aria-expanded on the pill. */ + isModelPickerOpen?: boolean; } /** @@ -97,6 +103,9 @@ export function ConversationView({ onNewConversation, onImagePreview, searchStage = null, + activeModel, + onModelPickerToggle, + isModelPickerOpen, }: ConversationViewProps) { const scrollContainerRef = useRef(null); @@ -197,6 +206,9 @@ export function ConversationView({ canSave={canSave} onNewConversation={onNewConversation} onHistoryOpen={onHistoryOpen} + activeModel={activeModel} + onModelPickerToggle={onModelPickerToggle} + isModelPickerOpen={isModelPickerOpen} />
{ ).toBeInTheDocument(); }); - it('renders a model picker trigger near send when models are available', () => { + it('renders a model picker trigger in ask-bar mode when models are available', () => { render( , ); - expect( screen.getByRole('button', { name: 'Choose model' }), ).toBeInTheDocument(); }); - it('calls onModelSelect and closes the popup when a model row is chosen', () => { - const onModelSelect = vi.fn(); + it('hides model picker trigger in chat mode (trigger moves to WindowControls header)', () => { render( { inputRef={makeRef()} activeModel="gemma4:e2b" availableModels={['gemma4:e2b', 'qwen2.5:7b']} - onModelSelect={onModelSelect} - />, - ); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - fireEvent.click(screen.getByRole('menuitem', { name: 'qwen2.5:7b' })); - expect(onModelSelect).toHaveBeenCalledWith('qwen2.5:7b'); - expect(screen.queryByRole('menuitem', { name: 'qwen2.5:7b' })).toBeNull(); - }); - - it('closes the model picker popup when generation starts', () => { - const { rerender } = render( - , - ); - - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - expect( - screen.getByRole('menuitem', { name: 'qwen2.5:7b' }), - ).toBeInTheDocument(); - - rerender( - , ); - - expect(screen.queryByRole('menuitem', { name: 'qwen2.5:7b' })).toBeNull(); + expect(screen.queryByRole('button', { name: 'Choose model' })).toBeNull(); }); - it('closes the model picker popup when clicking outside the picker', () => { + it('calls onModelPickerToggle when the Choose model button is clicked', () => { + const onModelPickerToggle = vi.fn(); render( , ); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - expect( - screen.getByRole('menuitem', { name: 'qwen2.5:7b' }), - ).toBeInTheDocument(); - - fireEvent.mouseDown(document.body); - expect(screen.queryByRole('menuitem', { name: 'qwen2.5:7b' })).toBeNull(); - }); - - it('reserves space below the ask-bar row when the picker opens downward', async () 
=> { - // Force the trigger to read as top:0 so the above-math goes negative - // and ModelPicker flips the menu below. Also override offsetHeight so - // the menu reports a positive height on rAF measurement. - const originalGetRect = Element.prototype.getBoundingClientRect; - Element.prototype.getBoundingClientRect = function () { - return { - top: 0, - left: 100, - right: 128, - bottom: 28, - width: 28, - height: 28, - x: 100, - y: 0, - toJSON() { - return {}; - }, - } as DOMRect; - }; - const originalOffsetHeight = Object.getOwnPropertyDescriptor( - HTMLElement.prototype, - 'offsetHeight', - ); - Object.defineProperty(HTMLElement.prototype, 'offsetHeight', { - configurable: true, - get() { - return 120; - }, - }); - try { - const { getByTestId } = render( - , - ); - expect(getByTestId('model-menu-spacer').style.height).toBe('0px'); - - // act-wrap to flush all state updates scheduled by the sync rAF fire. - await act(async () => { - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - }); - // Trigger a resize to force another updatePosition pass with the - // menuRef attached and offsetHeight overridden. - await act(async () => { - window.dispatchEvent(new Event('resize')); - }); - // Measured 120px menu + 8px MENU_GAP = 128px spacer height. 
- expect(getByTestId('model-menu-spacer').style.height).toBe('128px'); - } finally { - Element.prototype.getBoundingClientRect = originalGetRect; - if (originalOffsetHeight) { - Object.defineProperty( - HTMLElement.prototype, - 'offsetHeight', - originalOffsetHeight, - ); - } - } + expect(onModelPickerToggle).toHaveBeenCalledTimes(1); }); - it('collapses reserved space when the picker closes', async () => { + it('sets aria-expanded on model picker trigger from isModelPickerOpen prop', () => { render( { inputRef={makeRef()} activeModel="gemma4:e2b" availableModels={['gemma4:e2b', 'qwen2.5:7b']} - onModelSelect={vi.fn()} + onModelPickerToggle={vi.fn()} + isModelPickerOpen={true} />, ); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - fireEvent.keyDown(document, { key: 'Escape' }); - const spacer = screen.getByTestId('model-menu-spacer'); - expect(spacer.style.height).toBe('0px'); - }); - - it('does not reserve space when the picker opens above the trigger', async () => { - const originalGetRect = Element.prototype.getBoundingClientRect; - Element.prototype.getBoundingClientRect = function () { - return { - top: 600, - left: 100, - right: 128, - bottom: 628, - width: 28, - height: 28, - x: 100, - y: 600, - toJSON() { - return {}; - }, - } as DOMRect; - }; - try { - render( - , - ); - fireEvent.click(screen.getByRole('button', { name: 'Choose model' })); - await act(async () => { - await new Promise((resolve) => requestAnimationFrame(resolve)); - }); - const spacer = screen.getByTestId('model-menu-spacer'); - expect(spacer.style.height).toBe('0px'); - } finally { - Element.prototype.getBoundingClientRect = originalGetRect; - } + expect( + screen.getByRole('button', { name: 'Choose model' }), + ).toHaveAttribute('aria-expanded', 'true'); }); - it('renders the model picker inside a Choose model tooltip wrapper', () => { + it('renders the model picker inside a Choose model tooltip wrapper in ask-bar mode', () => { render( , ); const trigger = 
screen.getByRole('button', { name: 'Choose model' }); fireEvent.mouseEnter(trigger.parentElement!); - // Two tooltip-labeled texts: the aria-label on the trigger AND the - // tooltip content. Filter by tooltip portal text (not the aria-label). expect(screen.getAllByText('Choose model').length).toBeGreaterThanOrEqual( 1, ); }); - it('hides the model picker trigger when no models are available', () => { + it('hides the model picker trigger in ask-bar mode when no models are available', () => { render( , ); expect(screen.queryByRole('button', { name: 'Choose model' })).toBeNull(); @@ -1289,8 +1121,8 @@ describe('AskBarView', () => { />, ); const btn = screen.getByRole('button', { name: 'Take screenshot' }); - expect(btn.className).not.toContain('hover:text-text-primary'); - expect(btn.className).not.toContain('hover:bg-white/8'); + expect(btn.className).not.toContain('hover:text-primary'); + expect(btn.className).not.toContain('hover:bg-primary/10'); }); it('has hover classes when below max images', () => { @@ -1308,8 +1140,8 @@ describe('AskBarView', () => { />, ); const btn = screen.getByRole('button', { name: 'Take screenshot' }); - expect(btn.className).toContain('hover:text-text-primary'); - expect(btn.className).toContain('hover:bg-white/8'); + expect(btn.className).toContain('hover:text-primary'); + expect(btn.className).toContain('hover:bg-primary/10'); }); it('shows tooltip explaining limit when camera button is hovered at max images', () => { From 9def68285e42d90115b9ec4afdc2b81f9870ae7f Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Fri, 24 Apr 2026 20:15:01 -0700 Subject: [PATCH 22/42] chore: post-rebase fixes for main integration After rebasing 22 model-picker commits onto main's TOML config rewrite (20abeb0), reconcile the residual textual mismatches that survived per-commit conflict resolution: - App.tsx: drop duplicated useModelSelection() destructure that the conflict resolver inserted after a later commit had already moved the call earlier in the 
function so useOllama could consume it. - useConversationHistory: thread the active model slug through save() so generate_title runs against the picker selection. The frontend forwards model only into generate_title; save_conversation itself still sources the model backend-side from AppConfig. - useConversationHistory.test.tsx: add the missing MODEL fixture and update every existing save() call site to pass it. The branch's later commits referenced MODEL but the constant declaration was lost during auto-merge of the test header. - App.test.tsx: rewrite the "saves the conversation with the currently selected model" assertion to expect the slug on generate_title (not save_conversation) and wrap the save click in act() so the fire-and-forget generate_title invocation is flushed before the assertion runs. - lib.rs: prettier/cargo-fmt normalisation only. Phase 1 deliberately leaves the active-model persistence layered: SQLite app_config holds the picker selection while AppConfig holds the bootstrap. A follow-up Phase 2 commit will collapse the two into a single TOML-backed source of truth via config::writer. All gates pass: bun run test (800/800), bun run test:backend, bun run validate-build (lint, format, typecheck, release bundle). Signed-off-by: Logan Nguyen --- src-tauri/src/lib.rs | 6 +-- src/App.tsx | 3 -- src/__tests__/App.test.tsx | 11 ++++- .../__tests__/useConversationHistory.test.tsx | 40 ++++++++++--------- src/hooks/useConversationHistory.ts | 15 ++++--- 5 files changed, 39 insertions(+), 36 deletions(-) diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index 48b70924..7962be53 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -749,10 +749,8 @@ pub fn run() { // even before the first picker open reconciles. 
let persisted_active = database::get_config(&db_conn, models::ACTIVE_MODEL_KEY) .expect("failed to read active_model from app_config"); - let initial_active_model = models::resolve_seed_active_model( - persisted_active.as_deref(), - &bootstrap_active, - ); + let initial_active_model = + models::resolve_seed_active_model(persisted_active.as_deref(), &bootstrap_active); app.manage(models::ActiveModelState(std::sync::Mutex::new( initial_active_model, ))); diff --git a/src/App.tsx b/src/App.tsx index bec9ead6..bf26db98 100644 --- a/src/App.tsx +++ b/src/App.tsx @@ -35,7 +35,6 @@ import './App.css'; /** Fallback model name used before get_model_picker_state resolves at startup. */ const DEFAULT_MODEL_FALLBACK = 'gemma4:e2b'; - const OVERLAY_VISIBILITY_EVENT = 'thuki://visibility'; const ONBOARDING_EVENT = 'thuki://onboarding'; @@ -242,8 +241,6 @@ function App() { const [selectedContext, setSelectedContext] = useState(null); const config = useConfig(); const quote = config.quote; - const { activeModel, availableModels, refreshModels, setActiveModel } = - useModelSelection(); /** * True when the window is near the screen bottom and should grow upward. diff --git a/src/__tests__/App.test.tsx b/src/__tests__/App.test.tsx index c6788941..e9eb60b9 100644 --- a/src/__tests__/App.test.tsx +++ b/src/__tests__/App.test.tsx @@ -96,10 +96,17 @@ describe('App', () => { getLastChannel()?.simulateMessage({ type: 'Done' }); }); - fireEvent.click(screen.getByLabelText('Save conversation')); + await act(async () => { + fireEvent.click(screen.getByLabelText('Save conversation')); + }); + // The picker selection is threaded into `generate_title` (which uses the + // active slug as the title-generation model) and stamped onto the + // assistant message via `model_name`. `save_conversation` itself does + // not take a top-level `model` arg; the active model is sourced + // backend-side from the loaded TOML AppConfig. 
expect(invoke).toHaveBeenCalledWith( - 'save_conversation', + 'generate_title', expect.objectContaining({ model: 'qwen2.5:7b' }), ); }); diff --git a/src/hooks/__tests__/useConversationHistory.test.tsx b/src/hooks/__tests__/useConversationHistory.test.tsx index 0a8ca5ff..c841f973 100644 --- a/src/hooks/__tests__/useConversationHistory.test.tsx +++ b/src/hooks/__tests__/useConversationHistory.test.tsx @@ -4,6 +4,8 @@ import { useConversationHistory } from '../useConversationHistory'; import { invoke } from '../../testUtils/mocks/tauri'; import type { Message } from '../useOllama'; +const MODEL = 'gemma4:e2b'; + const MESSAGES: Message[] = [ { id: 'u1', role: 'user', content: 'Hello', quotedText: undefined }, { id: 'a1', role: 'assistant', content: 'Hi there' }, @@ -32,7 +34,7 @@ describe('useConversationHistory', () => { const { result } = renderHook(() => useConversationHistory()); await act(async () => { - await result.current.save(MESSAGES); + await result.current.save(MESSAGES, MODEL); }); expect(invoke).toHaveBeenCalledWith('save_conversation', { @@ -70,7 +72,7 @@ describe('useConversationHistory', () => { const { result } = renderHook(() => useConversationHistory()); await act(async () => { - await result.current.save(MESSAGES); + await result.current.save(MESSAGES, MODEL); }); expect(result.current.isSaved).toBe(true); @@ -84,7 +86,7 @@ describe('useConversationHistory', () => { const { result } = renderHook(() => useConversationHistory()); await act(async () => { - await result.current.save(MESSAGES); + await result.current.save(MESSAGES, MODEL); }); expect(invoke).toHaveBeenCalledWith('generate_title', { @@ -124,13 +126,13 @@ describe('useConversationHistory', () => { const { result } = renderHook(() => useConversationHistory()); await act(async () => { - await result.current.save(MESSAGES); + await result.current.save(MESSAGES, MODEL); }); invoke.mockClear(); await act(async () => { - await result.current.save(MESSAGES); + await 
result.current.save(MESSAGES, MODEL); }); expect(invoke).not.toHaveBeenCalled(); @@ -153,7 +155,7 @@ describe('useConversationHistory', () => { const { result } = renderHook(() => useConversationHistory()); await act(async () => { - await result.current.save(MESSAGES); + await result.current.save(MESSAGES, MODEL); }); invoke.mockClear(); @@ -207,7 +209,7 @@ describe('useConversationHistory', () => { const { result } = renderHook(() => useConversationHistory()); await act(async () => { - await result.current.save(MESSAGES); + await result.current.save(MESSAGES, MODEL); }); invoke.mockClear(); @@ -394,7 +396,7 @@ describe('useConversationHistory', () => { const { result } = renderHook(() => useConversationHistory()); await act(async () => { - await result.current.save(MESSAGES); + await result.current.save(MESSAGES, MODEL); }); invoke.mockClear(); @@ -446,7 +448,7 @@ describe('useConversationHistory', () => { const { result } = renderHook(() => useConversationHistory()); await act(async () => { - await result.current.save(messagesWithWarnings); + await result.current.save(messagesWithWarnings, MODEL); }); expect(invoke).toHaveBeenCalledWith('save_conversation', { @@ -494,7 +496,7 @@ describe('useConversationHistory', () => { const { result } = renderHook(() => useConversationHistory()); await act(async () => { - await result.current.save(messagesWithThinking); + await result.current.save(messagesWithThinking, MODEL); }); expect(invoke).toHaveBeenCalledWith('save_conversation', { @@ -621,7 +623,7 @@ describe('useConversationHistory', () => { const { result } = renderHook(() => useConversationHistory()); await act(async () => { - await result.current.save(MESSAGES); + await result.current.save(MESSAGES, MODEL); }); invoke.mockClear(); @@ -657,7 +659,7 @@ describe('useConversationHistory', () => { const { result } = renderHook(() => useConversationHistory()); await act(async () => { - await result.current.save(MESSAGES); + await result.current.save(MESSAGES, MODEL); }); 
expect(result.current.isSaved).toBe(true); @@ -676,7 +678,7 @@ describe('useConversationHistory', () => { const { result } = renderHook(() => useConversationHistory()); await act(async () => { - await result.current.save(MESSAGES); + await result.current.save(MESSAGES, MODEL); }); expect(result.current.isSaved).toBe(true); @@ -700,7 +702,7 @@ describe('useConversationHistory', () => { const { result } = renderHook(() => useConversationHistory()); await act(async () => { - await result.current.save(MESSAGES); + await result.current.save(MESSAGES, MODEL); }); invoke.mockClear(); @@ -787,7 +789,7 @@ describe('useConversationHistory', () => { const { result } = renderHook(() => useConversationHistory()); await act(async () => { - await result.current.save(MESSAGES); + await result.current.save(MESSAGES, MODEL); }); invoke.mockClear(); @@ -834,7 +836,7 @@ describe('useConversationHistory', () => { const { result } = renderHook(() => useConversationHistory()); await act(async () => { - await result.current.save(messagesWithMeta); + await result.current.save(messagesWithMeta, MODEL); }); expect(invoke).toHaveBeenCalledWith( @@ -878,7 +880,7 @@ describe('useConversationHistory', () => { const { result } = renderHook(() => useConversationHistory()); await act(async () => { - await result.current.save(messagesWithTraces); + await result.current.save(messagesWithTraces, MODEL); }); expect(invoke).toHaveBeenCalledWith( @@ -901,7 +903,7 @@ describe('useConversationHistory', () => { const { result } = renderHook(() => useConversationHistory()); await act(async () => { - await result.current.save(MESSAGES); + await result.current.save(MESSAGES, MODEL); }); invoke.mockClear(); @@ -953,7 +955,7 @@ describe('useConversationHistory', () => { const { result } = renderHook(() => useConversationHistory()); await act(async () => { - await result.current.save(MESSAGES); + await result.current.save(MESSAGES, MODEL); }); invoke.mockClear(); diff --git a/src/hooks/useConversationHistory.ts 
b/src/hooks/useConversationHistory.ts index 4b5a9c91..d04a294b 100644 --- a/src/hooks/useConversationHistory.ts +++ b/src/hooks/useConversationHistory.ts @@ -268,18 +268,17 @@ export function useConversationHistory() { * Subsequent calls while `isSaved` is true are no-ops - the bookmark * icon on the frontend enforces single-save semantics. * - * The active model name is sourced by the Rust `save_conversation` command - * from the managed `AppConfig` state; the frontend no longer tracks or - * forwards it. - * - * Fires `generate_title` as a fire-and-forget background task after saving; - * the frontend should schedule a `listConversations` refresh to pick up the - * AI-generated title once it arrives (~2-5 seconds). + * Fires `generate_title` as a fire-and-forget background task after saving, + * threading the active model slug through so the title is produced by the + * same model that produced the conversation. The frontend should schedule a + * `listConversations` refresh to pick up the AI-generated title once it + * arrives (~2-5 seconds). * * @param messages The complete message history to persist. + * @param model The active Ollama model slug used for title generation. */ const save = useCallback( - async (messages: Message[]): Promise => { + async (messages: Message[], model: string): Promise => { if (isSaved) return; const payloads = messages.map(toPayload); From db4b0e24e8a620126e9f500c278067589f374935 Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Fri, 24 Apr 2026 20:55:58 -0700 Subject: [PATCH 23/42] refactor(config): drop [model] available, use SQLite as active-model SOT Active model is dynamic state (whatever the user last picked from the in-app picker, validated against Ollama's live /api/tags inventory), not a static user-tunable. Storing it in TOML duplicated ground truth from Ollama and created a staleness trap: the file would happily reference a model the user had since removed via `ollama rm`. 
This commit removes the `[model] available` list entirely. The selected slug now lives only in the SQLite `app_config` table under `active_model`, owned by `models::ActiveModelState`. The `[model]` section keeps just `ollama_url`, which IS truly static and tunable. This matches the dominant pattern across desktop AI clients (Ollama-app, Open WebUI, Msty, LM Studio, Chatbox, BoltAI, Cherry Studio, Enchanted, Raycast AI all do the same): treat the active model as runtime UI state, persist in a KV store, query the live list from the backend on demand. Changes: - schema.rs: ModelSection drops `available: Vec` and the `active()` accessor. Only `ollama_url` remains. Doc comment explains why active-model state lives in SQLite. - loader.rs: drops the available-list resolve clause. Only the ollama_url empty-string fallback survives in `[model]`. - lib.rs: drops the bootstrap_active snapshot of TOML's first entry. ActiveModelState now seeds directly from `crate::config::defaults::DEFAULT_MODEL_NAME` when SQLite has no persisted choice. Phase 3 will gate the overlay on a real installed model so this placeholder bootstrap is never streamed to Ollama. - models.rs: get_model_picker_state drops the `app_config: State<...>` parameter. The bootstrap arg to resolve_active_model is now the compile-time DEFAULT_MODEL_NAME const. - history.rs: save_conversation now reads the active slug from ActiveModelState (the picker selection) instead of app_config.model.active() (TOML), so saved conversations are attributed to the model the user actually used, not to whatever TOML's first entry said. - tests.rs: drops 5 tests that asserted on the removed `available` field (model_section_active_falls_back_when_list_empty, model_section_active_returns_first, resolve_empty_available_list_falls_back_to_default_model, resolve_whitespace_only_entries_are_filtered, resolve_entry_whitespace_is_trimmed). 
Adds `resolve_unknown_model_field_is_ignored` to verify older user files containing a stray `[model] available = [...]` still parse (serde silently drops unknown fields). - docs/configurations.md: rewrites the `[model]` table to describe the SQLite-backed active-model state and the runtime /api/tags discovery. Updates the example TOML to drop `available`. Backward compatibility: existing user config.toml files that contain `[model] available = [...]` continue to load without error; the field is silently ignored. No migration required, no version bump needed. All gates pass: bun run test (800/800), bun run test:backend (474 passed), bun run validate-build (lint, format, typecheck, release bundle). Signed-off-by: Logan Nguyen --- docs/configurations.md | 20 ++++---- src-tauri/src/config/loader.rs | 29 ++++------- src-tauri/src/config/schema.rs | 33 +++++------- src-tauri/src/config/tests.rs | 93 ++++++---------------------------- src-tauri/src/history.rs | 16 +++--- src-tauri/src/lib.rs | 24 ++++----- src-tauri/src/models.rs | 8 +-- 7 files changed, 72 insertions(+), 151 deletions(-) diff --git a/docs/configurations.md b/docs/configurations.md index c8c2aa67..ff3c566a 100644 --- a/docs/configurations.md +++ b/docs/configurations.md @@ -27,10 +27,9 @@ open ~/Library/Application\ Support/com.quietnode.thuki/config.toml ```toml [model] -# First entry is the ACTIVE model used for all inference. -# Reorder the list to switch models (requires app restart in this release). -# Run `ollama pull ` before adding a model you haven't used. -available = ["gemma4:e2b", "gemma4:e4b"] +# Where Thuki finds your local Ollama server. The active model itself is +# selected from the in-app picker (which lists whatever is installed in +# Ollama via /api/tags) and is stored in Thuki's local database, not here. 
ollama_url = "http://127.0.0.1:11434" [prompt] @@ -81,16 +80,15 @@ Every domain below is shown as a single table that lists **all** constants Thuki ## Reference -### `[LLM models]` +### `[model]` -Which AI model Thuki uses and where to find your local Ollama server. +Where to find your local Ollama server. The active model itself is **not** a TOML setting: Thuki discovers installed models live from Ollama's `/api/tags` endpoint, lets you pick one from the in-app model picker, and stores that selection in its local SQLite database (`app_config` table). Storing the active slug in TOML would duplicate ground truth from Ollama and break the moment you remove a model with `ollama rm`, so it lives next to the conversation history instead. -| Constant | Default | Tunable? | Why not tunable | Bounds | Description | -| :----------- | :------------------------- | :------- | :-------------- | :------------- | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| `available` | `["gemma4:e2b"]` | Yes | — | non-empty list | The list of Ollama models Thuki knows about. **The first model in the list is the one Thuki actually uses.** To switch models, reorder the list. Make sure to run `ollama pull ` before adding a new entry here. | -| `ollama_url` | `"http://127.0.0.1:11434"` | Yes | — | non-empty URL | The web address where Thuki finds your local Ollama server. The default works if you run Ollama on this machine with its standard port. Change this only if you moved Ollama to a different port or another machine. | +| Constant | Default | Tunable? 
| Why not tunable | Bounds | Description | +| :----------- | :------------------------- | :------- | :-------------- | :------------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `ollama_url` | `"http://127.0.0.1:11434"` | Yes | — | non-empty URL | The web address where Thuki finds your local Ollama server. The default works if you run Ollama on this machine with its standard port. Change this only if you moved Ollama to a different port or another machine. | -If the active model has not been pulled, the next request surfaces a "Model not found" error with the exact `ollama pull ` command to run. +If the active model has been removed from Ollama between launches, Thuki silently falls back to the first installed model the next time you open the picker. If no models are installed at all, the next request surfaces a "Model not found" error with the exact `ollama pull ` command to run. 
### `[prompt]` diff --git a/src-tauri/src/config/loader.rs b/src-tauri/src/config/loader.rs index 954220a7..8149ead2 100644 --- a/src-tauri/src/config/loader.rs +++ b/src-tauri/src/config/loader.rs @@ -26,13 +26,12 @@ use super::defaults::{ BOUNDS_MAX_ITERATIONS, BOUNDS_OVERLAY_WIDTH, BOUNDS_QUOTE_MAX_CONTEXT_LENGTH, BOUNDS_QUOTE_MAX_DISPLAY_CHARS, BOUNDS_QUOTE_MAX_DISPLAY_LINES, BOUNDS_SEARXNG_MAX_RESULTS, BOUNDS_TIMEOUT_S, BOUNDS_TOP_K_URLS, DEFAULT_COLLAPSED_HEIGHT, DEFAULT_HIDE_COMMIT_DELAY_MS, - DEFAULT_JUDGE_TIMEOUT_S, DEFAULT_MAX_CHAT_HEIGHT, DEFAULT_MAX_ITERATIONS, DEFAULT_MODEL_NAME, - DEFAULT_OLLAMA_URL, DEFAULT_OVERLAY_WIDTH, DEFAULT_QUOTE_MAX_CONTEXT_LENGTH, - DEFAULT_QUOTE_MAX_DISPLAY_CHARS, DEFAULT_QUOTE_MAX_DISPLAY_LINES, - DEFAULT_READER_BATCH_TIMEOUT_S, DEFAULT_READER_PER_URL_TIMEOUT_S, DEFAULT_READER_URL, - DEFAULT_ROUTER_TIMEOUT_S, DEFAULT_SEARCH_TIMEOUT_S, DEFAULT_SEARXNG_MAX_RESULTS, - DEFAULT_SEARXNG_URL, DEFAULT_SYSTEM_PROMPT_BASE, DEFAULT_TOP_K_URLS, - SLASH_COMMAND_PROMPT_APPENDIX, + DEFAULT_JUDGE_TIMEOUT_S, DEFAULT_MAX_CHAT_HEIGHT, DEFAULT_MAX_ITERATIONS, DEFAULT_OLLAMA_URL, + DEFAULT_OVERLAY_WIDTH, DEFAULT_QUOTE_MAX_CONTEXT_LENGTH, DEFAULT_QUOTE_MAX_DISPLAY_CHARS, + DEFAULT_QUOTE_MAX_DISPLAY_LINES, DEFAULT_READER_BATCH_TIMEOUT_S, + DEFAULT_READER_PER_URL_TIMEOUT_S, DEFAULT_READER_URL, DEFAULT_ROUTER_TIMEOUT_S, + DEFAULT_SEARCH_TIMEOUT_S, DEFAULT_SEARXNG_MAX_RESULTS, DEFAULT_SEARXNG_URL, + DEFAULT_SYSTEM_PROMPT_BASE, DEFAULT_TOP_K_URLS, SLASH_COMMAND_PROMPT_APPENDIX, }; use super::error::ConfigError; use super::schema::AppConfig; @@ -110,19 +109,9 @@ fn rename_corrupt(path: &Path) { /// and composes the system prompt appendix into `prompt.resolved_system`. /// After this runs, every `AppConfig` field holds a usable value. pub(crate) fn resolve(config: &mut AppConfig) { - // Model section: empty available list or empty/whitespace entries -> default. 
- let cleaned: Vec = config - .model - .available - .iter() - .map(|m| m.trim().to_string()) - .filter(|m| !m.is_empty()) - .collect(); - config.model.available = if cleaned.is_empty() { - vec![DEFAULT_MODEL_NAME.to_string()] - } else { - cleaned - }; + // Model section: only the Ollama endpoint is configurable here. The + // active model is runtime UI state owned by SQLite app_config, see + // crate::models::ActiveModelState. if config.model.ollama_url.trim().is_empty() { config.model.ollama_url = DEFAULT_OLLAMA_URL.to_string(); } diff --git a/src-tauri/src/config/schema.rs b/src-tauri/src/config/schema.rs index 125374e9..2b8e2735 100644 --- a/src-tauri/src/config/schema.rs +++ b/src-tauri/src/config/schema.rs @@ -15,22 +15,26 @@ use serde::{Deserialize, Serialize}; use super::defaults::{ DEFAULT_COLLAPSED_HEIGHT, DEFAULT_HIDE_COMMIT_DELAY_MS, DEFAULT_JUDGE_TIMEOUT_S, - DEFAULT_MAX_CHAT_HEIGHT, DEFAULT_MAX_ITERATIONS, DEFAULT_MODEL_NAME, DEFAULT_OLLAMA_URL, - DEFAULT_OVERLAY_WIDTH, DEFAULT_QUOTE_MAX_CONTEXT_LENGTH, DEFAULT_QUOTE_MAX_DISPLAY_CHARS, + DEFAULT_MAX_CHAT_HEIGHT, DEFAULT_MAX_ITERATIONS, DEFAULT_OLLAMA_URL, DEFAULT_OVERLAY_WIDTH, + DEFAULT_QUOTE_MAX_CONTEXT_LENGTH, DEFAULT_QUOTE_MAX_DISPLAY_CHARS, DEFAULT_QUOTE_MAX_DISPLAY_LINES, DEFAULT_READER_BATCH_TIMEOUT_S, DEFAULT_READER_PER_URL_TIMEOUT_S, DEFAULT_READER_URL, DEFAULT_ROUTER_TIMEOUT_S, DEFAULT_SEARCH_TIMEOUT_S, DEFAULT_SEARXNG_MAX_RESULTS, DEFAULT_SEARXNG_URL, DEFAULT_TOP_K_URLS, }; -/// Model configuration. The first entry of `available` is the active model -/// used for all inference. Reorder the list (or use the future settings panel) -/// to switch models. Keeping a single list instead of separate `active` and -/// `available` fields eliminates the mismatch failure mode entirely. +/// Static, user-tunable model configuration. +/// +/// The active model selection is NOT stored here. 
Active-model state is +/// runtime UI state owned by [`crate::models::ActiveModelState`] and +/// persisted in the SQLite `app_config` table under +/// [`crate::models::ACTIVE_MODEL_KEY`]. Storing a model slug in TOML would +/// duplicate ground truth from Ollama's `/api/tags` and create a staleness +/// trap: the file would happily reference a model the user has since +/// removed. This section keeps only the truly static knob, the Ollama +/// endpoint URL. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] #[serde(default)] pub struct ModelSection { - /// Ollama models Thuki knows about. First entry is active. - pub available: Vec, /// HTTP base URL of the local Ollama instance. pub ollama_url: String, } @@ -38,24 +42,11 @@ pub struct ModelSection { impl Default for ModelSection { fn default() -> Self { Self { - available: vec![DEFAULT_MODEL_NAME.to_string()], ollama_url: DEFAULT_OLLAMA_URL.to_string(), } } } -impl ModelSection { - /// Returns the active model (first entry). Falls back to the compiled - /// default if the list is somehow empty at call time; the loader also - /// guarantees this never happens by calling `resolve` during load. - pub fn active(&self) -> &str { - self.available - .first() - .map(String::as_str) - .unwrap_or(DEFAULT_MODEL_NAME) - } -} - /// Prompt configuration. `system` holds only the user-editable base text. /// The slash-command appendix is composed at load time into `resolved_system` /// and is never written back to the file. 
`resolved_system` is computed, not diff --git a/src-tauri/src/config/tests.rs b/src-tauri/src/config/tests.rs index d738ef90..2dd67a9b 100644 --- a/src-tauri/src/config/tests.rs +++ b/src-tauri/src/config/tests.rs @@ -14,8 +14,8 @@ use std::path::PathBuf; use super::defaults::{ DEFAULT_COLLAPSED_HEIGHT, DEFAULT_HIDE_COMMIT_DELAY_MS, DEFAULT_JUDGE_TIMEOUT_S, - DEFAULT_MAX_CHAT_HEIGHT, DEFAULT_MAX_ITERATIONS, DEFAULT_MODEL_NAME, DEFAULT_OLLAMA_URL, - DEFAULT_OVERLAY_WIDTH, DEFAULT_QUOTE_MAX_CONTEXT_LENGTH, DEFAULT_QUOTE_MAX_DISPLAY_CHARS, + DEFAULT_MAX_CHAT_HEIGHT, DEFAULT_MAX_ITERATIONS, DEFAULT_OLLAMA_URL, DEFAULT_OVERLAY_WIDTH, + DEFAULT_QUOTE_MAX_CONTEXT_LENGTH, DEFAULT_QUOTE_MAX_DISPLAY_CHARS, DEFAULT_QUOTE_MAX_DISPLAY_LINES, DEFAULT_READER_BATCH_TIMEOUT_S, DEFAULT_READER_PER_URL_TIMEOUT_S, DEFAULT_READER_URL, DEFAULT_ROUTER_TIMEOUT_S, DEFAULT_SEARCH_TIMEOUT_S, DEFAULT_SEARXNG_MAX_RESULTS, DEFAULT_SEARXNG_URL, @@ -47,7 +47,6 @@ fn defaults_const_values_match_schema_defaults() { // Guard rail: a change to a default in defaults.rs must flow through to // AppConfig::default(). If this test fails, someone changed one but not both. 
let c = AppConfig::default(); - assert_eq!(c.model.available, vec![DEFAULT_MODEL_NAME.to_string()]); assert_eq!(c.model.ollama_url, DEFAULT_OLLAMA_URL); assert_eq!(c.prompt.system, ""); assert_eq!(c.prompt.resolved_system, ""); @@ -87,8 +86,7 @@ fn defaults_prompt_base_is_nonempty() { #[test] fn section_defaults_are_sensible() { let m = ModelSection::default(); - assert_eq!(m.available, vec![DEFAULT_MODEL_NAME.to_string()]); - assert_eq!(m.active(), DEFAULT_MODEL_NAME); + assert_eq!(m.ollama_url, DEFAULT_OLLAMA_URL); let p = PromptSection::default(); assert!(p.system.is_empty()); @@ -100,26 +98,6 @@ fn section_defaults_are_sensible() { assert_eq!(q.max_display_lines, DEFAULT_QUOTE_MAX_DISPLAY_LINES); } -#[test] -fn model_section_active_falls_back_when_list_empty() { - // Guard: loader should prevent this, but active() has a defensive fallback - // so the struct can't explode if a caller bypasses the loader. - let m = ModelSection { - available: vec![], - ollama_url: DEFAULT_OLLAMA_URL.to_string(), - }; - assert_eq!(m.active(), DEFAULT_MODEL_NAME); -} - -#[test] -fn model_section_active_returns_first() { - let m = ModelSection { - available: vec!["custom:model".to_string(), "other:model".to_string()], - ollama_url: DEFAULT_OLLAMA_URL.to_string(), - }; - assert_eq!(m.active(), "custom:model"); -} - #[test] fn app_config_serde_round_trip_matches_defaults() { let original = AppConfig::default(); @@ -138,11 +116,10 @@ fn app_config_partial_file_fills_missing_fields_with_defaults() { // Only declare one field; serde(default) fills the rest. 
let partial = r#" [model] - available = ["custom:only"] + ollama_url = "http://localhost:9999" "#; let parsed: AppConfig = toml::from_str(partial).expect("partial file parses"); - assert_eq!(parsed.model.available, vec!["custom:only".to_string()]); - assert_eq!(parsed.model.ollama_url, DEFAULT_OLLAMA_URL); + assert_eq!(parsed.model.ollama_url, "http://localhost:9999"); assert_eq!(parsed.window.overlay_width, DEFAULT_OVERLAY_WIDTH); assert_eq!( parsed.quote.max_display_lines, @@ -187,7 +164,7 @@ fn load_missing_file_seeds_defaults_and_returns_them() { let config = load_from_path(&path).expect("seed on first run"); assert!(path.exists(), "file should be seeded"); - assert_eq!(config.model.active(), DEFAULT_MODEL_NAME); + assert_eq!(config.model.ollama_url, DEFAULT_OLLAMA_URL); // Resolved system prompt composed from default base plus appendix. assert!(config .prompt @@ -206,7 +183,7 @@ fn load_missing_file_in_missing_parent_dir_creates_dir() { let path = config_path_in(&nested); let config = load_from_path(&path).expect("creates parent dir and seeds"); assert!(path.exists()); - assert_eq!(config.model.active(), DEFAULT_MODEL_NAME); + assert_eq!(config.model.ollama_url, DEFAULT_OLLAMA_URL); } #[test] @@ -233,18 +210,12 @@ fn load_existing_valid_file_returns_resolved_config() { &path, r#" [model] - available = ["custom:a", "custom:b"] ollama_url = "http://localhost:99999" "#, ) .unwrap(); let config = load_from_path(&path).unwrap(); - assert_eq!( - config.model.available, - vec!["custom:a".to_string(), "custom:b".to_string()] - ); - assert_eq!(config.model.active(), "custom:a"); assert_eq!(config.model.ollama_url, "http://localhost:99999"); } @@ -277,7 +248,7 @@ fn load_corrupt_file_is_renamed_and_reseeded() { std::fs::write(&path, "this is = definitely not [ valid toml").unwrap(); let config = load_from_path(&path).expect("recover from corrupt file"); - assert_eq!(config.model.active(), DEFAULT_MODEL_NAME); + assert_eq!(config.model.ollama_url, DEFAULT_OLLAMA_URL); // 
Original file renamed with .corrupt- prefix. let renamed_exists = std::fs::read_dir(&dir) @@ -315,7 +286,7 @@ fn load_unreadable_file_returns_in_memory_defaults() { } let config = load_from_path(&path).expect("fallback to in-memory defaults"); - assert_eq!(config.model.active(), DEFAULT_MODEL_NAME); + assert_eq!(config.model.ollama_url, DEFAULT_OLLAMA_URL); // Restore so cleanup works. let _ = std::fs::set_permissions(&path, std::fs::Permissions::from_mode(0o644)); } @@ -323,55 +294,23 @@ fn load_unreadable_file_returns_in_memory_defaults() { // ── loader: resolve (empties and bounds) ──────────────────────────────────── #[test] -fn resolve_empty_available_list_falls_back_to_default_model() { - let dir = fresh_temp_dir(); - let path = config_path_in(&dir); - std::fs::write( - &path, - r#" - [model] - available = [] - "#, - ) - .unwrap(); - let config = load_from_path(&path).unwrap(); - assert_eq!(config.model.available, vec![DEFAULT_MODEL_NAME.to_string()]); - assert_eq!(config.model.active(), DEFAULT_MODEL_NAME); -} - -#[test] -fn resolve_whitespace_only_entries_are_filtered() { - let dir = fresh_temp_dir(); - let path = config_path_in(&dir); - std::fs::write( - &path, - r#" - [model] - available = [" ", "custom:x", " ", "custom:y"] - "#, - ) - .unwrap(); - let config = load_from_path(&path).unwrap(); - assert_eq!( - config.model.available, - vec!["custom:x".to_string(), "custom:y".to_string()] - ); -} - -#[test] -fn resolve_entry_whitespace_is_trimmed() { +fn resolve_unknown_model_field_is_ignored() { + // Older config files seeded a `[model] available = [...]` list. After + // removing that field from the schema, serde must silently drop it + // rather than refusing to parse the file. 
let dir = fresh_temp_dir(); let path = config_path_in(&dir); std::fs::write( &path, r#" [model] - available = [" spaced:model "] + available = ["legacy:model", "another:model"] + ollama_url = "http://localhost:11434" "#, ) .unwrap(); let config = load_from_path(&path).unwrap(); - assert_eq!(config.model.available, vec!["spaced:model".to_string()]); + assert_eq!(config.model.ollama_url, "http://localhost:11434"); } #[test] diff --git a/src-tauri/src/history.rs b/src-tauri/src/history.rs index a9941aec..864a109a 100644 --- a/src-tauri/src/history.rs +++ b/src-tauri/src/history.rs @@ -16,6 +16,7 @@ use tauri::State; use crate::commands::{ChatMessage, ConversationHistory}; use crate::config::AppConfig; use crate::database; +use crate::models::ActiveModelState; /// Thread-safe wrapper around the SQLite connection. pub struct Database(pub Mutex); @@ -67,9 +68,13 @@ pub struct SaveConversationResponse { pub fn save_conversation( messages: Vec, db: State<'_, Database>, - app_config: State<'_, AppConfig>, + active_model: State<'_, ActiveModelState>, ) -> Result { let conn = db.0.lock().map_err(|e| e.to_string())?; + let model_slug = { + let guard = active_model.0.lock().map_err(|e| e.to_string())?; + guard.clone() + }; // Use the first user message (truncated) as the initial title placeholder. 
let placeholder_title = messages.iter().find(|m| m.role == "user").map(|m| { @@ -89,12 +94,9 @@ pub fn save_conversation( } }); - let conversation_id = database::create_conversation( - &conn, - placeholder_title.as_deref(), - app_config.model.active(), - ) - .map_err(|e| e.to_string())?; + let conversation_id = + database::create_conversation(&conn, placeholder_title.as_deref(), &model_slug) + .map_err(|e| e.to_string())?; let batch: Vec = messages .into_iter() diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index 7962be53..b1393b4e 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -721,11 +721,6 @@ pub fn run() { Ok(c) => c, Err(e) => crate::config::show_fatal_dialog_and_exit(&e), }; - // Snapshot the bootstrap active-model slug from TOML before - // moving `app_config` into managed state. The picker overlay - // refreshes the live installed list on first open and may - // replace this seed. - let bootstrap_active = app_config.model.active().to_string(); app.manage(app_config); // ── Generation + conversation state ───────────────────── @@ -741,16 +736,21 @@ pub fn run() { .expect("failed to initialise SQLite database"); // ── Active-model state: seed from SQLite app_config table ── - // The installed list isn't queried here (no async runtime yet); + // The installed list isn't queried here (no async runtime yet). // get_model_picker_state reconciles against the live /api/tags - // inventory on first open and may replace this seed. - // resolve_seed_active_model trusts the persisted choice - // unconditionally, so a valid user selection survives restarts - // even before the first picker open reconciles. + // inventory on first picker open and may replace this seed. + // The placeholder DEFAULT_MODEL_NAME bootstrap is a transient + // value used only until that first reconciliation, and is the + // last-resort fallback when both the persisted slug and the + // live installed list are absent. 
Phase 3 will gate the + // overlay on a real installed model so that placeholder is + // never streamed to Ollama. let persisted_active = database::get_config(&db_conn, models::ACTIVE_MODEL_KEY) .expect("failed to read active_model from app_config"); - let initial_active_model = - models::resolve_seed_active_model(persisted_active.as_deref(), &bootstrap_active); + let initial_active_model = models::resolve_seed_active_model( + persisted_active.as_deref(), + crate::config::defaults::DEFAULT_MODEL_NAME, + ); app.manage(models::ActiveModelState(std::sync::Mutex::new( initial_active_model, ))); diff --git a/src-tauri/src/models.rs b/src-tauri/src/models.rs index 0d31af8a..376ddc27 100644 --- a/src-tauri/src/models.rs +++ b/src-tauri/src/models.rs @@ -243,15 +243,17 @@ pub async fn get_model_picker_state( client: tauri::State<'_, reqwest::Client>, db: tauri::State<'_, Database>, active_model: tauri::State<'_, ActiveModelState>, - app_config: tauri::State<'_, crate::config::AppConfig>, ) -> Result { let installed = fetch_installed_model_names(&client, DEFAULT_OLLAMA_URL).await?; let resolved = { let conn = db.0.lock().map_err(|e| e.to_string())?; let persisted = get_config(&conn, ACTIVE_MODEL_KEY).map_err(|e| e.to_string())?; - let resolved = - resolve_active_model(persisted.as_deref(), &installed, app_config.model.active()); + let resolved = resolve_active_model( + persisted.as_deref(), + &installed, + crate::config::defaults::DEFAULT_MODEL_NAME, + ); if should_persist_resolved(&installed, persisted.as_deref(), &resolved) { set_config(&conn, ACTIVE_MODEL_KEY, &resolved).map_err(|e| e.to_string())?; } From 7a3fbd1fd6c404ab4b1a686b56ccb1c89e29f194 Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Sat, 25 Apr 2026 03:28:23 -0500 Subject: [PATCH 24/42] feat(onboarding): backend gate for Ollama setup (Phase 3) Add the Phase 3 onboarding stage that gates the chat overlay on a working local Ollama setup with at least one installed model. 
Returning users with everything in order skip the gate entirely; first-time users and any user whose Ollama setup degraded since last launch see a single two-step screen owned by the frontend (ModelCheckStep, next commit). Onboarding now progresses: permissions -> model_check -> intro -> complete Backend changes: - onboarding::OnboardingStage gains a ModelCheck variant. Serialised in snake_case to match the TypeScript union the frontend routes on. Persisted SQLite value uses the same string. Forward-compat: an unrecognised future stage falls back to Permissions, the safe default that re-runs the full flow. - models::ModelSetupState enum (Ollama unreachable | no models installed | Ready { active_slug, installed }). Internally-tagged on `state` so the React side discriminates on a single string. - models::derive_model_setup_state pure helper that maps the result of probing /api/tags + the persisted active-slug preference into a ModelSetupState. Pure so all three branches are unit-testable without HTTP or Tauri runtime; the Tauri command is a thin wrapper. - models::check_model_setup async Tauri command. Idempotent. On the Ready arm, persists the resolved active slug through SQLite via should_persist_resolved (the same TOCTOU-safe gate get_model_picker_state uses, so a transient empty /api/tags response cannot clobber a valid saved choice) and mirrors the slug into ActiveModelState so subsequent ask_ollama / search_pipeline calls see it without an extra DB read. - lib::notify_frontend_ready routes to the ModelCheck stage whenever the persisted stage is Permissions or ModelCheck and live macOS permissions are granted. The actual Ollama probe runs in the frontend because notify_frontend_ready is invoked synchronously and /api/tags needs the async runtime; the brief frontend "checking" state is fine for the rare happy path and unavoidable on the gated paths. - lib::advance_past_model_check Tauri command. 
Writes the Intro stage and re-emits the onboarding event so the parent OnboardingView swaps to IntroStep without a window flicker. Idempotent: writing intro over intro is a harmless no-op so a frontend race cannot corrupt state. - CLAUDE.md documents the GStack design CLI fallback to hand-crafted HTML wireframes when no OpenAI API key is configured. Used during Phase 3 design exploration; relevant to every future design skill invocation on this project. Tests: - 4 new onboarding tests covering ModelCheck round-trip, snake_case serialisation parity with the frontend union, unknown-stage fallback, and startup routing. - 8 new models tests covering all 3 branches of derive_model_setup_state plus the JSON wire format (state-tag discrimination). - All 486 + 12 = 498 backend tests pass; clippy -D warnings clean; cargo fmt clean. Security and resilience invariants preserved: - /api/tags fetch keeps the existing 5s timeout and 4 MiB body cap. - Slug validation (charset, length) unchanged. - No new attack surface: the new command takes no user input and only reads from localhost:11434 and the existing SQLite app_config table. - TOCTOU window between fetch + persist gated through the same should_persist_resolved guard get_model_picker_state already uses. The frontend ModelCheckStep component lands in the next commit. Signed-off-by: Logan Nguyen --- CLAUDE.md | 10 ++ src-tauri/src/lib.rs | 79 ++++++++++--- src-tauri/src/models.rs | 223 ++++++++++++++++++++++++++++++++++++ src-tauri/src/onboarding.rs | 75 +++++++++++- 4 files changed, 366 insertions(+), 21 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index b15fad77..7935f4ab 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -152,6 +152,16 @@ Do not consider the task done if either step produces any warnings or errors. Fi Never commit files generated by superpowers skills (design specs, implementation plans, brainstorming docs). These live under `docs/superpowers/` which is gitignored. 
Do not stage or commit anything under that path. +## GStack Design Tooling Fallback + +When invoking GStack design skills (`/design-shotgun`, `/design-html`, `/design-review`, etc.) inside Claude Code on this project: if the design CLI fails because no OpenAI API key is configured (e.g. `setup` not run, `OPENAI_API_KEY` unset, `~/.gstack/openai.json` missing), do not block the user with a setup prompt. Automatically fall back to hand-crafted HTML wireframes that use the real Thuki design tokens read directly from the source files (`src/view/onboarding/PermissionsStep.tsx`, `src/view/onboarding/IntroStep.tsx`, `src/components/`). These wireframes are strictly more accurate to the final UI than image generation because they use the exact CSS values rather than a model's interpretation of them. + +Workflow: +1. Read the relevant source files to extract the actual design tokens (colors, spacing, fonts, border radii, gradients, shadows). +2. Write the wireframes as static HTML files in `~/.gstack/projects/quiet-node-thuki/designs/-/` so they live alongside any future image-based mockups. +3. Open the wireframes in the browser via `open file://...` for review. +4. Only mention the missing API key as a one-line aside, not as a blocker. The user can opt back into image generation later. 
+ ## Key Design Constraints - **macOS only** — uses NSPanel, Core Graphics event taps, macOS Control key diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index b1393b4e..1d9f84f0 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -391,35 +391,46 @@ fn notify_frontend_ready(app_handle: tauri::AppHandle, db: tauri::State, + app_handle: tauri::AppHandle, +) -> Result<(), String> { + let conn = db.0.lock().map_err(|e| format!("db lock poisoned: {e}"))?; + onboarding::set_stage(&conn, &onboarding::OnboardingStage::Intro) + .map_err(|e| format!("db write failed: {e}"))?; + drop(conn); + + let _ = app_handle.emit( + ONBOARDING_EVENT, + OnboardingPayload { + stage: onboarding::OnboardingStage::Intro, + }, + ); + Ok(()) +} + // ─── Onboarding completion ─────────────────────────────────────────────────── /// Called when the user clicks "Get Started" on the intro screen. @@ -780,6 +822,8 @@ pub fn run() { #[cfg(not(coverage))] models::set_active_model, #[cfg(not(coverage))] + models::check_model_setup, + #[cfg(not(coverage))] history::save_conversation, #[cfg(not(coverage))] history::persist_message, @@ -818,7 +862,8 @@ pub fn run() { permissions::check_screen_recording_tcc_granted, #[cfg(not(coverage))] permissions::quit_and_relaunch, - finish_onboarding + finish_onboarding, + advance_past_model_check ]) .build(tauri::generate_context!()) .expect("error while building tauri application") diff --git a/src-tauri/src/models.rs b/src-tauri/src/models.rs index 376ddc27..dd3851dd 100644 --- a/src-tauri/src/models.rs +++ b/src-tauri/src/models.rs @@ -297,6 +297,121 @@ pub async fn set_active_model( Ok(()) } +// ─── Model setup gate (Phase 3 onboarding) ────────────────────────────────── + +/// Result of probing the local Ollama daemon for setup readiness. +/// +/// Drives the Phase 3 onboarding gate that fires after the user grants +/// macOS permissions but before the chat overlay is allowed to open. 
+/// Variants are emitted to the frontend in `snake_case` with an +/// internally-tagged `state` discriminator so the React side can route +/// on a single string field without inspecting payload shape. +#[derive(Debug, Clone, PartialEq, serde::Serialize)] +#[serde(tag = "state", rename_all = "snake_case")] +pub enum ModelSetupState { + /// `/api/tags` could not be reached. Treat as "Ollama is not installed + /// or not running"; the UI must guide the user to install or start it. + OllamaUnreachable, + /// `/api/tags` responded successfully but the installed list is empty. + /// The UI must guide the user to `ollama pull `. + NoModelsInstalled, + /// Ollama is running with at least one installed model. `active_slug` + /// is the slug we resolved (persisted preference if still installed, + /// else first installed) and `installed` is the live list for the + /// frontend to render in the picker. + Ready { + active_slug: String, + installed: Vec, + }, +} + +/// Pure state-machine derivation: maps the result of probing `/api/tags` +/// plus the persisted active-slug preference into a [`ModelSetupState`]. +/// +/// Exists as a free function so the three branches can be unit-tested +/// without spinning up an HTTP server or a Tauri runtime. The fetch +/// result and persisted preference are the only inputs; no I/O happens +/// here. The Tauri command is a thin wrapper that calls the fetcher, +/// reads the persisted slug from SQLite, then delegates here. +/// +/// Resolution rules for the Ready arm match +/// [`resolve_active_model`]: prefer the persisted slug when it is still +/// installed; otherwise fall back to the first installed slug. The +/// `bootstrap` argument is the compile-time fallback used only when +/// both inputs are absent, which by definition cannot happen on the +/// Ready arm (it would have routed to NoModelsInstalled). 
+pub fn derive_model_setup_state( + installed_result: Result, String>, + persisted: Option<&str>, + bootstrap: &str, +) -> ModelSetupState { + match installed_result { + Err(_) => ModelSetupState::OllamaUnreachable, + Ok(installed) if installed.is_empty() => ModelSetupState::NoModelsInstalled, + Ok(installed) => { + let active_slug = resolve_active_model(persisted, &installed, bootstrap); + ModelSetupState::Ready { + active_slug, + installed, + } + } + } +} + +/// Probes Ollama for setup readiness and returns the typed +/// [`ModelSetupState`] for the frontend onboarding gate. +/// +/// Idempotent: safe to call on every overlay open. The Ready arm also +/// commits two side effects, both intentionally bounded: +/// +/// 1. If the resolved slug differs from the persisted slug AND the live +/// installed list is non-empty, persist the resolved slug. This heals +/// the case where a user removed their previously-selected model with +/// `ollama rm` between launches. +/// 2. Mirror the resolved slug into the in-memory [`ActiveModelState`] so +/// `ask_ollama` and `search_pipeline` see it on the next request +/// without an extra DB read. +/// +/// Both writes are gated through [`should_persist_resolved`] which +/// refuses to persist when Ollama reports an empty inventory (i.e. +/// daemon is up but mid-restart), so a transient empty response cannot +/// clobber a valid persisted choice. +#[cfg_attr(coverage_nightly, coverage(off))] +#[cfg_attr(not(coverage), tauri::command)] +pub async fn check_model_setup( + client: tauri::State<'_, reqwest::Client>, + db: tauri::State<'_, Database>, + active_model: tauri::State<'_, ActiveModelState>, +) -> Result { + let installed_result = fetch_installed_model_names(&client, DEFAULT_OLLAMA_URL).await; + + let persisted = { + let conn = db.0.lock().map_err(|e| e.to_string())?; + get_config(&conn, ACTIVE_MODEL_KEY).map_err(|e| e.to_string())? 
+ }; + + let state = derive_model_setup_state( + installed_result, + persisted.as_deref(), + crate::config::defaults::DEFAULT_MODEL_NAME, + ); + + if let ModelSetupState::Ready { + ref active_slug, + ref installed, + } = state + { + if should_persist_resolved(installed, persisted.as_deref(), active_slug) { + let conn = db.0.lock().map_err(|e| e.to_string())?; + set_config(&conn, ACTIVE_MODEL_KEY, active_slug).map_err(|e| e.to_string())?; + } + let mut guard = active_model.0.lock().map_err(|e| e.to_string())?; + *guard = active_slug.clone(); + } + + Ok(state) +} + // ─── Tests ────────────────────────────────────────────────────────────────── #[cfg(test)] @@ -748,4 +863,112 @@ mod tests { "Model is not installed in Ollama: " ); } + + // ── derive_model_setup_state (Phase 3 onboarding gate) ────────────────── + + #[test] + fn derive_setup_state_returns_unreachable_on_fetch_error() { + let state = + derive_model_setup_state(Err("connection refused".to_string()), None, "gemma4:e2b"); + assert_eq!(state, ModelSetupState::OllamaUnreachable); + } + + #[test] + fn derive_setup_state_returns_unreachable_even_when_persisted_choice_exists() { + // Past selection must NOT mask a current outage. The user needs to + // see the "Ollama not detected" screen even if SQLite remembers a slug. + let state = + derive_model_setup_state(Err("timeout".to_string()), Some("gemma4:e4b"), "gemma4:e2b"); + assert_eq!(state, ModelSetupState::OllamaUnreachable); + } + + #[test] + fn derive_setup_state_returns_no_models_when_inventory_empty() { + let state = derive_model_setup_state(Ok(vec![]), None, "gemma4:e2b"); + assert_eq!(state, ModelSetupState::NoModelsInstalled); + } + + #[test] + fn derive_setup_state_returns_no_models_even_with_stale_persisted_slug() { + // Daemon up but the user removed every model with `ollama rm`. The + // persisted slug is no longer valid; the gate must re-engage. 
+ let state = derive_model_setup_state(Ok(vec![]), Some("removed-model:7b"), "gemma4:e2b"); + assert_eq!(state, ModelSetupState::NoModelsInstalled); + } + + #[test] + fn derive_setup_state_ready_keeps_persisted_when_still_installed() { + let state = derive_model_setup_state( + Ok(vec!["gemma4:e2b".to_string(), "llama3:8b".to_string()]), + Some("llama3:8b"), + "gemma4:e2b", + ); + assert_eq!( + state, + ModelSetupState::Ready { + active_slug: "llama3:8b".to_string(), + installed: vec!["gemma4:e2b".to_string(), "llama3:8b".to_string()], + } + ); + } + + #[test] + fn derive_setup_state_ready_falls_back_to_first_when_persisted_gone() { + let state = derive_model_setup_state( + Ok(vec!["gemma4:e4b".to_string(), "llama3:8b".to_string()]), + Some("removed-model:7b"), + "gemma4:e2b", + ); + assert_eq!( + state, + ModelSetupState::Ready { + active_slug: "gemma4:e4b".to_string(), + installed: vec!["gemma4:e4b".to_string(), "llama3:8b".to_string()], + } + ); + } + + #[test] + fn derive_setup_state_ready_uses_first_when_no_persisted_choice() { + // First-time user who somehow has models installed already (rare: + // they used Ollama for something else first). Pick the first. + let state = + derive_model_setup_state(Ok(vec!["qwen2.5:7b".to_string()]), None, "gemma4:e2b"); + assert_eq!( + state, + ModelSetupState::Ready { + active_slug: "qwen2.5:7b".to_string(), + installed: vec!["qwen2.5:7b".to_string()], + } + ); + } + + #[test] + fn model_setup_state_serializes_with_state_tag_for_frontend() { + // Wire format must be discriminated on a `state` field so the + // React side can route on a single string before pattern-matching + // payload shape. Drift here breaks the frontend dispatch. 
+ let unreachable = serde_json::to_value(ModelSetupState::OllamaUnreachable).unwrap(); + assert_eq!( + unreachable, + serde_json::json!({"state": "ollama_unreachable"}) + ); + + let none = serde_json::to_value(ModelSetupState::NoModelsInstalled).unwrap(); + assert_eq!(none, serde_json::json!({"state": "no_models_installed"})); + + let ready = serde_json::to_value(ModelSetupState::Ready { + active_slug: "gemma4:e2b".to_string(), + installed: vec!["gemma4:e2b".to_string()], + }) + .unwrap(); + assert_eq!( + ready, + serde_json::json!({ + "state": "ready", + "active_slug": "gemma4:e2b", + "installed": ["gemma4:e2b"], + }) + ); + } } diff --git a/src-tauri/src/onboarding.rs b/src-tauri/src/onboarding.rs index 18b1bc5a..497a47ad 100644 --- a/src-tauri/src/onboarding.rs +++ b/src-tauri/src/onboarding.rs @@ -5,10 +5,19 @@ * persisted value in the `app_config` table. * * Stages progress linearly: - * "permissions" -> "intro" -> "complete" + * "permissions" -> "model_check" -> "intro" -> "complete" * * "permissions" is the implicit default when no value has been written yet. - * Once "complete", onboarding is never shown again regardless of permissions. + * "model_check" gates the user on having Ollama running with at least one + * installed model. Both stages are skipped on every subsequent launch once + * advanced past. Once "complete", onboarding is never shown again regardless + * of permissions or installed models. + * + * Backward compatibility: existing installs with persisted stages of + * "permissions", "intro", or "complete" all parse correctly. The new + * "model_check" value is unknown to older installs but the file format is + * forward-compatible (unknown stages fall back to Permissions, the safe + * default that re-runs the full flow). */ use rusqlite::Connection; @@ -19,18 +28,26 @@ use crate::database::{get_config, set_config}; const STAGE_KEY: &str = "onboarding_stage"; /// Serializable stage value sent to the frontend via the onboarding event. 
+/// +/// Variants are emitted in `snake_case` for the frontend to match the +/// `OnboardingStage` TypeScript union exactly. The persisted SQLite value +/// uses the same string form, so the on-disk format is identical to the +/// wire format. #[derive(Debug, Clone, PartialEq, serde::Serialize)] -#[serde(rename_all = "lowercase")] +#[serde(rename_all = "snake_case")] pub enum OnboardingStage { Permissions, + ModelCheck, Intro, Complete, } /// Reads the persisted onboarding stage. Returns `Permissions` if no value -/// has been written yet (i.e. first-ever launch). +/// has been written yet (first-ever launch) or if the persisted value is +/// not recognised (forward-compatible with future stage names). pub fn get_stage(conn: &Connection) -> rusqlite::Result { match get_config(conn, STAGE_KEY)?.as_deref() { + Some("model_check") => Ok(OnboardingStage::ModelCheck), Some("intro") => Ok(OnboardingStage::Intro), Some("complete") => Ok(OnboardingStage::Complete), _ => Ok(OnboardingStage::Permissions), @@ -41,6 +58,7 @@ pub fn get_stage(conn: &Connection) -> rusqlite::Result { pub fn set_stage(conn: &Connection, stage: &OnboardingStage) -> rusqlite::Result<()> { let value = match stage { OnboardingStage::Permissions => "permissions", + OnboardingStage::ModelCheck => "model_check", OnboardingStage::Intro => "intro", OnboardingStage::Complete => "complete", }; @@ -98,6 +116,13 @@ mod tests { assert_eq!(get_stage(&conn).unwrap(), OnboardingStage::Intro); } + #[test] + fn set_and_get_stage_round_trips_model_check() { + let conn = open_in_memory().unwrap(); + set_stage(&conn, &OnboardingStage::ModelCheck).unwrap(); + assert_eq!(get_stage(&conn).unwrap(), OnboardingStage::ModelCheck); + } + #[test] fn set_and_get_stage_round_trips_complete() { let conn = open_in_memory().unwrap(); @@ -105,6 +130,48 @@ mod tests { assert_eq!(get_stage(&conn).unwrap(), OnboardingStage::Complete); } + #[test] + fn get_stage_falls_back_to_permissions_on_unknown_value() { + // Forward-compat guard: 
if a future build wrote an unrecognised + // stage and the user downgrades, we must safely re-run the flow + // rather than panic or pick an arbitrary stage. + let conn = open_in_memory().unwrap(); + crate::database::set_config(&conn, STAGE_KEY, "future_stage").unwrap(); + assert_eq!(get_stage(&conn).unwrap(), OnboardingStage::Permissions); + } + + #[test] + fn compute_startup_stage_shows_model_check_when_stage_is_model_check() { + let conn = open_in_memory().unwrap(); + set_stage(&conn, &OnboardingStage::ModelCheck).unwrap(); + assert_eq!( + compute_startup_stage(&conn).unwrap(), + Some(OnboardingStage::ModelCheck) + ); + } + + #[test] + fn stage_serializes_to_snake_case_for_frontend() { + // Wire format must match the TypeScript OnboardingStage union exactly. + // Frontend routes on these strings, so any drift breaks the dispatch. + assert_eq!( + serde_json::to_string(&OnboardingStage::Permissions).unwrap(), + "\"permissions\"" + ); + assert_eq!( + serde_json::to_string(&OnboardingStage::ModelCheck).unwrap(), + "\"model_check\"" + ); + assert_eq!( + serde_json::to_string(&OnboardingStage::Intro).unwrap(), + "\"intro\"" + ); + assert_eq!( + serde_json::to_string(&OnboardingStage::Complete).unwrap(), + "\"complete\"" + ); + } + #[test] fn set_stage_overwrites_previous_value() { let conn = open_in_memory().unwrap(); From b9f28b65439965295a16c3fe3278b4da0b500b03 Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Sat, 25 Apr 2026 03:29:31 -0500 Subject: [PATCH 25/42] feat(onboarding): ModelCheckStep frontend (Phase 3) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Frontend half of the Phase 3 model-check gate. Renders the unified two-step "Set up your local AI" screen approved 2026-04-25 in the design-shotgun board, reusing the StepCard pattern from PermissionsStep so the user immediately recognises the visual rhythm they just walked through one screen prior. Solves the "one last thing... 
wait, ANOTHER last thing" UX failure of the earlier two-screen exploration. Frontend changes: - view/onboarding/_shared.tsx extracts the StepCard + Badge components out of PermissionsStep so ModelCheckStep can reuse them without duplication. Token values (active orange glow, done green border, waiting white border) live in this single file. PermissionsStep now imports from here. - view/onboarding/ModelCheckStep.tsx is the new gate. Probes Ollama via the backend check_model_setup command on mount; the same probe fires on every Re-check button click (no background polling, the user is the trigger). Three render paths: * Probing: panel chrome + "Checking your local Ollama setup…", step cards hidden so a brief flash never shows the wrong CTA. * Ollama unreachable: Step 1 active with brew install + open -a Ollama affordances; Step 2 waiting. * No models: Step 1 done with green Connected badge; Step 2 active with the curated 3-model list (gemma4:e2b recommended, llama3:8b, qwen2.5:7b). Each card has a Copy button that writes the ollama pull command to the clipboard. * Ready: never paints the gate. The component fires advance_past_model_check; the backend re-emits the onboarding event and OnboardingView swaps to IntroStep. IPC failures are treated as Ollama unreachable so the user always sees a recovery path. Clipboard write failures are swallowed silently because the terminal command remains visible for manual copy. - view/onboarding/index.tsx adds 'model_check' to the OnboardingStage union and routes to ModelCheckStep when the backend reports that stage. The union string matches the Rust enum's snake_case wire format exactly. Tests: - 11 ModelCheckStep tests covering both render branches, the Ready fast-path advance, IPC failure recovery, Re-check re-probe + state transition, the in-flight Re-check no-op guard (prevents double fires), all three clipboard targets (brew install, open Ollama, ollama pull ), silent clipboard failure swallowing, and the privacy footer. 
- 1 OnboardingView test covering the new model_check route. - All 812 frontend tests pass; eslint, prettier, tsc all clean. Build: - bun run validate-build green: lint + format + typecheck + release bundle. Tauri release build succeeded. Signed-off-by: Logan Nguyen --- src/view/onboarding/ModelCheckStep.tsx | 711 ++++++++++++++++++ src/view/onboarding/PermissionsStep.tsx | 70 +- .../__tests__/ModelCheckStep.test.tsx | 236 ++++++ src/view/onboarding/__tests__/index.test.tsx | 6 + src/view/onboarding/_shared.tsx | 100 +++ src/view/onboarding/index.tsx | 17 +- 6 files changed, 1069 insertions(+), 71 deletions(-) create mode 100644 src/view/onboarding/ModelCheckStep.tsx create mode 100644 src/view/onboarding/__tests__/ModelCheckStep.test.tsx create mode 100644 src/view/onboarding/_shared.tsx diff --git a/src/view/onboarding/ModelCheckStep.tsx b/src/view/onboarding/ModelCheckStep.tsx new file mode 100644 index 00000000..e7d38a59 --- /dev/null +++ b/src/view/onboarding/ModelCheckStep.tsx @@ -0,0 +1,711 @@ +/** + * Onboarding step that gates the chat overlay on a working local Ollama + * setup with at least one installed model. + * + * Mounts after PermissionsStep clears all macOS grants and before + * IntroStep runs. Probes the daemon via the `check_model_setup` Tauri + * command, then renders one of three states: + * + * - Ollama unreachable: Step 1 is the active card with install / + * start affordances; Step 2 is the waiting card. + * - No models installed: Step 1 collapses to a green Connected + * badge; Step 2 is the active card with the recommended-model list. + * - Ready: never visible. The component fires `advance_past_model_check` + * and the parent OnboardingView replaces it with IntroStep before + * the next paint. + * + * Every state shares the same screen, the same StepCard pattern, and the + * same Re-check CTA, so the user is never surprised by a "wait, ANOTHER + * last thing" tagline. 
+ */ + +import { motion } from 'framer-motion'; +import type React from 'react'; +import { useState, useEffect, useRef, useCallback } from 'react'; +import { invoke } from '@tauri-apps/api/core'; +import thukiLogo from '../../../src-tauri/icons/128x128.png'; +import { StepCard, Badge } from './_shared'; + +/** + * Wire-format payload returned by the `check_model_setup` Tauri command. + * + * Discriminated on `state` to match the Rust `ModelSetupState` enum + * exactly. Frontend routes solely on the `state` string; the optional + * `active_slug` and `installed` fields are present only on `ready`. + */ +type ModelSetupState = + | { state: 'ollama_unreachable' } + | { state: 'no_models_installed' } + | { state: 'ready'; active_slug: string; installed: string[] }; + +/** + * Recommended starter models surfaced in the Step 2 card. + * + * The list is intentionally short and curated: three options is enough + * for new users to feel they have a real choice without forcing them + * to research model trade-offs. Sourced from the design approved + * 2026-04-25 in `~/.gstack/projects/.../approved.json`. + */ +const RECOMMENDED_MODELS: Array<{ + slug: string; + description: string; + recommended?: boolean; +}> = [ + { + slug: 'gemma4:e2b', + description: 'Lightweight all-rounder · 1.6 GB', + recommended: true, + }, + { slug: 'llama3:8b', description: 'Stronger reasoning · 4.7 GB' }, + { slug: 'qwen2.5:7b', description: 'Code-focused · 4.4 GB' }, +]; + +/** + * Builds the `ollama pull ` command for a given model. Centralised + * so the copy-button affordance and the (future) settings panel share + * the same string and cannot drift. + */ +function buildPullCommand(slug: string): string { + return `ollama pull ${slug}`; +} + +/** Copies a string to the macOS clipboard, ignoring failures silently. 
*/ +async function copyToClipboard(text: string): Promise { + try { + await navigator.clipboard.writeText(text); + } catch { + // Clipboard write can fail on locked sessions or denied permissions; + // there is no recovery and showing an error here would be more + // confusing than silent. The terminal command remains visible for + // the user to copy manually. + } +} + +/** + * Renders the model-check onboarding gate. + * + * Probes Ollama once on mount and again on every Re-check click. No + * background polling: the user is the trigger, which keeps idle CPU + * and IPC traffic at zero between explicit interactions. + * + * Takes no props. Parent OnboardingView routes here when the persisted + * stage is `model_check`. Stage advance to `intro` is owned by the + * backend `advance_past_model_check` command, fired from inside this + * component when the probe reports `Ready`. The backend re-emits the + * onboarding event with the new stage so the parent re-routes without + * a window flicker. + */ +export function ModelCheckStep() { + const [setupState, setSetupState] = useState(null); + const [isRechecking, setIsRechecking] = useState(false); + const mountedRef = useRef(true); + + /** + * Probes Ollama via the backend command and either advances the + * onboarding stage (Ready) or stores the gate state for rendering. + * + * Idempotent: safe to call repeatedly. The backend handles persisting + * the resolved active slug; this hook only routes UI state. + */ + const probe = useCallback(async () => { + try { + const next = await invoke('check_model_setup'); + if (!mountedRef.current) return; + if (next.state === 'ready') { + // Fire-and-forget: the backend emits the onboarding event with + // the new stage, which OnboardingView routes to IntroStep. + await invoke('advance_past_model_check'); + return; + } + setSetupState(next); + } catch { + // Treat any IPC failure as Ollama unreachable so the user sees a + // recovery path. The next Re-check click will retry. 
+ if (!mountedRef.current) return; + setSetupState({ state: 'ollama_unreachable' }); + } + }, []); + + useEffect(() => { + mountedRef.current = true; + void probe(); + return () => { + mountedRef.current = false; + }; + }, [probe]); + + const handleRecheck = useCallback(async () => { + if (isRechecking) return; + setIsRechecking(true); + try { + await probe(); + } finally { + if (mountedRef.current) { + setIsRechecking(false); + } + } + }, [isRechecking, probe]); + + const ollamaConnected = setupState?.state === 'no_models_installed'; + const isWaitingForOllama = setupState?.state === 'ollama_unreachable'; + const isProbing = setupState === null; + const stepOneActive = isWaitingForOllama; + const stepOneDone = ollamaConnected; + const stepTwoActive = ollamaConnected; + + return ( +
+ + {/* Top edge highlight, identical to PermissionsStep / IntroStep. */} +
+ +
+ Thuki +
+ +

+ Set up your local AI +

+

+ {isProbing + ? 'Checking your local Ollama setup…' + : ollamaConnected + ? 'Almost there. Pick a model so Thuki has something to think with. You can always switch to a different model later.' + : 'Two quick things and you are in. Thuki runs Ollama locally so your conversations never leave this machine.'} +

+ +
+ + + + + + {stepOneDone ? Connected : null} + + + {stepOneActive ? ( + + void copyToClipboard('brew install ollama')} + buttonLabel="Copy" + buttonGlyph={} + /> + void copyToClipboard('open -a Ollama')} + buttonLabel="Copy" + buttonGlyph={} + /> + + ) : null} + + + + + + + + + {stepTwoActive ? ( +
+ {RECOMMENDED_MODELS.map((m) => ( + void copyToClipboard(buildPullCommand(m.slug))} + /> + ))} +
+ ) : null} +
+ + + +

+ Private by default · All inference runs on your machine +

+ +
+ ); +} + +// ─── Sub-components ────────────────────────────────────────────────────────── + +type Variant = 'active' | 'done' | 'waiting'; + +interface StepIconProps { + variant: Variant; + children: React.ReactNode; +} + +function StepIcon({ variant, children }: StepIconProps) { + const palette: Record< + Variant, + { bg: string; border: string; color: string } + > = { + active: { + bg: 'rgba(255,141,92,0.12)', + border: 'rgba(255,141,92,0.25)', + color: '#ff8d5c', + }, + done: { + bg: 'rgba(34,197,94,0.12)', + border: 'rgba(34,197,94,0.2)', + color: '#22c55e', + }, + waiting: { + bg: 'rgba(255,255,255,0.04)', + border: 'rgba(255,255,255,0.08)', + color: 'rgba(255,255,255,0.4)', + }, + }; + const p = palette[variant]; + return ( +
+ {children} +
+ ); +} + +interface StepTextProps { + eyebrow: string; + eyebrowVariant: Variant; + title: string; + titleMuted: boolean; +} + +function StepText({ + eyebrow, + eyebrowVariant, + title, + titleMuted, +}: StepTextProps) { + const eyebrowColor: Record = { + active: 'rgba(255,141,92,0.8)', + done: 'rgba(34,197,94,0.8)', + waiting: 'rgba(255,255,255,0.4)', + }; + return ( +
+
+ {eyebrow} +
+

+ {title} +

+
+ ); +} + +function ActionRow({ children }: { children: React.ReactNode }) { + return ( +
+ {children} +
+ ); +} + +interface ActionCardProps { + title: string; + desc: string; + primary?: boolean; + onClick: () => void; + buttonLabel: string; + buttonGlyph: React.ReactNode; +} + +function ActionCard({ + title, + desc, + primary, + onClick, + buttonLabel, + buttonGlyph, +}: ActionCardProps) { + return ( +
+
+

+ {title} +

+

+ {desc} +

+
+ +
+ ); +} + +interface ModelCardProps { + slug: string; + description: string; + recommended: boolean; + onCopy: () => void; +} + +function ModelCard({ slug, description, recommended, onCopy }: ModelCardProps) { + return ( +
+
+ {recommended ? ( +
+ RECOMMENDED +
+ ) : null} +

+ {slug} +

+

+ {description} +

+
+ +
+ ); +} + +// ─── Glyphs ────────────────────────────────────────────────────────────────── + +function ShieldCheckGlyph() { + return ( + + + + + ); +} + +function CubeGlyph() { + return ( + + + + + ); +} + +function CopyGlyph() { + return ( + + + + + ); +} diff --git a/src/view/onboarding/PermissionsStep.tsx b/src/view/onboarding/PermissionsStep.tsx index e8cd5f40..b49c4408 100644 --- a/src/view/onboarding/PermissionsStep.tsx +++ b/src/view/onboarding/PermissionsStep.tsx @@ -3,6 +3,7 @@ import type React from 'react'; import { useState, useEffect, useRef, useCallback } from 'react'; import { invoke } from '@tauri-apps/api/core'; import thukiLogo from '../../../src-tauri/icons/128x128.png'; +import { StepCard, Badge } from './_shared'; /** How often to poll for permission grants after the user requests them. */ const POLL_INTERVAL_MS = 500; @@ -541,72 +542,3 @@ function CTAButton({ ); } - -interface StepCardProps { - active: boolean; - done: boolean; - children: React.ReactNode; -} - -function StepCard({ active, done, children }: StepCardProps) { - const borderColor = done - ? 'rgba(34,197,94,0.2)' - : active - ? 'rgba(255,141,92,0.4)' - : 'rgba(255,255,255,0.06)'; - - const background = done - ? 'rgba(34,197,94,0.05)' - : active - ? 'rgba(255,141,92,0.07)' - : 'rgba(255,255,255,0.03)'; - - return ( -
- {children} -
- ); -} - -interface BadgeProps { - color: 'green'; - children: React.ReactNode; -} - -function Badge({ color, children }: BadgeProps) { - const styles: Record = { - green: { - color: '#22c55e', - background: 'rgba(34,197,94,0.1)', - border: '1px solid rgba(34,197,94,0.2)', - }, - }; - - return ( - - {children} - - ); -} diff --git a/src/view/onboarding/__tests__/ModelCheckStep.test.tsx b/src/view/onboarding/__tests__/ModelCheckStep.test.tsx new file mode 100644 index 00000000..e64301cb --- /dev/null +++ b/src/view/onboarding/__tests__/ModelCheckStep.test.tsx @@ -0,0 +1,236 @@ +import { + render, + screen, + fireEvent, + act, + waitFor, +} from '@testing-library/react'; +import { describe, it, expect, beforeEach, beforeAll, vi } from 'vitest'; +import { ModelCheckStep } from '../ModelCheckStep'; +import { + invoke, + enableChannelCaptureWithResponses, +} from '../../../testUtils/mocks/tauri'; + +const READY_RESPONSE = { + state: 'ready', + active_slug: 'gemma4:e2b', + installed: ['gemma4:e2b'], +}; + +// happy-dom does not provide navigator.clipboard, and Object.defineProperty +// on Navigator can collide with property descriptors set elsewhere in the +// test suite. Stash a single writeText spy on a permissive shape so each +// test can assert on it without redefining the host property. 
+const writeText = vi.fn().mockResolvedValue(undefined); + +beforeAll(() => { + if (!('clipboard' in navigator)) { + Object.defineProperty(navigator, 'clipboard', { + configurable: true, + writable: true, + value: { writeText }, + }); + } else { + Object.assign(navigator.clipboard, { writeText }); + } +}); + +describe('ModelCheckStep', () => { + beforeEach(() => { + invoke.mockClear(); + writeText.mockReset(); + writeText.mockResolvedValue(undefined); + }); + + it('shows Step 1 active and Step 2 waiting on Ollama unreachable', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'ollama_unreachable' }, + }); + + render(); + await act(async () => {}); + + expect(screen.getByText('Set up your local AI')).toBeInTheDocument(); + expect(screen.getByText('Install & start Ollama')).toBeInTheDocument(); + expect(screen.getByText('STEP 1 · ACTION NEEDED')).toBeInTheDocument(); + expect(screen.getByText('STEP 2 · WAITING')).toBeInTheDocument(); + expect(screen.getByText('brew install ollama')).toBeInTheDocument(); + expect(screen.getByText('open -a Ollama')).toBeInTheDocument(); + }); + + it('shows Step 1 done and Step 2 active on no_models_installed', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'no_models_installed' }, + }); + + render(); + await act(async () => {}); + + expect(screen.getByText('Ollama is running')).toBeInTheDocument(); + expect(screen.getByText('Connected')).toBeInTheDocument(); + expect(screen.getByText('STEP 1 · DONE')).toBeInTheDocument(); + expect(screen.getByText('STEP 2 · ACTION NEEDED')).toBeInTheDocument(); + expect(screen.getByText('gemma4:e2b')).toBeInTheDocument(); + expect(screen.getByText('llama3:8b')).toBeInTheDocument(); + expect(screen.getByText('qwen2.5:7b')).toBeInTheDocument(); + expect(screen.getByText('RECOMMENDED')).toBeInTheDocument(); + }); + + it('fires advance_past_model_check when Ready', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: 
READY_RESPONSE, + advance_past_model_check: undefined, + }); + + render(); + await act(async () => {}); + + await waitFor(() => { + expect(invoke).toHaveBeenCalledWith('advance_past_model_check'); + }); + }); + + it('treats IPC failure as Ollama unreachable so the user sees a recovery path', async () => { + invoke.mockRejectedValueOnce(new Error('ipc broken')); + + render(); + await act(async () => {}); + + expect(screen.getByText('Install & start Ollama')).toBeInTheDocument(); + expect(screen.getByText('STEP 1 · ACTION NEEDED')).toBeInTheDocument(); + }); + + it('Re-check button re-runs the probe and updates state', async () => { + let calls = 0; + invoke.mockImplementation(async (name: string) => { + if (name === 'check_model_setup') { + calls += 1; + return calls === 1 + ? { state: 'ollama_unreachable' } + : { state: 'no_models_installed' }; + } + return undefined; + }); + + render(); + await act(async () => {}); + + expect(screen.getByText('Install & start Ollama')).toBeInTheDocument(); + + await act(async () => { + fireEvent.click(screen.getByLabelText('Re-check setup')); + }); + + expect(screen.getByText('Ollama is running')).toBeInTheDocument(); + expect(screen.getByText('Connected')).toBeInTheDocument(); + }); + + it('Re-check button is no-op while a probe is in flight', async () => { + let probeCalls = 0; + let resolveSecond: (value: unknown) => void = () => {}; + invoke.mockImplementation(async (name: string) => { + if (name === 'check_model_setup') { + probeCalls += 1; + if (probeCalls === 1) return { state: 'ollama_unreachable' }; + return new Promise((resolve) => { + resolveSecond = resolve; + }); + } + return undefined; + }); + + render(); + await act(async () => {}); + + await act(async () => { + fireEvent.click(screen.getByLabelText('Re-check setup')); + }); + expect(probeCalls).toBe(2); + + await act(async () => { + fireEvent.click(screen.getByLabelText('Re-check setup')); + }); + expect(probeCalls).toBe(2); + + await act(async () => { + 
resolveSecond({ state: 'no_models_installed' }); + }); + }); + + it('copies brew install command when Install Ollama copy button is clicked', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'ollama_unreachable' }, + }); + + render(); + await act(async () => {}); + + await act(async () => { + fireEvent.click(screen.getByLabelText('Copy install ollama command')); + }); + expect(writeText).toHaveBeenCalledWith('brew install ollama'); + }); + + it('copies open command when Already installed copy button is clicked', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'ollama_unreachable' }, + }); + + render(); + await act(async () => {}); + + await act(async () => { + fireEvent.click(screen.getByLabelText('Copy already installed? command')); + }); + expect(writeText).toHaveBeenCalledWith('open -a Ollama'); + }); + + it('copies the pull command for a recommended model', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'no_models_installed' }, + }); + + render(); + await act(async () => {}); + + await act(async () => { + fireEvent.click( + screen.getByLabelText('Copy install command for llama3:8b'), + ); + }); + expect(writeText).toHaveBeenCalledWith('ollama pull llama3:8b'); + }); + + it('swallows clipboard write errors silently', async () => { + writeText.mockReset(); + writeText.mockRejectedValue(new Error('denied')); + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'ollama_unreachable' }, + }); + + render(); + await act(async () => {}); + + await expect( + act(async () => { + fireEvent.click(screen.getByLabelText('Copy install ollama command')); + }), + ).resolves.not.toThrow(); + }); + + it('renders the privacy footer', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'ollama_unreachable' }, + }); + + render(); + await act(async () => {}); + + expect( + screen.getByText( + 'Private by default · All inference runs on your 
machine', + ), + ).toBeInTheDocument(); + }); +}); diff --git a/src/view/onboarding/__tests__/index.test.tsx b/src/view/onboarding/__tests__/index.test.tsx index 751fe408..59f8ac73 100644 --- a/src/view/onboarding/__tests__/index.test.tsx +++ b/src/view/onboarding/__tests__/index.test.tsx @@ -19,4 +19,10 @@ describe('OnboardingView (orchestrator)', () => { render(); expect(screen.getByText('Before you dive in')).toBeInTheDocument(); }); + + it('renders ModelCheckStep when stage is model_check', async () => { + render(); + await act(async () => {}); + expect(screen.getByText('Set up your local AI')).toBeInTheDocument(); + }); }); diff --git a/src/view/onboarding/_shared.tsx b/src/view/onboarding/_shared.tsx new file mode 100644 index 00000000..68b82ad7 --- /dev/null +++ b/src/view/onboarding/_shared.tsx @@ -0,0 +1,100 @@ +/** + * Shared building blocks for onboarding steps. + * + * Extracted from PermissionsStep so ModelCheckStep (and any future + * onboarding screen) can reuse the same active / done / waiting visual + * language. The token values here are the source of truth for the + * onboarding visual system; do not duplicate them inline in step + * components. + */ + +import type React from 'react'; + +export interface StepCardProps { + /** Orange-glow treatment indicating the user must act on this step now. */ + active: boolean; + /** Green-tinted "done" treatment with a thin success border. */ + done: boolean; + children: React.ReactNode; +} + +/** + * Container that applies the onboarding step visual treatment. + * + * Three mutually exclusive states: + * - done: green border + green-tint background, no glow. + * - active && !done: warm orange border + orange-tint background + + * soft outer glow + 1px inner top highlight. + * - !active && !done: subtle white border + faint white-tint + * background, no glow. Used for "waiting" steps that the user + * cannot act on yet. 
+ */ +export function StepCard({ active, done, children }: StepCardProps) { + const borderColor = done + ? 'rgba(34,197,94,0.2)' + : active + ? 'rgba(255,141,92,0.4)' + : 'rgba(255,255,255,0.06)'; + + const background = done + ? 'rgba(34,197,94,0.05)' + : active + ? 'rgba(255,141,92,0.07)' + : 'rgba(255,255,255,0.03)'; + + return ( +
+ {children} +
+ ); +} + +export interface BadgeProps { + color: 'green'; + children: React.ReactNode; +} + +/** + * Inline status pill rendered to the right of a done step's title. + * + * Single-color today (`green` for the success / connected state). Add + * new colors as discrete variants rather than accepting arbitrary CSS, + * which keeps the badge palette under one rule. + */ +export function Badge({ color, children }: BadgeProps) { + const styles: Record = { + green: { + color: '#22c55e', + background: 'rgba(34,197,94,0.1)', + border: '1px solid rgba(34,197,94,0.2)', + }, + }; + + return ( + + {children} + + ); +} diff --git a/src/view/onboarding/index.tsx b/src/view/onboarding/index.tsx index 9b2987e5..c5a20042 100644 --- a/src/view/onboarding/index.tsx +++ b/src/view/onboarding/index.tsx @@ -1,7 +1,13 @@ import { IntroStep } from './IntroStep'; +import { ModelCheckStep } from './ModelCheckStep'; import { PermissionsStep } from './PermissionsStep'; -export type OnboardingStage = 'permissions' | 'intro'; +/** + * Stage values mirror the Rust `OnboardingStage` enum exactly. The + * backend emits these strings as the `stage` field on the + * `thuki://onboarding` event; any drift here breaks the dispatch. + */ +export type OnboardingStage = 'permissions' | 'model_check' | 'intro'; interface Props { stage: OnboardingStage; @@ -14,7 +20,7 @@ interface Props { * Renders the correct step based on the persisted onboarding stage emitted * by the backend at startup. The stage advances on the backend: * - * permissions -> (quit+reopen) -> intro -> complete (normal app) + * permissions -> (quit+reopen) -> model_check -> (advance) -> intro -> complete * * When stage is "complete" the backend never emits the onboarding event, * so this component is never rendered. 
@@ -23,5 +29,12 @@ export function OnboardingView({ stage, onComplete }: Props) { if (stage === 'intro') { return ; } + if (stage === 'model_check') { + // ModelCheckStep advances to `intro` via the backend + // `advance_past_model_check` command, which re-emits the onboarding + // event. No callback wiring needed here. + void onComplete; // referenced for parity; unused by ModelCheckStep + return ; + } return ; } From dceb61f1f9efb712d06207effdf25998367cfd14 Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Sat, 25 Apr 2026 14:36:03 -0500 Subject: [PATCH 26/42] feat(onboarding): redesign ModelCheckStep with rail layout + verified models MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Vertical timeline rail spans both steps with numbered nodes. Step 1 hosts a two-tab install hero (Install Ollama / Already Installed?) with a fixed-height code box that stays stable across tab switches. Step 2 lists three multimodal starter models verified against ollama.com/library: gemma4:e4b (Google), llama3.2-vision:11b (Meta), and phi4:14b (Microsoft). Other UX polish: - Step 1 done state shows "Listening on 127.0.0.1:11434" + "live" badge - Centered helper sub-line under code box ("Paste this in Terminal or visit Ollama docs") - Step 2 helper block: "Paste the command in Terminal / or / Browse all models on ollama.com" linking to ollama.com/search - Each model slug is a clickable link that opens the model's library page in the user's default browser - Icon-only copy button no longer expands on success (border flips green) - "Re-check setup" renamed to "Verify setup" / "Verifying…" 100% frontend coverage maintained; 831 tests pass. 
Signed-off-by: Logan Nguyen --- src/view/onboarding/ModelCheckStep.tsx | 1039 ++++++++++------- .../__tests__/ModelCheckStep.test.tsx | 468 +++++++- 2 files changed, 1072 insertions(+), 435 deletions(-) diff --git a/src/view/onboarding/ModelCheckStep.tsx b/src/view/onboarding/ModelCheckStep.tsx index e7d38a59..4701638f 100644 --- a/src/view/onboarding/ModelCheckStep.tsx +++ b/src/view/onboarding/ModelCheckStep.tsx @@ -2,125 +2,120 @@ * Onboarding step that gates the chat overlay on a working local Ollama * setup with at least one installed model. * - * Mounts after PermissionsStep clears all macOS grants and before - * IntroStep runs. Probes the daemon via the `check_model_setup` Tauri - * command, then renders one of three states: + * Layout: + * - Vertical timeline rail with numbered nodes connected by a thin line. + * - Step 1 active shows a single title row, then a two-tab install hero + * (Install Ollama / Already Installed?) above a single code box that + * swaps its command per tab. A short sub-line below the box invites + * the user to paste the command or visit the Ollama docs. + * - Step 2 active hosts a compact list of starter models, all rendered + * equal — no badge, no hierarchy. The user picks whichever fits. * - * - Ollama unreachable: Step 1 is the active card with install / - * start affordances; Step 2 is the waiting card. - * - No models installed: Step 1 collapses to a green Connected - * badge; Step 2 is the active card with the recommended-model list. - * - Ready: never visible. The component fires `advance_past_model_check` - * and the parent OnboardingView replaces it with IntroStep before - * the next paint. - * - * Every state share the same screen, the same StepCard pattern, and the - * same Re-check CTA, so the user is never surprised by a "wait, ANOTHER - * last thing" tagline. + * Probes Ollama via the `check_model_setup` Tauri command on mount and on + * every Re-check click. 
Background polling is intentionally absent so + * idle CPU and IPC stay at zero between explicit user actions. */ -import { motion } from 'framer-motion'; +import { AnimatePresence, motion } from 'framer-motion'; import type React from 'react'; import { useState, useEffect, useRef, useCallback } from 'react'; import { invoke } from '@tauri-apps/api/core'; import thukiLogo from '../../../src-tauri/icons/128x128.png'; -import { StepCard, Badge } from './_shared'; +import { Badge } from './_shared'; + +const OLLAMA_DOCS_URL = 'https://ollama.com/download'; +const OLLAMA_SEARCH_URL = 'https://ollama.com/search'; +const OLLAMA_LISTEN_ADDR = '127.0.0.1:11434'; -/** - * Wire-format payload returned by the `check_model_setup` Tauri command. - * - * Discriminated on `state` to match the Rust `ModelSetupState` enum - * exactly. Frontend routes solely on the `state` string; the optional - * `active_slug` and `installed` fields are present only on `ready`. - */ type ModelSetupState = | { state: 'ollama_unreachable' } | { state: 'no_models_installed' } | { state: 'ready'; active_slug: string; installed: string[] }; +interface InstallTab { + id: string; + label: string; + command: string; +} + /** - * Recommended starter models surfaced in the Step 2 card. - * - * The list is intentionally short and curated: three options is enough - * for new users to feel they have a real choice without forcing them - * to research model trade-offs. Sourced from the design approved - * 2026-04-25 in `~/.gstack/projects/.../approved.json`. + * Install routes shown above the Step 1 code box. The first entry is the + * default selection. `command` is the exact string copied to the + * clipboard when the copy pill is clicked. 
+ */ +const INSTALL_TABS: InstallTab[] = [ + { + id: 'install', + label: 'Install Ollama', + command: 'curl -fsSL https://ollama.com/install.sh | sh', + }, + { + id: 'already-installed', + label: 'Already Installed?', + command: 'open -a Ollama', + }, +]; + +/** + * Starter models offered in Step 2. All entries support text and image + * input (vision / multimodal). Sizes are pulled from the official Ollama + * library (ollama.com/library) and reflect the default tag at time of + * authoring. All entries are intentionally peers — no recommended + * badge — so the user picks whichever fits their hardware. */ -const RECOMMENDED_MODELS: Array<{ +const STARTER_MODELS: Array<{ slug: string; description: string; - recommended?: boolean; + size: string; }> = [ + { slug: 'gemma4:e4b', description: 'Google · vision', size: '9.6 GB' }, { - slug: 'gemma4:e2b', - description: 'Lightweight all-rounder · 1.6 GB', - recommended: true, + slug: 'llama3.2-vision:11b', + description: 'Meta · vision', + size: '7.8 GB', }, - { slug: 'llama3:8b', description: 'Stronger reasoning · 4.7 GB' }, - { slug: 'qwen2.5:7b', description: 'Code-focused · 4.4 GB' }, + { slug: 'phi4:14b', description: 'Microsoft · text', size: '9.1 GB' }, ]; /** - * Builds the `ollama pull ` command for a given model. Centralised - * so the copy-button affordance and the (future) settings panel share - * the same string and cannot drift. + * Builds the public Ollama library URL for a model slug. Drops the `:tag` + * suffix so the destination shows every available variant rather than + * pinning the user to one quantisation. Both `gemma4` and `gemma4:e4b` + * resolve, but the bare-name URL is the more useful landing. */ +function buildOllamaLibraryUrl(slug: string): string { + const base = slug.split(':')[0]; + return `https://ollama.com/library/${base}`; +} + function buildPullCommand(slug: string): string { return `ollama pull ${slug}`; } -/** Copies a string to the macOS clipboard, ignoring failures silently. 
*/ -async function copyToClipboard(text: string): Promise { +async function copyToClipboard(text: string): Promise { try { await navigator.clipboard.writeText(text); + return true; } catch { - // Clipboard write can fail on locked sessions or denied permissions; - // there is no recovery and showing an error here would be more - // confusing than silent. The terminal command remains visible for - // the user to copy manually. + return false; } } -/** - * Renders the model-check onboarding gate. - * - * Probes Ollama once on mount and again on every Re-check click. No - * background polling: the user is the trigger, which keeps idle CPU - * and IPC traffic at zero between explicit interactions. - * - * Takes no props. Parent OnboardingView routes here when the persisted - * stage is `model_check`. Stage advance to `intro` is owned by the - * backend `advance_past_model_check` command, fired from inside this - * component when the probe reports `Ready`. The backend re-emits the - * onboarding event with the new stage so the parent re-routes without - * a window flicker. - */ export function ModelCheckStep() { const [setupState, setSetupState] = useState(null); const [isRechecking, setIsRechecking] = useState(false); const mountedRef = useRef(true); - /** - * Probes Ollama via the backend command and either advances the - * onboarding stage (Ready) or stores the gate state for rendering. - * - * Idempotent: safe to call repeatedly. The backend handles persisting - * the resolved active slug; this hook only routes UI state. - */ const probe = useCallback(async () => { try { const next = await invoke('check_model_setup'); if (!mountedRef.current) return; if (next.state === 'ready') { - // Fire-and-forget: the backend emits the onboarding event with - // the new stage, which OnboardingView routes to IntroStep. 
await invoke('advance_past_model_check'); return; } setSetupState(next); } catch { - // Treat any IPC failure as Ollama unreachable so the user sees a - // recovery path. The next Re-check click will retry. if (!mountedRef.current) return; setSetupState({ state: 'ollama_unreachable' }); } @@ -135,7 +130,6 @@ export function ModelCheckStep() { }, [probe]); const handleRecheck = useCallback(async () => { - if (isRechecking) return; setIsRechecking(true); try { await probe(); @@ -144,14 +138,17 @@ export function ModelCheckStep() { setIsRechecking(false); } } - }, [isRechecking, probe]); + }, [probe]); const ollamaConnected = setupState?.state === 'no_models_installed'; const isWaitingForOllama = setupState?.state === 'ollama_unreachable'; const isProbing = setupState === null; - const stepOneActive = isWaitingForOllama; - const stepOneDone = ollamaConnected; - const stepTwoActive = ollamaConnected; + + const titleSub = isProbing + ? 'Checking your local Ollama setup…' + : ollamaConnected + ? "Almost there. Let's pick a brain for Thuki." + : 'Runs Ollama locally. Your chats stay on this machine.'; return (
Thuki Set up your local AI @@ -227,122 +224,32 @@ export function ModelCheckStep() {

- {isProbing - ? 'Checking your local Ollama setup…' - : ollamaConnected - ? 'Almost there. Pick a model so Thuki has something to think with. You can always switch to a different model later.' - : 'Two quick things and you are in. Thuki runs Ollama locally so your conversations never leave this machine.'} + {titleSub}

-
- - - - - - {stepOneDone ? Connected : null} - - - {stepOneActive ? ( - - void copyToClipboard('brew install ollama')} - buttonLabel="Copy" - buttonGlyph={} - /> - void copyToClipboard('open -a Ollama')} - buttonLabel="Copy" - buttonGlyph={} - /> - - ) : null} - - - - - - - - - {stepTwoActive ? ( -
- {RECOMMENDED_MODELS.map((m) => ( - void copyToClipboard(buildPullCommand(m.slug))} - /> - ))} -
- ) : null} -
+ {!isProbing ? ( + + ) : null}

@@ -375,33 +283,82 @@ export function ModelCheckStep() { ); } -// ─── Sub-components ────────────────────────────────────────────────────────── +// ─── Rail ──────────────────────────────────────────────────────────────────── -type Variant = 'active' | 'done' | 'waiting'; +interface RailProps { + stepOneActive: boolean; + stepOneDone: boolean; + stepTwoActive: boolean; +} -interface StepIconProps { - variant: Variant; - children: React.ReactNode; +/** + * Two-step vertical timeline. The connecting line is rendered once as an + * absolute element behind the node column so it spans the full rail + * regardless of how tall each row's content grows. + */ +function Rail({ stepOneActive, stepOneDone, stepTwoActive }: RailProps) { + return ( +

+ + ); +} + +type NodeVariant = 'active' | 'done' | 'wait'; + +interface RailNodeProps { + number: number; + variant: NodeVariant; + topGap?: number; } -function StepIcon({ variant, children }: StepIconProps) { +function RailNode({ number, variant, topGap = 0 }: RailNodeProps) { const palette: Record< - Variant, + NodeVariant, { bg: string; border: string; color: string } > = { active: { - bg: 'rgba(255,141,92,0.12)', - border: 'rgba(255,141,92,0.25)', + bg: 'rgba(255,141,92,0.1)', + border: 'rgba(255,141,92,0.4)', color: '#ff8d5c', }, done: { bg: 'rgba(34,197,94,0.12)', - border: 'rgba(34,197,94,0.2)', + border: 'rgba(34,197,94,0.4)', color: '#22c55e', }, - waiting: { - bg: 'rgba(255,255,255,0.04)', - border: 'rgba(255,255,255,0.08)', + wait: { + bg: 'rgba(255,255,255,0.03)', + border: 'rgba(255,255,255,0.1)', color: 'rgba(255,255,255,0.4)', }, }; @@ -409,95 +366,271 @@ function StepIcon({ variant, children }: StepIconProps) { return (
- {children} +
+ {variant === 'done' ? '✓' : number} +
); } -interface StepTextProps { - eyebrow: string; - eyebrowVariant: Variant; - title: string; - titleMuted: boolean; +// ─── Row 1: install Ollama ─────────────────────────────────────────────────── + +interface RowOneProps { + active: boolean; + done: boolean; } -function StepText({ - eyebrow, - eyebrowVariant, - title, - titleMuted, -}: StepTextProps) { - const eyebrowColor: Record = { - active: 'rgba(255,141,92,0.8)', - done: 'rgba(34,197,94,0.8)', - waiting: 'rgba(255,255,255,0.4)', - }; +function RowOne({ active, done }: RowOneProps) { + const [selectedTabIdx, setSelectedTabIdx] = useState(0); + const tab = INSTALL_TABS[selectedTabIdx]; + return ( -
+
- {eyebrow} +
+

+ {done ? 'Ollama is running' : 'Install & start Ollama'} +

+ {done ? ( +

+ Listening on {OLLAMA_LISTEN_ADDR} +

+ ) : null} +
+ {done ? live : null}
+ + {active ? ( + <> +
+
+ {INSTALL_TABS.map((t, i) => ( + setSelectedTabIdx(i)} + /> + ))} +
+
+ + + $ + + {tab.command} + + +
+
+
+ + Paste this in Terminal or visit + + + Ollama docs ↗ + +
+ + ) : null} +
+ ); +} + +// ─── Row 2: pull a starter model ───────────────────────────────────────────── + +function RowTwo({ active }: { active: boolean }) { + return ( +

- {title} + Pull a starter model

-
- ); -} -function ActionRow({ children }: { children: React.ReactNode }) { - return ( -
- {children} + {active ? ( + <> +

+ You can swap or add more later. +

+
+ {STARTER_MODELS.map((m, i) => ( + + ))} +
+ +
+ + Paste the command in Terminal + + or + + Browse all models on ollama.com ↗ + +
+ + ) : null}
); } -interface ActionCardProps { - title: string; - desc: string; - primary?: boolean; - onClick: () => void; - buttonLabel: string; - buttonGlyph: React.ReactNode; +interface ModelRowProps { + slug: string; + description: string; + size: string; + isLast: boolean; } -function ActionCard({ - title, - desc, - primary, - onClick, - buttonLabel, - buttonGlyph, -}: ActionCardProps) { +function ModelRow({ slug, description, size, isLast }: ModelRowProps) { return (
+

- {title} -

-

- {desc} + {description} · {size}

- +
); } -interface ModelCardProps { - slug: string; - description: string; - recommended: boolean; - onCopy: () => void; +/** + * Renders the model slug as an inline button styled like text. Click + * opens the model's Ollama library page in the user's default browser + * via the `open_url` Tauri command. Hover lifts the slug to brand + * orange with a subtle underline so it reads as discoverable without + * shouting. + */ +function SlugLink({ slug }: { slug: string }) { + const [hover, setHover] = useState(false); + return ( + + ); +} + +// ─── Tab + copy + docs link ────────────────────────────────────────────────── + +interface DocsLinkProps { + ariaLabel: string; + url: string; + children: React.ReactNode; } -function ModelCard({ slug, description, recommended, onCopy }: ModelCardProps) { +function DocsLink({ ariaLabel, url, children }: DocsLinkProps) { + const [hover, setHover] = useState(false); return ( -
void invoke('open_url', { url })} + onMouseEnter={() => setHover(true)} + onMouseLeave={() => setHover(false)} + aria-label={ariaLabel} style={{ - display: 'flex', - alignItems: 'center', - justifyContent: 'space-between', - gap: 10, - padding: '10px 12px', - background: recommended - ? 'radial-gradient(ellipse 100% 100% at 0% 0%, rgba(255,141,92,0.08) 0%, transparent 70%), rgba(0,0,0,0.18)' - : 'rgba(0,0,0,0.18)', - border: `1px solid ${recommended ? 'rgba(255,141,92,0.25)' : 'rgba(255,255,255,0.06)'}`, - borderRadius: 10, + background: 'transparent', + border: 'none', + padding: 0, + fontFamily: 'inherit', + fontSize: 11, + fontWeight: 500, + color: hover ? '#ff8d5c' : 'rgba(255,141,92,0.7)', + cursor: 'pointer', + transition: 'color 160ms ease', }} > -
- {recommended ? ( -
- RECOMMENDED -
- ) : null} -

- {slug} -

-

- {description} -

-
- -
+ {children} + ); } -// ─── Glyphs ────────────────────────────────────────────────────────────────── +interface TabButtonProps { + label: string; + selected: boolean; + onClick: () => void; +} + +function TabButton({ label, selected, onClick }: TabButtonProps) { + const [hover, setHover] = useState(false); + const borderColor = selected + ? 'rgba(255, 141, 92, 0.28)' + : hover + ? 'rgba(255, 255, 255, 0.1)' + : 'transparent'; + const bg = selected + ? 'rgba(255, 141, 92, 0.1)' + : hover + ? 'rgba(255, 255, 255, 0.04)' + : 'rgba(255, 255, 255, 0.025)'; + const color = selected + ? '#ff8d5c' + : hover + ? 'rgba(255,255,255,0.85)' + : 'rgba(255,255,255,0.55)'; -function ShieldCheckGlyph() { return ( - - - - + ); } -function CubeGlyph() { +const COPIED_RESET_MS = 1500; + +interface CopyButtonProps { + command: string; + ariaLabel: string; + label?: string; + iconOnly?: boolean; +} + +function CopyButton({ + command, + ariaLabel, + label = 'Copy', + iconOnly = false, +}: CopyButtonProps) { + const [hover, setHover] = useState(false); + const [copied, setCopied] = useState(false); + const timeoutRef = useRef(null); + + useEffect(() => { + return () => { + if (timeoutRef.current !== null) { + window.clearTimeout(timeoutRef.current); + } + }; + }, []); + + const handleClick = useCallback(async () => { + const ok = await copyToClipboard(command); + if (!ok) return; + setCopied(true); + if (timeoutRef.current !== null) { + window.clearTimeout(timeoutRef.current); + } + timeoutRef.current = window.setTimeout(() => { + setCopied(false); + timeoutRef.current = null; + }, COPIED_RESET_MS); + }, [command]); + + const borderColor = copied + ? 'rgba(34,197,94,0.55)' + : hover + ? 'rgba(255,141,92,0.55)' + : 'rgba(255,255,255,0.12)'; + const labelColor = + hover || copied ? 'rgba(255,255,255,0.95)' : 'rgba(255,255,255,0.7)'; + const glyphColor = copied + ? '#22c55e' + : hover + ? 
'#ff8d5c' + : 'rgba(255,255,255,0.7)'; + return ( - - - - + ); } +// ─── Glyphs ────────────────────────────────────────────────────────────────── + function CopyGlyph() { return ( @@ -709,3 +920,17 @@ function CopyGlyph() { ); } + +function CheckGlyph() { + return ( + + + + ); +} diff --git a/src/view/onboarding/__tests__/ModelCheckStep.test.tsx b/src/view/onboarding/__tests__/ModelCheckStep.test.tsx index e64301cb..13459af8 100644 --- a/src/view/onboarding/__tests__/ModelCheckStep.test.tsx +++ b/src/view/onboarding/__tests__/ModelCheckStep.test.tsx @@ -4,6 +4,7 @@ import { fireEvent, act, waitFor, + cleanup, } from '@testing-library/react'; import { describe, it, expect, beforeEach, beforeAll, vi } from 'vitest'; import { ModelCheckStep } from '../ModelCheckStep'; @@ -14,14 +15,10 @@ import { const READY_RESPONSE = { state: 'ready', - active_slug: 'gemma4:e2b', - installed: ['gemma4:e2b'], + active_slug: 'gemma4:e4b', + installed: ['gemma4:e4b'], }; -// happy-dom does not provide navigator.clipboard, and Object.defineProperty -// on Navigator can collide with property descriptors set elsewhere in the -// test suite. Stash a single writeText spy on a permissive shape so each -// test can assert on it without redefining the host property. const writeText = vi.fn().mockResolvedValue(undefined); beforeAll(() => { @@ -52,11 +49,18 @@ describe('ModelCheckStep', () => { await act(async () => {}); expect(screen.getByText('Set up your local AI')).toBeInTheDocument(); + expect( + screen.getByText('Runs Ollama locally. 
Your chats stay on this machine.'), + ).toBeInTheDocument(); expect(screen.getByText('Install & start Ollama')).toBeInTheDocument(); - expect(screen.getByText('STEP 1 · ACTION NEEDED')).toBeInTheDocument(); - expect(screen.getByText('STEP 2 · WAITING')).toBeInTheDocument(); - expect(screen.getByText('brew install ollama')).toBeInTheDocument(); - expect(screen.getByText('open -a Ollama')).toBeInTheDocument(); + expect( + screen.queryByText('STEP 1 · ACTION NEEDED'), + ).not.toBeInTheDocument(); + expect(screen.queryByText('STEP 2 · WAITING')).not.toBeInTheDocument(); + expect(screen.getByText('Pull a starter model')).toBeInTheDocument(); + expect( + screen.getByText('curl -fsSL https://ollama.com/install.sh | sh'), + ).toBeInTheDocument(); }); it('shows Step 1 done and Step 2 active on no_models_installed', async () => { @@ -68,13 +72,25 @@ describe('ModelCheckStep', () => { await act(async () => {}); expect(screen.getByText('Ollama is running')).toBeInTheDocument(); - expect(screen.getByText('Connected')).toBeInTheDocument(); - expect(screen.getByText('STEP 1 · DONE')).toBeInTheDocument(); - expect(screen.getByText('STEP 2 · ACTION NEEDED')).toBeInTheDocument(); - expect(screen.getByText('gemma4:e2b')).toBeInTheDocument(); - expect(screen.getByText('llama3:8b')).toBeInTheDocument(); - expect(screen.getByText('qwen2.5:7b')).toBeInTheDocument(); - expect(screen.getByText('RECOMMENDED')).toBeInTheDocument(); + expect( + screen.getByText('Listening on 127.0.0.1:11434'), + ).toBeInTheDocument(); + expect(screen.getByText('live')).toBeInTheDocument(); + expect(screen.queryByText('Connected')).not.toBeInTheDocument(); + expect(screen.queryByText('STEP 1 · DONE')).not.toBeInTheDocument(); + expect( + screen.queryByText('STEP 2 · ACTION NEEDED'), + ).not.toBeInTheDocument(); + expect( + screen.getByText("Almost there. 
Let's pick a brain for Thuki."), + ).toBeInTheDocument(); + expect( + screen.getByText('You can swap or add more later.'), + ).toBeInTheDocument(); + expect(screen.getByText('gemma4:e4b')).toBeInTheDocument(); + expect(screen.getByText('llama3.2-vision:11b')).toBeInTheDocument(); + expect(screen.getByText('phi4:14b')).toBeInTheDocument(); + expect(screen.queryByText('RECOMMENDED')).not.toBeInTheDocument(); }); it('fires advance_past_model_check when Ready', async () => { @@ -98,7 +114,6 @@ describe('ModelCheckStep', () => { await act(async () => {}); expect(screen.getByText('Install & start Ollama')).toBeInTheDocument(); - expect(screen.getByText('STEP 1 · ACTION NEEDED')).toBeInTheDocument(); }); it('Re-check button re-runs the probe and updates state', async () => { @@ -119,11 +134,11 @@ describe('ModelCheckStep', () => { expect(screen.getByText('Install & start Ollama')).toBeInTheDocument(); await act(async () => { - fireEvent.click(screen.getByLabelText('Re-check setup')); + fireEvent.click(screen.getByLabelText('Verify setup')); }); expect(screen.getByText('Ollama is running')).toBeInTheDocument(); - expect(screen.getByText('Connected')).toBeInTheDocument(); + expect(screen.getByText('live')).toBeInTheDocument(); }); it('Re-check button is no-op while a probe is in flight', async () => { @@ -144,12 +159,12 @@ describe('ModelCheckStep', () => { await act(async () => {}); await act(async () => { - fireEvent.click(screen.getByLabelText('Re-check setup')); + fireEvent.click(screen.getByLabelText('Verify setup')); }); expect(probeCalls).toBe(2); await act(async () => { - fireEvent.click(screen.getByLabelText('Re-check setup')); + fireEvent.click(screen.getByLabelText('Verify setup')); }); expect(probeCalls).toBe(2); @@ -158,7 +173,7 @@ describe('ModelCheckStep', () => { }); }); - it('copies brew install command when Install Ollama copy button is clicked', async () => { + it('copies the selected install command (Install Ollama default)', async () => { 
enableChannelCaptureWithResponses({ check_model_setup: { state: 'ollama_unreachable' }, }); @@ -169,10 +184,39 @@ describe('ModelCheckStep', () => { await act(async () => { fireEvent.click(screen.getByLabelText('Copy install ollama command')); }); - expect(writeText).toHaveBeenCalledWith('brew install ollama'); + expect(writeText).toHaveBeenCalledWith( + 'curl -fsSL https://ollama.com/install.sh | sh', + ); + }); + + it('switching tabs swaps the displayed install command', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'ollama_unreachable' }, + }); + + render(); + await act(async () => {}); + + expect( + screen.getByText('curl -fsSL https://ollama.com/install.sh | sh'), + ).toBeInTheDocument(); + + await act(async () => { + fireEvent.click( + screen.getByRole('button', { name: 'Already Installed?' }), + ); + }); + expect(screen.getByText('open -a Ollama')).toBeInTheDocument(); + + await act(async () => { + fireEvent.click(screen.getByRole('button', { name: 'Install Ollama' })); + }); + expect( + screen.getByText('curl -fsSL https://ollama.com/install.sh | sh'), + ).toBeInTheDocument(); }); - it('copies open command when Already installed copy button is clicked', async () => { + it('copies the open command after switching to the Already Installed? tab', async () => { enableChannelCaptureWithResponses({ check_model_setup: { state: 'ollama_unreachable' }, }); @@ -180,13 +224,53 @@ describe('ModelCheckStep', () => { render(); await act(async () => {}); + await act(async () => { + fireEvent.click( + screen.getByRole('button', { name: 'Already Installed?' }), + ); + }); await act(async () => { fireEvent.click(screen.getByLabelText('Copy already installed? 
command')); }); expect(writeText).toHaveBeenCalledWith('open -a Ollama'); }); - it('copies the pull command for a recommended model', async () => { + it('lights up the active tab with the brand orange', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'ollama_unreachable' }, + }); + + render(); + await act(async () => {}); + + const installTab = screen.getByRole('button', { name: 'Install Ollama' }); + expect(installTab.style.color).toContain('255, 141, 92'); + + const alreadyTab = screen.getByRole('button', { + name: 'Already Installed?', + }); + expect(alreadyTab.style.color).not.toContain('255, 141, 92'); + }); + + it('hovering an inactive tab brightens the label', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'ollama_unreachable' }, + }); + + render(); + await act(async () => {}); + + const alreadyTab = screen.getByRole('button', { + name: 'Already Installed?', + }); + const before = alreadyTab.style.color; + fireEvent.mouseEnter(alreadyTab); + expect(alreadyTab.style.color).not.toBe(before); + fireEvent.mouseLeave(alreadyTab); + expect(alreadyTab.style.color).toBe(before); + }); + + it('copies the pull command for a starter model', async () => { enableChannelCaptureWithResponses({ check_model_setup: { state: 'no_models_installed' }, }); @@ -196,10 +280,57 @@ describe('ModelCheckStep', () => { await act(async () => { fireEvent.click( - screen.getByLabelText('Copy install command for llama3:8b'), + screen.getByLabelText('Copy install command for phi4:14b'), ); }); - expect(writeText).toHaveBeenCalledWith('ollama pull llama3:8b'); + expect(writeText).toHaveBeenCalledWith('ollama pull phi4:14b'); + }); + + it('renders each starter model with its description and size', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'no_models_installed' }, + }); + + render(); + await act(async () => {}); + + expect(screen.getByText('Google · vision · 9.6 
GB')).toBeInTheDocument(); + expect(screen.getByText('Meta · vision · 7.8 GB')).toBeInTheDocument(); + expect(screen.getByText('Microsoft · text · 9.1 GB')).toBeInTheDocument(); + }); + + it('clicking a model slug opens its Ollama library page', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'no_models_installed' }, + open_url: undefined, + }); + + render(); + await act(async () => {}); + + await act(async () => { + fireEvent.click(screen.getByLabelText('Open gemma4:e4b on Ollama')); + }); + + expect(invoke).toHaveBeenCalledWith('open_url', { + url: 'https://ollama.com/library/gemma4', + }); + }); + + it('lights up the slug link on pointer hover', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'no_models_installed' }, + }); + + render(); + await act(async () => {}); + + const link = screen.getByLabelText('Open phi4:14b on Ollama'); + const initialColor = link.style.color; + fireEvent.mouseEnter(link); + expect(link.style.color).not.toBe(initialColor); + fireEvent.mouseLeave(link); + expect(link.style.color).toBe(initialColor); }); it('swallows clipboard write errors silently', async () => { @@ -233,4 +364,285 @@ describe('ModelCheckStep', () => { ), ).toBeInTheDocument(); }); + + it('renders the Step 1 sub-line below the code box with the Ollama docs link', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'ollama_unreachable' }, + }); + + render(); + await act(async () => {}); + + expect( + screen.getByText('Paste this in Terminal or visit'), + ).toBeInTheDocument(); + expect( + screen.getByLabelText('Open Ollama documentation'), + ).toBeInTheDocument(); + }); + + it('opens the Ollama docs URL when its sub-line link is clicked', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'ollama_unreachable' }, + open_url: undefined, + }); + + render(); + await act(async () => {}); + + await act(async () => { + 
fireEvent.click(screen.getByLabelText('Open Ollama documentation')); + }); + + expect(invoke).toHaveBeenCalledWith('open_url', { + url: 'https://ollama.com/download', + }); + }); + + it('opens the Ollama library URL when the Browse link is clicked', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'no_models_installed' }, + open_url: undefined, + }); + + render(); + await act(async () => {}); + + await act(async () => { + fireEvent.click(screen.getByLabelText('Browse all models on Ollama')); + }); + + expect(invoke).toHaveBeenCalledWith('open_url', { + url: 'https://ollama.com/search', + }); + }); + + it('renders the Step 2 helper block under the model list', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'no_models_installed' }, + }); + + render(); + await act(async () => {}); + + expect( + screen.getByText('Paste the command in Terminal'), + ).toBeInTheDocument(); + expect(screen.getByText('or')).toBeInTheDocument(); + expect( + screen.getByText('Browse all models on ollama.com ↗'), + ).toBeInTheDocument(); + }); + + it('lights up sub-line doc links on pointer hover', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'ollama_unreachable' }, + }); + + render(); + await act(async () => {}); + + const link = screen.getByLabelText('Open Ollama documentation'); + const initialColor = link.style.color; + fireEvent.mouseEnter(link); + expect(link.style.color).not.toBe(initialColor); + fireEvent.mouseLeave(link); + expect(link.style.color).toBe(initialColor); + }); + + it('icon-only install copy button shows only the green check on success (no Copied text)', async () => { + vi.useFakeTimers(); + try { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'ollama_unreachable' }, + }); + + render(); + await act(async () => {}); + + await act(async () => { + fireEvent.click(screen.getByLabelText('Copy install ollama command')); + }); + + 
expect(screen.queryByText('Copied')).not.toBeInTheDocument(); + const button = screen.getByLabelText('Copy install ollama command'); + expect(button.style.borderColor).toContain('34, 197, 94'); + + await act(async () => { + vi.advanceTimersByTime(1500); + }); + + expect(button.style.borderColor).not.toContain('34, 197, 94'); + } finally { + vi.useRealTimers(); + } + }); + + it('model-row copy button swaps into a Copied confirmation after a successful copy', async () => { + vi.useFakeTimers(); + try { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'no_models_installed' }, + }); + + render(); + await act(async () => {}); + + await act(async () => { + fireEvent.click( + screen.getByLabelText('Copy install command for gemma4:e4b'), + ); + }); + + expect(screen.getByText('Copied')).toBeInTheDocument(); + + await act(async () => { + vi.advanceTimersByTime(1500); + }); + + expect(screen.queryByText('Copied')).not.toBeInTheDocument(); + expect(screen.getAllByText('Copy').length).toBeGreaterThan(0); + } finally { + vi.useRealTimers(); + } + }); + + it('clears the previous Copied timer when the model-row copy button is clicked twice quickly', async () => { + vi.useFakeTimers(); + try { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'no_models_installed' }, + }); + + render(); + await act(async () => {}); + + const button = screen.getByLabelText('Copy install command for phi4:14b'); + + await act(async () => { + fireEvent.click(button); + }); + expect(screen.getByText('Copied')).toBeInTheDocument(); + + await act(async () => { + vi.advanceTimersByTime(800); + }); + await act(async () => { + fireEvent.click(button); + }); + expect(screen.getByText('Copied')).toBeInTheDocument(); + + await act(async () => { + vi.advanceTimersByTime(800); + }); + expect(screen.getByText('Copied')).toBeInTheDocument(); + + await act(async () => { + vi.advanceTimersByTime(800); + }); + expect(screen.queryByText('Copied')).not.toBeInTheDocument(); + } 
finally { + vi.useRealTimers(); + } + }); + + it('lights up the copy button border on pointer hover', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'ollama_unreachable' }, + }); + + render(); + await act(async () => {}); + + const button = screen.getByLabelText('Copy install ollama command'); + fireEvent.mouseEnter(button); + expect(button.style.borderColor).toContain('255, 141, 92'); + fireEvent.mouseLeave(button); + expect(button.style.borderColor).toContain('255, 255, 255'); + }); + + it('drops the probe success when the component unmounts mid-flight', async () => { + let resolveProbe: (value: unknown) => void = () => {}; + invoke.mockImplementation(async (name: string) => { + if (name === 'check_model_setup') { + return new Promise((resolve) => { + resolveProbe = resolve; + }); + } + return undefined; + }); + + const { unmount } = render(); + unmount(); + + await act(async () => { + resolveProbe({ state: 'no_models_installed' }); + }); + + expect(invoke).not.toHaveBeenCalledWith('advance_past_model_check'); + }); + + it('drops the probe failure when the component unmounts mid-flight', async () => { + let rejectProbe: (reason: unknown) => void = () => {}; + invoke.mockImplementation(async (name: string) => { + if (name === 'check_model_setup') { + return new Promise((_resolve, reject) => { + rejectProbe = reject; + }); + } + return undefined; + }); + + const { unmount } = render(); + unmount(); + + await act(async () => { + rejectProbe(new Error('late failure')); + }); + }); + + it('skips re-render when the recheck probe finishes after unmount', async () => { + let calls = 0; + let resolveSecond: (value: unknown) => void = () => {}; + invoke.mockImplementation(async (name: string) => { + if (name === 'check_model_setup') { + calls += 1; + if (calls === 1) return { state: 'ollama_unreachable' }; + return new Promise((resolve) => { + resolveSecond = resolve; + }); + } + return undefined; + }); + + render(); + await act(async () 
=> {}); + + await act(async () => { + fireEvent.click(screen.getByLabelText('Verify setup')); + }); + + cleanup(); + + await act(async () => { + resolveSecond({ state: 'no_models_installed' }); + }); + }); + + it('does not show the Copied confirmation when the clipboard write fails', async () => { + writeText.mockReset(); + writeText.mockRejectedValue(new Error('denied')); + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'ollama_unreachable' }, + }); + + render(); + await act(async () => {}); + + await act(async () => { + fireEvent.click(screen.getByLabelText('Copy install ollama command')); + }); + + expect(screen.queryByText('Copied')).not.toBeInTheDocument(); + }); }); From 7462b82250b679b63acc9e17723f2ece6b5c8539 Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Sat, 25 Apr 2026 14:39:53 -0500 Subject: [PATCH 27/42] feat(onboarding): rewrite IntroStep title and subtitle "Before you dive in / You'll get the hang of it quickly" was vague and landed awkwardly after the multi-step setup. Replace with the clearer, more confident "You're all set / Five quick tips and you're chatting in seconds." which earns the Get Started CTA right below. 
Signed-off-by: Logan Nguyen --- src/__tests__/App.test.tsx | 4 ++-- src/view/onboarding/IntroStep.tsx | 4 ++-- src/view/onboarding/__tests__/IntroStep.test.tsx | 4 ++-- src/view/onboarding/__tests__/index.test.tsx | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/__tests__/App.test.tsx b/src/__tests__/App.test.tsx index e9eb60b9..f02b387e 100644 --- a/src/__tests__/App.test.tsx +++ b/src/__tests__/App.test.tsx @@ -4893,13 +4893,13 @@ describe('App', () => { emitTauriEvent('thuki://onboarding', { stage: 'intro' }); }); - expect(screen.getByText('Before you dive in')).toBeInTheDocument(); + expect(screen.getByText("You're all set")).toBeInTheDocument(); await act(async () => { fireEvent.click(screen.getByRole('button', { name: /get started/i })); }); - expect(screen.queryByText('Before you dive in')).toBeNull(); + expect(screen.queryByText("You're all set")).toBeNull(); }); }); }); diff --git a/src/view/onboarding/IntroStep.tsx b/src/view/onboarding/IntroStep.tsx index ccaeb971..47673e2c 100644 --- a/src/view/onboarding/IntroStep.tsx +++ b/src/view/onboarding/IntroStep.tsx @@ -64,7 +64,7 @@ export function IntroStep({ onComplete }: Props) { margin: '0 0 6px', }} > - Before you dive in + {"You're all set"}

- {"You'll get the hang of it quickly."} + {"Five quick tips and you're chatting in seconds."}

diff --git a/src/view/onboarding/__tests__/IntroStep.test.tsx b/src/view/onboarding/__tests__/IntroStep.test.tsx index 742dcc07..80b2a752 100644 --- a/src/view/onboarding/__tests__/IntroStep.test.tsx +++ b/src/view/onboarding/__tests__/IntroStep.test.tsx @@ -10,13 +10,13 @@ describe('IntroStep', () => { it('renders the title', () => { render(); - expect(screen.getByText('Before you dive in')).toBeInTheDocument(); + expect(screen.getByText("You're all set")).toBeInTheDocument(); }); it('renders the subtitle', () => { render(); expect( - screen.getByText("You'll get the hang of it quickly."), + screen.getByText("Five quick tips and you're chatting in seconds."), ).toBeInTheDocument(); }); diff --git a/src/view/onboarding/__tests__/index.test.tsx b/src/view/onboarding/__tests__/index.test.tsx index 59f8ac73..28efe58b 100644 --- a/src/view/onboarding/__tests__/index.test.tsx +++ b/src/view/onboarding/__tests__/index.test.tsx @@ -17,7 +17,7 @@ describe('OnboardingView (orchestrator)', () => { it('renders IntroStep when stage is intro', () => { render(); - expect(screen.getByText('Before you dive in')).toBeInTheDocument(); + expect(screen.getByText("You're all set")).toBeInTheDocument(); }); it('renders ModelCheckStep when stage is model_check', async () => { From 3a709387cbd9fb15897514d2c7fd67909d2174eb Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Sat, 25 Apr 2026 18:02:31 -0500 Subject: [PATCH 28/42] feat(onboarding): read Ollama listen address from runtime config Replace the hardcoded "Listening on 127.0.0.1:11434" string with the configured `model.ollama_url` value (host:port) read via useConfig(). Custom users running Ollama on a remote host or non-default port now see their actual address. Falls back to the raw URL string when the value isn't parseable so the UI never shows an empty line. 
Signed-off-by: Logan Nguyen --- src/view/onboarding/ModelCheckStep.tsx | 19 +++++++- .../__tests__/ModelCheckStep.test.tsx | 44 +++++++++++++++++++ 2 files changed, 61 insertions(+), 2 deletions(-) diff --git a/src/view/onboarding/ModelCheckStep.tsx b/src/view/onboarding/ModelCheckStep.tsx index 4701638f..510dca3e 100644 --- a/src/view/onboarding/ModelCheckStep.tsx +++ b/src/view/onboarding/ModelCheckStep.tsx @@ -21,11 +21,25 @@ import type React from 'react'; import { useState, useEffect, useRef, useCallback } from 'react'; import { invoke } from '@tauri-apps/api/core'; import thukiLogo from '../../../src-tauri/icons/128x128.png'; +import { useConfig } from '../../contexts/ConfigContext'; import { Badge } from './_shared'; const OLLAMA_DOCS_URL = 'https://ollama.com/download'; const OLLAMA_SEARCH_URL = 'https://ollama.com/search'; -const OLLAMA_LISTEN_ADDR = '127.0.0.1:11434'; + +/** + * Extracts the `host:port` segment from an Ollama daemon URL for display. + * Falls back to the raw input when the URL cannot be parsed (e.g. user + * config holds a non-URL string), so the UI never shows a confusing + * empty value. + */ +function formatListenAddr(url: string): string { + try { + return new URL(url).host; + } catch { + return url; + } +} type ModelSetupState = | { state: 'ollama_unreachable' } @@ -404,6 +418,7 @@ interface RowOneProps { } function RowOne({ active, done }: RowOneProps) { + const config = useConfig(); const [selectedTabIdx, setSelectedTabIdx] = useState(0); const tab = INSTALL_TABS[selectedTabIdx]; @@ -440,7 +455,7 @@ function RowOne({ active, done }: RowOneProps) { letterSpacing: '-0.1px', }} > - Listening on {OLLAMA_LISTEN_ADDR} + Listening on {formatListenAddr(config.model.ollamaUrl)}

) : null}
diff --git a/src/view/onboarding/__tests__/ModelCheckStep.test.tsx b/src/view/onboarding/__tests__/ModelCheckStep.test.tsx index 13459af8..a8dffc42 100644 --- a/src/view/onboarding/__tests__/ModelCheckStep.test.tsx +++ b/src/view/onboarding/__tests__/ModelCheckStep.test.tsx @@ -8,6 +8,10 @@ import { } from '@testing-library/react'; import { describe, it, expect, beforeEach, beforeAll, vi } from 'vitest'; import { ModelCheckStep } from '../ModelCheckStep'; +import { + ConfigProviderForTest, + DEFAULT_CONFIG, +} from '../../../contexts/ConfigContext'; import { invoke, enableChannelCaptureWithResponses, @@ -93,6 +97,46 @@ describe('ModelCheckStep', () => { expect(screen.queryByText('RECOMMENDED')).not.toBeInTheDocument(); }); + it('renders the configured Ollama URL host:port in the listening line', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'no_models_installed' }, + }); + + render( + + + , + ); + await act(async () => {}); + + expect(screen.getByText('Listening on 10.0.0.5:9000')).toBeInTheDocument(); + }); + + it('falls back to the raw Ollama URL string when it is not parseable', async () => { + enableChannelCaptureWithResponses({ + check_model_setup: { state: 'no_models_installed' }, + }); + + render( + + + , + ); + await act(async () => {}); + + expect(screen.getByText('Listening on not-a-url')).toBeInTheDocument(); + }); + it('fires advance_past_model_check when Ready', async () => { enableChannelCaptureWithResponses({ check_model_setup: READY_RESPONSE, From b11002f7ea6bc973aa61b7ad99656db04c58bfe5 Mon Sep 17 00:00:00 2001 From: Logan Nguyen Date: Sat, 25 Apr 2026 18:36:14 -0500 Subject: [PATCH 29/42] fix(askbar): caret drift on multi-line input The mirror div behind the transparent textarea wrapped text using CSS pre-wrap+break-words, which never matched the native textarea's wrap algorithm exactly. 
Once content exceeded the 144px height cap and the textarea began scrolling, sub-pixel wrap differences accumulated and the caret drifted above the visible text. Drop the mirror+transparent-textarea pattern. Render the textarea with its own visible text so caret and glyphs share one element. Slash command discoverability is preserved by the existing suggestion popover. Signed-off-by: Logan Nguyen --- src/App.css | 15 +++ src/view/AskBarView.tsx | 75 +----------- src/view/__tests__/AskBarView.test.tsx | 161 ------------------------- 3 files changed, 17 insertions(+), 234 deletions(-) diff --git a/src/App.css b/src/App.css index 522bd923..e64b8e1c 100644 --- a/src/App.css +++ b/src/App.css @@ -170,6 +170,21 @@ body { scrollbar-gutter: stable; } +/* ─── AskBar Textarea ─── + * Hide the native scrollbar so the textarea's wrapping width matches the + * highlight mirror div behind it. Without this, once content exceeds the + * 144px cap and the textarea begins scrolling, the system scrollbar + * consumes a few pixels of content width, causing wrapped lines in the + * textarea and the mirror to diverge — the caret then floats above the + * visible text. The hook syncs scrollTop so caret-follow still works. + */ +.askbar-textarea { + scrollbar-width: none; +} +.askbar-textarea::-webkit-scrollbar { + display: none; +} + /* ─── Markdown Body: Prose Defaults ─── * Tailwind's preflight resets list-style to none globally. Streamdown adds no * list CSS of its own, so
    /
      inside .markdown-body lose their markers. diff --git a/src/view/AskBarView.tsx b/src/view/AskBarView.tsx index 7a9e7445..d6936064 100644 --- a/src/view/AskBarView.tsx +++ b/src/view/AskBarView.tsx @@ -1,6 +1,6 @@ import { motion, AnimatePresence } from 'framer-motion'; import type React from 'react'; -import { useCallback, useEffect, useMemo, useRef, useState } from 'react'; +import { useCallback, useEffect, useMemo, useState } from 'react'; import { formatQuotedText } from '../utils/formatQuote'; import { useConfig } from '../contexts/ConfigContext'; import { ImageThumbnails } from '../components/ImageThumbnails'; @@ -141,55 +141,6 @@ const CAMERA_ICON = ( ); -/** - * Renders text with command triggers highlighted in violet for the mirror div. - * Only the first occurrence of each command is highlighted; duplicates render plain. - */ -export function renderHighlightedText(text: string): React.ReactNode { - const parts: React.ReactNode[] = []; - let remaining = text; - const highlighted = new Set(); - - while (remaining.length > 0) { - let earliest = -1; - let matchedTrigger = ''; - for (const cmd of COMMANDS) { - if (highlighted.has(cmd.trigger)) continue; - const idx = remaining.indexOf(cmd.trigger); - if (idx !== -1 && (earliest === -1 || idx < earliest)) { - const before = idx === 0 || remaining[idx - 1] === ' '; - const after = - idx + cmd.trigger.length >= remaining.length || - remaining[idx + cmd.trigger.length] === ' '; - if (before && after) { - earliest = idx; - matchedTrigger = cmd.trigger; - } - } - } - - if (earliest === -1) { - parts.push({remaining}); - break; - } - - if (earliest > 0) { - parts.push( - {remaining.slice(0, earliest)}, - ); - } - parts.push( - - {matchedTrigger} - , - ); - highlighted.add(matchedTrigger); - remaining = remaining.slice(earliest + matchedTrigger.length); - } - - return <>{parts}; -} - /** * Maximum number of manually attached images per message. 
The backend allows * one additional image from /screen capture, for a total of 4 per message @@ -278,9 +229,6 @@ export function AskBarView({ onModelPickerToggle, isModelPickerOpen, }: AskBarViewProps) { - /** Ref to the mirror div behind the textarea for command highlighting. */ - const mirrorRef = useRef(null); - /** Quote display limits resolved from the managed AppConfig. */ const quote = useConfig().quote; @@ -503,14 +451,6 @@ export function AskBarView({ ], ); - /** Syncs the mirror div scroll position with the textarea. */ - const handleTextareaScroll = useCallback(() => { - /* v8 ignore start -- both refs are always set by React when this fires */ - if (!mirrorRef.current || !inputRef.current) return; - /* v8 ignore stop */ - mirrorRef.current.scrollTop = inputRef.current.scrollTop; - }, [inputRef]); - /** Handles clipboard paste - extracts image items from clipboardData. */ const handlePaste = useCallback( (e: React.ClipboardEvent) => { @@ -637,28 +577,17 @@ export function AskBarView({ )}
      - {/* Mirror div: renders the same text with highlighted commands. - Sits behind the transparent textarea so colored spans show through. */} -