From d1a7d5b28e67556a47d1cee5b16f4726f19fffdb Mon Sep 17 00:00:00 2001
From: geoffsee <>
Date: Sun, 31 Aug 2025 19:59:09 -0400
Subject: [PATCH] fix format error

---
 Cargo.lock                            | 14 +++++++-------
 crates/chat-ui/README.md              |  2 +-
 crates/chat-ui/src/app.rs             | 32 ++++++++++++++++++--------------
 crates/chat-ui/src/main.rs            |  7 ++-----
 crates/predict-otron-9000/src/main.rs | 22 ++++++----------------
 5 files changed, 34 insertions(+), 43 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 48bb7ee..ddfafa3 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -893,7 +893,7 @@ checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675"
 
 [[package]]
 name = "cli"
-version = "0.1.2"
+version = "0.1.3"
 
 [[package]]
 name = "codee"
@@ -1471,7 +1471,7 @@ dependencies = [
 
 [[package]]
 name = "embeddings-engine"
-version = "0.1.2"
+version = "0.1.3"
 dependencies = [
  "async-openai",
  "axum",
@@ -2093,7 +2093,7 @@ dependencies = [
 
 [[package]]
 name = "gemma-runner"
-version = "0.1.2"
+version = "0.1.3"
 dependencies = [
  "anyhow",
  "candle-core 0.9.1 (git+https://github.com/huggingface/candle.git)",
@@ -2274,7 +2274,7 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
 
 [[package]]
 name = "helm-chart-tool"
-version = "0.1.2"
+version = "0.1.3"
 dependencies = [
  "anyhow",
  "clap",
@@ -2684,7 +2684,7 @@ dependencies = [
 
 [[package]]
 name = "inference-engine"
-version = "0.1.2"
+version = "0.1.3"
 dependencies = [
  "ab_glyph",
  "anyhow",
@@ -3177,7 +3177,7 @@ checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956"
 
 [[package]]
 name = "llama-runner"
-version = "0.1.2"
+version = "0.1.3"
 dependencies = [
  "anyhow",
  "candle-core 0.9.1 (git+https://github.com/huggingface/candle.git)",
@@ -4056,7 +4056,7 @@ dependencies = [
 
 [[package]]
 name = "predict-otron-9000"
-version = "0.1.2"
+version = "0.1.3"
 dependencies = [
  "axum",
  "chat-ui",
diff --git a/crates/chat-ui/README.md b/crates/chat-ui/README.md
index edb321e..c37bc08 100644
--- a/crates/chat-ui/README.md
+++ b/crates/chat-ui/README.md
@@ -29,7 +29,7 @@ cd crates/chat-ui
 This starts the development server on port 8788 with auto-reload capabilities.
 
 ### Usage
-1. Start the predict-otron-9000 server: `./scripts/run_server.sh`
+1. Start the predict-otron-9000 server: `./scripts/run.sh`
 2. Start the chat-ui: `cd crates/chat-ui && ./run.sh`
 3. Navigate to `http://localhost:8788`
 4. Start chatting with your AI models!
diff --git a/crates/chat-ui/src/app.rs b/crates/chat-ui/src/app.rs
index 43dd2a5..6a540a6 100644
--- a/crates/chat-ui/src/app.rs
+++ b/crates/chat-ui/src/app.rs
@@ -12,7 +12,6 @@ pub struct AppConfig {
 
 impl Default for AppConfig {
     fn default() -> Self {
-
         let conf = get_configuration(Some(concat!(env!("CARGO_MANIFEST_DIR"), "/Cargo.toml")))
             .expect("failed to read config");
@@ -41,6 +40,7 @@ pub fn create_router(leptos_options: LeptosOptions) -> Router {
         .with_state(leptos_options)
 }
 
+use gloo_net::http::Request;
 use leptos::prelude::*;
 use leptos_meta::{provide_meta_context, MetaTags, Stylesheet, Title};
 use leptos_router::{
@@ -48,7 +48,6 @@ use leptos_router::{
     StaticSegment,
 };
 use serde::{Deserialize, Serialize};
-use gloo_net::http::Request;
 use web_sys::console;
 
 // Remove spawn_local import as we'll use different approach
@@ -122,7 +121,10 @@ pub async fn fetch_models() -> Result<Vec<String>, String> {
 }
 
 // API client function to send chat completion requests
-pub async fn send_chat_completion(messages: Vec<Message>, model: String) -> Result<String, String> {
+pub async fn send_chat_completion(
+    messages: Vec<Message>,
+    model: String,
+) -> Result<String, String> {
     let request = ChatRequest {
         model,
         messages,
@@ -206,20 +208,20 @@ fn ChatPage() -> impl IntoView {
     // State for conversation messages
     let messages = RwSignal::new(Vec::<Message>::new());
-    
+
     // State for current user input
     let input_text = RwSignal::new(String::new());
-    
+
     // State for loading indicator
     let is_loading = RwSignal::new(false);
-    
+
     // State for error messages
     let error_message = RwSignal::new(Option::<String>::None);
-    
+
     // State for available models and selected model
     let available_models = RwSignal::new(Vec::<String>::new());
     let selected_model = RwSignal::new(String::from("gemma-3-1b-it")); // Default model
-    
+
     // Client-side only: Fetch models on component mount
     #[cfg(target_arch = "wasm32")]
     {
@@ -249,7 +251,7 @@ fn ChatPage() -> impl IntoView {
             role: "user".to_string(),
             content: user_input.clone(),
         };
-        
+
         messages.update(|msgs| msgs.push(user_message.clone()));
         input_text.set(String::new());
         is_loading.set(true);
@@ -259,11 +261,11 @@ fn ChatPage() -> impl IntoView {
         #[cfg(target_arch = "wasm32")]
        {
             use leptos::task::spawn_local;
-            
+
             // Prepare messages for API call
             let current_messages = messages.get();
             let current_model = selected_model.get();
-            
+
             // Spawn async task to call API
             spawn_local(async move {
                 match send_chat_completion(current_messages, current_model).await {
@@ -307,7 +309,7 @@ fn ChatPage() -> impl IntoView {
[hunk body garbled in extraction: the view! markup tags were stripped; the recoverable pieces are the "Predict-Otron-9000 Chat" heading text and trailing-whitespace cleanup (-/+) lines]
[a later hunk in the same view! block; its header and markup were also garbled, leaving only the context below]
                 }
             }
         />
-        
+
         {move || {
             if is_loading.get() {
                 view! {
@@ -379,7 +381,7 @@ fn ChatPage() -> impl IntoView {
                     on:keydown=on_key_down
                     class:disabled=move || is_loading.get()
                 />
-