# predict-otron-9001/crates/predict-otron-9000/Cargo.toml

[package]
name = "predict-otron-9000"
version.workspace = true
edition = "2024"

[[bin]]
name = "predict-otron-9000"
path = "src/main.rs"

[dependencies]
# Axum web framework
axum = "0.8.4"
tokio = { version = "1.45.1", features = ["full"] }
tower = "0.5.2"
tower-http = { version = "0.6.6", features = ["trace", "cors", "fs"] }
serde = { version = "1.0.219", features = ["derive"] }
serde_json = "1.0.140"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
uuid = { version = "1.7.0", features = ["v4"] }
reqwest = { version = "0.12", features = ["json"] }
rust-embed = { version = "8.7.2", features = ["include-exclude", "axum"] }
# Dependencies for embeddings functionality
embeddings-engine = { path = "../embeddings-engine" }
# Dependencies for inference functionality
inference-engine = { path = "../inference-engine" }
# Dependencies for the Leptos web app (optional; enabled via the `ui` feature)
#leptos-app = { path = "../leptos-app", features = ["ssr"] }
chat-ui = { path = "../chat-ui", features = ["ssr", "hydrate"], optional = true }
mime_guess = "2.0.5"
log = "0.4.27"

# Metadata used to generate Kubernetes manifests
[package.metadata.kube]
image = "ghcr.io/geoffsee/predict-otron-9000:latest"
replicas = 1
port = 8080
cmd = ["./bin/predict-otron-9000"]
# SERVER_CONFIG example (inner quotes escaped for the TOML string):
# {\"serverMode\":\"HighAvailability\",\"services\":{\"inference_url\":\"http://custom-inference:9000\",\"embeddings_url\":\"http://custom-embeddings:9001\"}}
# You can generate the escaped value with Node instead of escaping it by hand:
# const server_config = { serverMode: "HighAvailability", services: { inference_url: "http://custom-inference:9000", embeddings_url: "http://custom-embeddings:9001" } };
# console.log(JSON.stringify(server_config).replace(/"/g, '\\"'));
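# With the example values above (a sketch, not a required configuration), the
# finished line would read:
# env = { SERVER_CONFIG = "{\"serverMode\":\"HighAvailability\",\"services\":{\"inference_url\":\"http://custom-inference:9000\",\"embeddings_url\":\"http://custom-embeddings:9001\"}}" }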
env = { SERVER_CONFIG = "<your-json-value-here>" }

[features]
default = ["ui"]
ui = ["dep:chat-ui"]
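
# The `ui` feature gates the optional chat-ui dependency and is on by default.
# A sketch of the standard Cargo invocations (flags are stock Cargo, nothing
# project-specific is assumed):
#   cargo build --release --no-default-features   # headless server, no web UI
#   cargo build --release --features ui           # opt back in explicitly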