fix clippy errors

geoffsee
2025-09-04 13:53:00 -04:00
parent c1c583faab
commit fb5098eba6
11 changed files with 15 additions and 38 deletions

View File

@@ -25,7 +25,7 @@ fn run_build() -> io::Result<()> {
     let output_path = out_dir.join("client-cli");
     let bun_tgt = BunTarget::from_cargo_env()
-        .map_err(|e| io::Error::new(io::ErrorKind::Other, e.to_string()))?;
+        .map_err(|e| io::Error::other(e.to_string()))?;
     // Optional: warn if using a Bun target thats marked unsupported in your chart
     if matches!(bun_tgt, BunTarget::WindowsArm64) {
@@ -54,13 +54,12 @@ fn run_build() -> io::Result<()> {
     if !install_status.success() {
         let code = install_status.code().unwrap_or(1);
-        return Err(io::Error::new(
-            io::ErrorKind::Other,
+        return Err(io::Error::other(
             format!("bun install failed with status {code}"),
         ));
     }
-    let target = env::var("TARGET").unwrap();
+    let _target = env::var("TARGET").unwrap();
     // --- bun build (in ./package), emit to OUT_DIR, keep temps inside OUT_DIR ---
     let mut build = Command::new("bun")
@@ -87,7 +86,7 @@ fn run_build() -> io::Result<()> {
     } else {
         let code = status.code().unwrap_or(1);
         warn(&format!("bun build failed with status: {code}"));
-        return Err(io::Error::new(io::ErrorKind::Other, "bun build failed"));
+        return Err(io::Error::other("bun build failed"));
     }
     // Ensure the output is executable (after it exists)
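
For context on the three hunks above: the replacement they apply is std::io::Error::other (stable since Rust 1.74), the shorthand clippy suggests over io::Error::new(io::ErrorKind::Other, ...). A minimal standalone sketch of the two spellings, not taken from this repository:

use std::io;

// Older spelling that clippy flags.
fn err_old(msg: &str) -> io::Error {
    io::Error::new(io::ErrorKind::Other, msg.to_string())
}

// Shorthand adopted by this commit; ErrorKind::Other is implied.
fn err_new(msg: &str) -> io::Error {
    io::Error::other(msg.to_string())
}

fn main() {
    // Both constructors produce the same error kind.
    assert_eq!(err_old("boom").kind(), err_new("boom").kind());
}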

View File

@@ -191,8 +191,6 @@ impl TextGeneration {
         // Make sure stdout isn't holding anything (if caller also prints).
         std::io::stdout().flush()?;
-        let mut _generated_tokens = 0usize;
         let eos_token = match self.tokenizer.get_token("<eos>") {
             Some(token) => token,
             None => anyhow::bail!("cannot find the <eos> token"),
@@ -207,7 +205,7 @@ impl TextGeneration {
         let start_gen = std::time::Instant::now();
-        for index in 0..sample_len {
+        for (_generated_tokens, index) in (0..sample_len).enumerate() {
             let context_size = if index > 0 { 1 } else { tokens.len() };
             let start_pos = tokens.len().saturating_sub(context_size);
             let ctxt = &tokens[start_pos..];
@@ -229,7 +227,6 @@ impl TextGeneration {
             let next_token = self.logits_processor.sample(&logits)?;
             tokens.push(next_token);
-            _generated_tokens += 1;
             if next_token == eos_token || next_token == eot_token {
                 break;
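
For context on the hunks above: the hand-maintained counter is dropped and the loop index comes from enumerate() instead, with a leading underscore because the count is never read. A minimal sketch of that rewrite, using hypothetical names rather than the real ones from this file:

fn generate(sample_len: usize, tokens: &mut Vec<u32>) {
    // The counter is produced by enumerate(); the underscore marks it as
    // intentionally unused so the unused-variable lint stays quiet.
    for (_generated_tokens, index) in (0..sample_len).enumerate() {
        let _context_size = if index > 0 { 1 } else { tokens.len() };
        tokens.push(index as u32); // stand-in for real token sampling
    }
}

fn main() {
    let mut tokens = Vec::new();
    generate(4, &mut tokens);
    assert_eq!(tokens.len(), 4);
}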

View File

@@ -67,7 +67,7 @@ pub fn run_cli() -> anyhow::Result<()> {
     let cfg = GemmaInferenceConfig {
         tracing: args.tracing,
         prompt: args.prompt,
-        model: args.model,
+        model: Some(args.model),
         cpu: args.cpu,
         dtype: args.dtype,
         model_id: args.model_id,
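
For context: this hunk wraps the CLI value in Some(...), the usual adjustment when a config field is an Option while the argument itself is required. A small illustration with hypothetical stand-in types, not the real Args or GemmaInferenceConfig:

struct Args {
    model: String,
}

struct Config {
    model: Option<String>,
}

fn to_config(args: Args) -> Config {
    // The field is Option<String>, so the plain CLI value gets wrapped.
    Config { model: Some(args.model) }
}

fn main() {
    let cfg = to_config(Args { model: "gemma-2b".into() });
    assert_eq!(cfg.model.as_deref(), Some("gemma-2b"));
}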

View File

@@ -1,6 +1,5 @@
 pub mod llama_api;
-use clap::ValueEnum;
 pub use llama_api::{run_llama_inference, LlamaInferenceConfig, WhichModel};
 // Re-export constants and types that might be needed

View File

@@ -126,7 +126,7 @@ pub fn hub_load_safetensors(
.iter()
.map(|v| {
repo.get(v)
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
.map_err(|e| std::io::Error::other(e))
})
.collect::<Result<Vec<_>, std::io::Error>>()?;
Ok(safetensors_files)
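
For context: the hunk above keeps the same map-and-collect shape and only swaps the error constructor. A standalone sketch of that pattern with hypothetical names, where the first failed lookup short-circuits the whole collection:

use std::io;

// Pretend lookup that can fail with a plain String error.
fn lookup(name: &str) -> Result<String, String> {
    if name.is_empty() {
        Err("empty file name".to_string())
    } else {
        Ok(format!("/cache/{name}"))
    }
}

fn resolve_all(names: &[&str]) -> Result<Vec<String>, io::Error> {
    names
        .iter()
        // io::Error::other accepts any error type that can become a boxed error.
        .map(|n| lookup(n).map_err(io::Error::other))
        .collect()
}

fn main() -> Result<(), io::Error> {
    let paths = resolve_all(&["model-00001.safetensors", "model-00002.safetensors"])?;
    println!("{paths:?}");
    Ok(())
}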