Mirror of https://github.com/geoffsee/predict-otron-9001.git, synced 2025-09-08 22:46:44 +00:00

Removed `test_request.sh`, deprecated functionality, and unused imports; introduced a new CLI tool (`cli.ts`) for testing the inference engine and adjusted handling of non-streaming and streaming chat completions.
- Add CPU fallback support for text generation when the primary device is unsupported
- Introduce `execute_with_fallback` method to handle device compatibility and shape mismatch errors
- Extend unit tests to reproduce tensor shape mismatch errors specific to model configurations
- Increase HTTP timeout limits in the `curl_chat_stream.sh` script for reliable API testing
Chat completion endpoint functions with gemma3 (no streaming).
Add benchmarking guide with HTML reporting, Leptos chat crate, and middleware for metrics tracking.
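The new `cli.ts` itself is not shown on this page. As a rough illustration only, the sketch below shows how a small Bun/TypeScript script might exercise the server's OpenAI-compatible chat completions endpoint in both non-streaming and streaming modes; the base URL, API key handling, and model id are assumptions, not taken from the repository.

// Hypothetical sketch of a cli.ts-style client; server address and model id are assumed.
import OpenAI from "openai";

const openai = new OpenAI({
    baseURL: process.env.OPENAI_BASE_URL ?? "http://localhost:8080/v1", // assumed local server address
    apiKey: process.env.OPENAI_API_KEY ?? "not-needed-locally",         // assumed: local server ignores the key
});

// Non-streaming request: one round trip, full message in the response.
async function complete(prompt: string) {
    const completion = await openai.chat.completions.create({
        model: "gemma-3-1b-it", // assumed model id; use whatever the server actually exposes
        max_tokens: 100,
        messages: [{ role: "user", content: prompt }],
    });
    return completion.choices[0].message.content;
}

// Streaming request: iterate over chunks as they arrive and print the deltas.
async function stream(prompt: string) {
    const chunks = await openai.chat.completions.create({
        model: "gemma-3-1b-it",
        max_tokens: 100,
        stream: true,
        messages: [{ role: "user", content: prompt }],
    });
    for await (const chunk of chunks) {
        process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
    }
    process.stdout.write("\n");
}

console.log(await complete("Who was the 16th president of the United States?"));
await stream("Who was the 16th president of the United States?");

Such a script could be run with `bun run cli-sketch.ts` against a locally running server; the file name here is illustrative.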
32 lines
1.1 KiB
TypeScript
Executable File
// #!/usr/bin/env bun
//
// import OpenAI from "openai";
// import {describe, test, expect} from "bun:test";
//
// async function requestActualOpenAI(userPrompt: string) {
//     const openai = new OpenAI();
//     return await openai.chat.completions.create({
//         model: "gpt-4o",
//         max_tokens: 100,
//         messages: [{name: "user_1", role: "user", content: userPrompt}]
//     }).then(result => result.choices[0].message);
// }
//
// // Exists as a smoke test.
// describe("Actual OpenAI Completions", () => {
//     test("Should return a valid message", async () => {
//         const userPrompt = "Who was the 16th president of the United States?";
//         const result = await requestActualOpenAI(userPrompt);
//
//         console.log({
//             test: "hitting actual openai to ensure basic functionality",
//             modelResponse: result.content,
//             userPrompt
//         });
//
//         expect(result.annotations).toEqual([])
//         expect(result.content).toBeDefined();
//         expect(result.refusal).toEqual(null);
//         expect(result.role).toEqual("assistant");
//     })
// })