adds eslint

geoffsee
2025-06-24 17:29:52 -04:00
committed by Geoff Seemueller
parent 9698fc6f3b
commit 02c3253343
169 changed files with 4896 additions and 4804 deletions

View File

@@ -1,83 +1,86 @@
 export type GenericEnv = Record<string, any>;

 export class ProviderRepository {
-  #providers: {name: string, key: string, endpoint: string}[] = [];
-  #env: Record<string, any>;
+  #providers: { name: string; key: string; endpoint: string }[] = [];
+  #env: Record<string, any>;

-  constructor(env: GenericEnv) {
-    this.#env = env
-    this.setProviders(env);
-  }
+  constructor(env: GenericEnv) {
+    this.#env = env;
+    this.setProviders(env);
+  }

-  static OPENAI_COMPAT_ENDPOINTS = {
-    xai: 'https://api.x.ai/v1',
-    groq: 'https://api.groq.com/openai/v1',
-    google: 'https://generativelanguage.googleapis.com/v1beta/openai',
-    fireworks: 'https://api.fireworks.ai/inference/v1',
-    cohere: 'https://api.cohere.ai/compatibility/v1',
-    cloudflare: 'https://api.cloudflare.com/client/v4/accounts/{CLOUDFLARE_ACCOUNT_ID}/ai/v1',
-    anthropic: 'https://api.anthropic.com/v1',
-    openai: 'https://api.openai.com/v1',
-    cerebras: 'https://api.cerebras.com/v1',
-    ollama: "http://localhost:11434/v1",
-    mlx: "http://localhost:10240/v1",
-  }
+  static OPENAI_COMPAT_ENDPOINTS = {
+    xai: 'https://api.x.ai/v1',
+    groq: 'https://api.groq.com/openai/v1',
+    google: 'https://generativelanguage.googleapis.com/v1beta/openai',
+    fireworks: 'https://api.fireworks.ai/inference/v1',
+    cohere: 'https://api.cohere.ai/compatibility/v1',
+    cloudflare: 'https://api.cloudflare.com/client/v4/accounts/{CLOUDFLARE_ACCOUNT_ID}/ai/v1',
+    anthropic: 'https://api.anthropic.com/v1',
+    openai: 'https://api.openai.com/v1',
+    cerebras: 'https://api.cerebras.com/v1',
+    ollama: 'http://localhost:11434/v1',
+    mlx: 'http://localhost:10240/v1',
+  };

-  static async getModelFamily(model: any, env: Env) {
-    const allModels = await env.KV_STORAGE.get("supportedModels");
-    const models = JSON.parse(allModels);
-    const modelData = models.filter(m => m.id === model)
-    return modelData[0].provider;
-  }
+  static async getModelFamily(model: any, env: Env) {
+    const allModels = await env.KV_STORAGE.get('supportedModels');
+    const models = JSON.parse(allModels);
+    const modelData = models.filter(m => m.id === model);
+    return modelData[0].provider;
+  }

-  static async getModelMeta(meta, env) {
-    const allModels = await env.KV_STORAGE.get("supportedModels");
-    const models = JSON.parse(allModels);
-    return models.filter(m => m.id === meta.model).pop()
-  }
+  static async getModelMeta(meta, env) {
+    const allModels = await env.KV_STORAGE.get('supportedModels');
+    const models = JSON.parse(allModels);
+    return models.filter(m => m.id === meta.model).pop();
+  }

-  getProviders(): {name: string, key: string, endpoint: string}[] {
-    return this.#providers;
-  }
+  getProviders(): { name: string; key: string; endpoint: string }[] {
+    return this.#providers;
+  }

-  setProviders(env: Record<string, any>) {
-    let envKeys = Object.keys(env);
-    for (let i = 0; i < envKeys.length; i++) {
-      if (envKeys[i].endsWith('KEY')) {
-        const detectedProvider = envKeys[i].split('_')[0].toLowerCase();
-        const detectedProviderValue = env[envKeys[i]];
-        if(detectedProviderValue) {
-          switch (detectedProvider) {
-            case 'anthropic':
-              this.#providers.push({
-                name: 'anthropic',
-                key: env.ANTHROPIC_API_KEY,
-                endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS['anthropic']
-              });
-              break;
-            case 'gemini':
-              this.#providers.push({
-                name: 'google',
-                key: env.GEMINI_API_KEY,
-                endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS['google']
-              });
-              break;
-            case 'cloudflare':
-              this.#providers.push({
-                name: 'cloudflare',
-                key: env.CLOUDFLARE_API_KEY,
-                endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS[detectedProvider].replace("{CLOUDFLARE_ACCOUNT_ID}", env.CLOUDFLARE_ACCOUNT_ID)
-              })
-            default:
-              this.#providers.push({
-                name: detectedProvider,
-                key: env[envKeys[i]],
-                endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS[detectedProvider]
-              });
-          }
-        }
-      }
+  setProviders(env: Record<string, any>) {
+    const envKeys = Object.keys(env);
+    for (let i = 0; i < envKeys.length; i++) {
+      if (envKeys[i].endsWith('KEY')) {
+        const detectedProvider = envKeys[i].split('_')[0].toLowerCase();
+        const detectedProviderValue = env[envKeys[i]];
+        if (detectedProviderValue) {
+          switch (detectedProvider) {
+            case 'anthropic':
+              this.#providers.push({
+                name: 'anthropic',
+                key: env.ANTHROPIC_API_KEY,
+                endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS['anthropic'],
+              });
+              break;
+            case 'gemini':
+              this.#providers.push({
+                name: 'google',
+                key: env.GEMINI_API_KEY,
+                endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS['google'],
+              });
+              break;
+            case 'cloudflare':
+              this.#providers.push({
+                name: 'cloudflare',
+                key: env.CLOUDFLARE_API_KEY,
+                endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS[detectedProvider].replace(
+                  '{CLOUDFLARE_ACCOUNT_ID}',
+                  env.CLOUDFLARE_ACCOUNT_ID,
+                ),
+              });
+              break;
+            default:
+              this.#providers.push({
+                name: detectedProvider,
+                key: env[envKeys[i]],
+                endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS[detectedProvider],
+              });
+          }
+        }
+      }
     }
   }
 }
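
Note on the detection convention above: any environment variable ending in KEY is treated as a provider credential, and the prefix before the first underscore (lowercased) selects the OPENAI_COMPAT_ENDPOINTS entry, with anthropic, gemini, and cloudflare special-cased. The added break in the cloudflare case also stops it falling through to default, which previously registered cloudflare a second time. A minimal sketch of what this yields at runtime (env values are illustrative, not from the repo):

  import { ProviderRepository } from './_ProviderRepository';

  // Hypothetical worker env: each *_KEY entry registers a provider.
  const env = {
    GROQ_API_KEY: 'gsk-example',
    CLOUDFLARE_API_KEY: 'cf-example',
    CLOUDFLARE_ACCOUNT_ID: 'abc123', // consumed by the cloudflare case; not itself a provider
  };

  const repo = new ProviderRepository(env);
  console.log(repo.getProviders());
  // [
  //   { name: 'groq', key: 'gsk-example', endpoint: 'https://api.groq.com/openai/v1' },
  //   { name: 'cloudflare', key: 'cf-example',
  //     endpoint: 'https://api.cloudflare.com/client/v4/accounts/abc123/ai/v1' },
  // ]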

View File

@@ -1,6 +1,11 @@
-import { describe, it, expect, vi } from 'vitest';
-import { BaseChatProvider, CommonProviderParams, ChatStreamProvider } from '../chat-stream-provider.ts';
-import { OpenAI } from 'openai';
+import { describe, it, expect, vi } from 'vitest';
+import {
+  BaseChatProvider,
+  CommonProviderParams,
+  ChatStreamProvider,
+} from '../chat-stream-provider.ts';

 // Create a concrete implementation of BaseChatProvider for testing
 class TestChatProvider extends BaseChatProvider {
@@ -29,16 +34,16 @@ vi.mock('../../lib/chat-sdk', () => ({
     buildAssistantPrompt: vi.fn().mockReturnValue('Assistant prompt'),
     buildMessageChain: vi.fn().mockReturnValue([
       { role: 'system', content: 'System prompt' },
-      { role: 'user', content: 'User message' }
-    ])
-  }
+      { role: 'user', content: 'User message' },
+    ]),
+  },
 }));

 describe('ChatStreamProvider', () => {
   it('should define the required interface', () => {
     // Verify the interface has the required method
     const mockProvider: ChatStreamProvider = {
-      handleStream: vi.fn()
+      handleStream: vi.fn(),
     };
     expect(mockProvider.handleStream).toBeDefined();

View File

@@ -1,6 +1,7 @@
-import {OpenAI} from "openai";
-import {BaseChatProvider, CommonProviderParams} from "./chat-stream-provider.ts";
-import {ProviderRepository} from "./_ProviderRepository";
+import { OpenAI } from 'openai';
+import { ProviderRepository } from './_ProviderRepository';
+import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

 export class CerebrasChatProvider extends BaseChatProvider {
   getOpenAIClient(param: CommonProviderParams): OpenAI {
@@ -11,8 +12,8 @@ export class CerebrasChatProvider extends BaseChatProvider {
   }

   getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
-    // models provided by cerebras do not follow standard tune params
-    // they must be individually configured
+    // models provided by cerebras do not follow standard tune params
+    // they must be individually configured
     // const tuningParams = {
     //   temperature: 0.86,
     //   top_p: 0.98,
@@ -24,18 +25,18 @@ export class CerebrasChatProvider extends BaseChatProvider {
     return {
       model: param.model,
       messages: safeMessages,
-      stream: true
-      // ...tuningParams
+      stream: true,
+      // ...tuningParams
     };
   }

   async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
-    if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
-      dataCallback({ type: "chat", data: chunk });
+    if (chunk.choices && chunk.choices[0]?.finish_reason === 'stop') {
+      dataCallback({ type: 'chat', data: chunk });
       return true;
     }
-    dataCallback({ type: "chat", data: chunk });
+    dataCallback({ type: 'chat', data: chunk });
     return false;
   }
 }

View File

@@ -1,5 +1,6 @@
-import { OpenAI } from "openai";
-import ChatSdk from "../lib/chat-sdk.ts";
+import { OpenAI } from 'openai';
+import ChatSdk from '../lib/chat-sdk.ts';

 export interface CommonProviderParams {
   openai?: OpenAI; // Optional for providers that use a custom client.
@@ -14,10 +15,7 @@ export interface CommonProviderParams {
 }

 export interface ChatStreamProvider {
-  handleStream(
-    param: CommonProviderParams,
-    dataCallback: (data: any) => void,
-  ): Promise<any>;
+  handleStream(param: CommonProviderParams, dataCallback: (data: any) => void): Promise<any>;
 }

 export abstract class BaseChatProvider implements ChatStreamProvider {
@@ -25,17 +23,14 @@ export abstract class BaseChatProvider implements ChatStreamProvider {
   abstract getStreamParams(param: CommonProviderParams, safeMessages: any[]): any;
   abstract async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean>;

-  async handleStream(
-    param: CommonProviderParams,
-    dataCallback: (data: any) => void,
-  ) {
+  async handleStream(param: CommonProviderParams, dataCallback: (data: any) => void) {
     const assistantPrompt = ChatSdk.buildAssistantPrompt({ maxTokens: param.maxTokens });
     const safeMessages = await ChatSdk.buildMessageChain(param.messages, {
       systemPrompt: param.systemPrompt,
       model: param.model,
       assistantPrompt,
       toolResults: param.preprocessedContext,
-      env: param.env
+      env: param.env,
     });

     const client = this.getOpenAIClient(param);
@@ -47,4 +42,4 @@ export abstract class BaseChatProvider implements ChatStreamProvider {
       if (shouldBreak) break;
     }
   }
-}
+}
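
BaseChatProvider is a template method: handleStream builds the sanitized message chain once, then defers to the three abstract hooks. A sketch of the smallest concrete provider satisfying that contract (the endpoint and EXAMPLE_API_KEY name are hypothetical):

  import { OpenAI } from 'openai';
  import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

  class ExampleChatProvider extends BaseChatProvider {
    getOpenAIClient(param: CommonProviderParams): OpenAI {
      // Hypothetical OpenAI-compatible endpoint and key name.
      return new OpenAI({ baseURL: 'https://api.example.com/v1', apiKey: param.env.EXAMPLE_API_KEY });
    }

    getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
      return { model: param.model, messages: safeMessages, stream: true };
    }

    async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
      dataCallback({ type: 'chat', data: chunk });
      // Returning true tells handleStream to stop iterating over the stream.
      return chunk.choices?.[0]?.finish_reason === 'stop';
    }
  }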

View File

@@ -1,14 +1,16 @@
-import Anthropic from "@anthropic-ai/sdk";
-import {OpenAI} from "openai";
+import Anthropic from '@anthropic-ai/sdk';
 import {
   _NotCustomized,
   ISimpleType,
   ModelPropertiesDeclarationToProperties,
   ModelSnapshotType2,
   UnionStringArray,
-} from "mobx-state-tree";
-import ChatSdk from "../lib/chat-sdk.ts";
-import {BaseChatProvider, CommonProviderParams} from "./chat-stream-provider.ts";
+} from 'mobx-state-tree';
+import { OpenAI } from 'openai';
+import ChatSdk from '../lib/chat-sdk.ts';
+import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

 export class ClaudeChatProvider extends BaseChatProvider {
   private anthropic: Anthropic | null = null;
@@ -33,20 +35,20 @@ export class ClaudeChatProvider extends BaseChatProvider {
       stream: true,
       model: param.model,
       messages: safeMessages,
-      ...claudeTuningParams
+      ...claudeTuningParams,
     };
   }

   async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
-    if (chunk.type === "message_stop") {
+    if (chunk.type === 'message_stop') {
       dataCallback({
-        type: "chat",
+        type: 'chat',
         data: {
           choices: [
             {
-              delta: { content: "" },
+              delta: { content: '' },
               logprobs: null,
-              finish_reason: "stop",
+              finish_reason: 'stop',
             },
           ],
         },
@@ -54,15 +56,12 @@ export class ClaudeChatProvider extends BaseChatProvider {
       return true;
     }

-    dataCallback({ type: "chat", data: chunk });
+    dataCallback({ type: 'chat', data: chunk });
     return false;
   }

   // Override the base handleStream method to use Anthropic client instead of OpenAI
-  async handleStream(
-    param: CommonProviderParams,
-    dataCallback: (data: any) => void,
-  ) {
+  async handleStream(param: CommonProviderParams, dataCallback: (data: any) => void) {
     const assistantPrompt = ChatSdk.buildAssistantPrompt({ maxTokens: param.maxTokens });
     const safeMessages = ChatSdk.buildMessageChain(param.messages, {
       systemPrompt: param.systemPrompt,
@@ -75,7 +74,7 @@ export class ClaudeChatProvider extends BaseChatProvider {
     const streamParams = this.getStreamParams(param, safeMessages);

     if (!this.anthropic) {
-      throw new Error("Anthropic client not initialized");
+      throw new Error('Anthropic client not initialized');
     }

     const stream = await this.anthropic.messages.create(streamParams);

View File

@@ -1,12 +1,16 @@
-import {OpenAI} from "openai";
-import {BaseChatProvider, CommonProviderParams} from "./chat-stream-provider.ts";
-import {ProviderRepository} from "./_ProviderRepository";
+import { OpenAI } from 'openai';
+import { ProviderRepository } from './_ProviderRepository';
+import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

 export class CloudflareAiChatProvider extends BaseChatProvider {
   getOpenAIClient(param: CommonProviderParams): OpenAI {
     return new OpenAI({
       apiKey: param.env.CLOUDFLARE_API_KEY,
-      baseURL: ProviderRepository.OPENAI_COMPAT_ENDPOINTS.cloudflare.replace("{CLOUDFLARE_ACCOUNT_ID}", param.env.CLOUDFLARE_ACCOUNT_ID),
+      baseURL: ProviderRepository.OPENAI_COMPAT_ENDPOINTS.cloudflare.replace(
+        '{CLOUDFLARE_ACCOUNT_ID}',
+        param.env.CLOUDFLARE_ACCOUNT_ID,
+      ),
     });
   }
@@ -18,36 +22,36 @@ export class CloudflareAiChatProvider extends BaseChatProvider {
     };

     // Set max_tokens based on model
-    if (this.getModelPrefix(param.model) === "@cf/meta") {
-      generationParams["max_tokens"] = 4096;
+    if (this.getModelPrefix(param.model) === '@cf/meta') {
+      generationParams['max_tokens'] = 4096;
     }

-    if (this.getModelPrefix(param.model) === "@hf/mistral") {
-      generationParams["max_tokens"] = 4096;
+    if (this.getModelPrefix(param.model) === '@hf/mistral') {
+      generationParams['max_tokens'] = 4096;
     }

-    if (param.model.toLowerCase().includes("hermes-2-pro-mistral-7b")) {
-      generationParams["max_tokens"] = 1000;
+    if (param.model.toLowerCase().includes('hermes-2-pro-mistral-7b')) {
+      generationParams['max_tokens'] = 1000;
     }

-    if (param.model.toLowerCase().includes("openhermes-2.5-mistral-7b-awq")) {
-      generationParams["max_tokens"] = 1000;
+    if (param.model.toLowerCase().includes('openhermes-2.5-mistral-7b-awq')) {
+      generationParams['max_tokens'] = 1000;
     }

-    if (param.model.toLowerCase().includes("deepseek-coder-6.7b-instruct-awq")) {
-      generationParams["max_tokens"] = 590;
+    if (param.model.toLowerCase().includes('deepseek-coder-6.7b-instruct-awq')) {
+      generationParams['max_tokens'] = 590;
     }

-    if (param.model.toLowerCase().includes("deepseek-math-7b-instruct")) {
-      generationParams["max_tokens"] = 512;
+    if (param.model.toLowerCase().includes('deepseek-math-7b-instruct')) {
+      generationParams['max_tokens'] = 512;
     }

-    if (param.model.toLowerCase().includes("neural-chat-7b-v3-1-awq")) {
-      generationParams["max_tokens"] = 590;
+    if (param.model.toLowerCase().includes('neural-chat-7b-v3-1-awq')) {
+      generationParams['max_tokens'] = 590;
     }

-    if (param.model.toLowerCase().includes("openchat-3.5-0106")) {
-      generationParams["max_tokens"] = 2000;
+    if (param.model.toLowerCase().includes('openchat-3.5-0106')) {
+      generationParams['max_tokens'] = 2000;
     }

     return generationParams;
@@ -56,38 +60,36 @@ export class CloudflareAiChatProvider extends BaseChatProvider {
   private getModelPrefix(model: string): string {
     let modelPrefix = `@cf/meta`;

-    if (model.toLowerCase().includes("llama")) {
+    if (model.toLowerCase().includes('llama')) {
       modelPrefix = `@cf/meta`;
     }

-    if (model.toLowerCase().includes("hermes-2-pro-mistral-7b")) {
+    if (model.toLowerCase().includes('hermes-2-pro-mistral-7b')) {
       modelPrefix = `@hf/nousresearch`;
     }

-    if (model.toLowerCase().includes("mistral-7b-instruct")) {
+    if (model.toLowerCase().includes('mistral-7b-instruct')) {
       modelPrefix = `@hf/mistral`;
     }

-    if (model.toLowerCase().includes("gemma")) {
+    if (model.toLowerCase().includes('gemma')) {
       modelPrefix = `@cf/google`;
     }

-    if (model.toLowerCase().includes("deepseek")) {
+    if (model.toLowerCase().includes('deepseek')) {
       modelPrefix = `@cf/deepseek-ai`;
     }

-    if (model.toLowerCase().includes("openchat-3.5-0106")) {
+    if (model.toLowerCase().includes('openchat-3.5-0106')) {
       modelPrefix = `@cf/openchat`;
     }

-    const isNueralChat = model
-      .toLowerCase()
-      .includes("neural-chat-7b-v3-1-awq");
+    const isNueralChat = model.toLowerCase().includes('neural-chat-7b-v3-1-awq');
     if (
       isNueralChat ||
-      model.toLowerCase().includes("openhermes-2.5-mistral-7b-awq") ||
-      model.toLowerCase().includes("zephyr-7b-beta-awq") ||
-      model.toLowerCase().includes("deepseek-coder-6.7b-instruct-awq")
+      model.toLowerCase().includes('openhermes-2.5-mistral-7b-awq') ||
+      model.toLowerCase().includes('zephyr-7b-beta-awq') ||
+      model.toLowerCase().includes('deepseek-coder-6.7b-instruct-awq')
     ) {
       modelPrefix = `@hf/thebloke`;
     }
@@ -100,12 +102,12 @@ export class CloudflareAiChatProvider extends BaseChatProvider {
   }

   async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
-    if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
-      dataCallback({ type: "chat", data: chunk });
+    if (chunk.choices && chunk.choices[0]?.finish_reason === 'stop') {
+      dataCallback({ type: 'chat', data: chunk });
       return true;
     }
-    dataCallback({ type: "chat", data: chunk });
+    dataCallback({ type: 'chat', data: chunk });
     return false;
   }
 }
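
The per-model max_tokens caps and prefix routing exist because Cloudflare namespaces models by vendor (@cf/meta, @hf/thebloke, and so on). The endpoint itself comes from substituting the account id into the template in ProviderRepository, exactly as the getOpenAIClient above does; for example (account id is a placeholder):

  import { ProviderRepository } from './_ProviderRepository';

  const baseURL = ProviderRepository.OPENAI_COMPAT_ENDPOINTS.cloudflare.replace(
    '{CLOUDFLARE_ACCOUNT_ID}',
    'abc123', // placeholder account id
  );
  // => https://api.cloudflare.com/client/v4/accounts/abc123/ai/v1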

View File

@@ -1,4 +1,3 @@
-import { OpenAI } from "openai";
 import {
   _NotCustomized,
   castToSnapshot,
@@ -7,11 +6,14 @@ import {
   ModelPropertiesDeclarationToProperties,
   ModelSnapshotType2,
   UnionStringArray,
-} from "mobx-state-tree";
-import Message from "../models/Message.ts";
-import ChatSdk from "../lib/chat-sdk.ts";
-import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider.ts";
-import {ProviderRepository} from "./_ProviderRepository";
+} from 'mobx-state-tree';
+import { OpenAI } from 'openai';
+import ChatSdk from '../lib/chat-sdk.ts';
+import Message from '../models/Message.ts';
+import { ProviderRepository } from './_ProviderRepository';
+import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

 export class FireworksAiChatProvider extends BaseChatProvider {
   getOpenAIClient(param: CommonProviderParams): OpenAI {
@@ -22,9 +24,9 @@ export class FireworksAiChatProvider extends BaseChatProvider {
   }

   getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
-    let modelPrefix = "accounts/fireworks/models/";
-    if (param.model.toLowerCase().includes("yi-")) {
-      modelPrefix = "accounts/yi-01-ai/models/";
+    let modelPrefix = 'accounts/fireworks/models/';
+    if (param.model.toLowerCase().includes('yi-')) {
+      modelPrefix = 'accounts/yi-01-ai/models/';
     }

     return {
@@ -35,12 +37,12 @@ export class FireworksAiChatProvider extends BaseChatProvider {
   }

   async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
-    if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
-      dataCallback({ type: "chat", data: chunk });
+    if (chunk.choices && chunk.choices[0]?.finish_reason === 'stop') {
+      dataCallback({ type: 'chat', data: chunk });
       return true;
     }
-    dataCallback({ type: "chat", data: chunk });
+    dataCallback({ type: 'chat', data: chunk });
     return false;
   }
 }

View File

@@ -1,8 +1,10 @@
-import { OpenAI } from "openai";
-import ChatSdk from "../lib/chat-sdk.ts";
-import { StreamParams } from "../services/ChatService.ts";
-import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider.ts";
-import {ProviderRepository} from "./_ProviderRepository";
+import { OpenAI } from 'openai';
+import ChatSdk from '../lib/chat-sdk.ts';
+import { StreamParams } from '../services/ChatService.ts';
+import { ProviderRepository } from './_ProviderRepository';
+import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

 export class GoogleChatProvider extends BaseChatProvider {
   getOpenAIClient(param: CommonProviderParams): OpenAI {
@@ -21,14 +23,14 @@ export class GoogleChatProvider extends BaseChatProvider {
   }

   async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
-    if (chunk.choices?.[0]?.finish_reason === "stop") {
+    if (chunk.choices?.[0]?.finish_reason === 'stop') {
       dataCallback({
-        type: "chat",
+        type: 'chat',
         data: {
           choices: [
             {
-              delta: { content: chunk.choices[0].delta.content || "" },
-              finish_reason: "stop",
+              delta: { content: chunk.choices[0].delta.content || '' },
+              finish_reason: 'stop',
               index: chunk.choices[0].index,
             },
           ],
@@ -37,11 +39,11 @@ export class GoogleChatProvider extends BaseChatProvider {
       return true;
     } else {
       dataCallback({
-        type: "chat",
+        type: 'chat',
         data: {
           choices: [
             {
-              delta: { content: chunk.choices?.[0]?.delta?.content || "" },
+              delta: { content: chunk.choices?.[0]?.delta?.content || '' },
               finish_reason: null,
               index: chunk.choices?.[0]?.index || 0,
             },
@@ -56,10 +58,7 @@ export class GoogleChatProvider extends BaseChatProvider {
 export class GoogleChatSdk {
   private static provider = new GoogleChatProvider();

-  static async handleGoogleStream(
-    param: StreamParams,
-    dataCallback: (data) => void,
-  ) {
+  static async handleGoogleStream(param: StreamParams, dataCallback: (data) => void) {
     return this.provider.handleStream(
       {
         systemPrompt: param.systemPrompt,

View File

@@ -1,13 +1,14 @@
-import { OpenAI } from "openai";
 import {
   _NotCustomized,
   ISimpleType,
   ModelPropertiesDeclarationToProperties,
   ModelSnapshotType2,
   UnionStringArray,
-} from "mobx-state-tree";
-import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider.ts";
-import {ProviderRepository} from "./_ProviderRepository";
+} from 'mobx-state-tree';
+import { OpenAI } from 'openai';
+import { ProviderRepository } from './_ProviderRepository';
+import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

 export class GroqChatProvider extends BaseChatProvider {
   getOpenAIClient(param: CommonProviderParams): OpenAI {
@@ -30,17 +31,17 @@ export class GroqChatProvider extends BaseChatProvider {
       model: param.model,
       messages: safeMessages,
       stream: true,
-      ...tuningParams
+      ...tuningParams,
     };
   }

   async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
-    if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
-      dataCallback({ type: "chat", data: chunk });
+    if (chunk.choices && chunk.choices[0]?.finish_reason === 'stop') {
+      dataCallback({ type: 'chat', data: chunk });
       return true;
     }
-    dataCallback({ type: "chat", data: chunk });
+    dataCallback({ type: 'chat', data: chunk });
     return false;
   }
 }

View File

@@ -1,95 +1,97 @@
-import { OpenAI } from "openai";
-import { Utils } from "../lib/utils";
-import { ChatCompletionCreateParamsStreaming } from "openai/resources/chat/completions/completions";
-import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider";
+import { OpenAI } from 'openai';
+import { ChatCompletionCreateParamsStreaming } from 'openai/resources/chat/completions/completions';
+import { Utils } from '../lib/utils';
+import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider';

 export class MlxOmniChatProvider extends BaseChatProvider {
-  getOpenAIClient(param: CommonProviderParams): OpenAI {
-    return new OpenAI({
-      baseURL: "http://localhost:10240",
-      apiKey: param.env.MLX_API_KEY,
-    });
-  }
+  getOpenAIClient(param: CommonProviderParams): OpenAI {
+    return new OpenAI({
+      baseURL: 'http://localhost:10240',
+      apiKey: param.env.MLX_API_KEY,
+    });
+  }

-  getStreamParams(param: CommonProviderParams, safeMessages: any[]): ChatCompletionCreateParamsStreaming {
-    const baseTuningParams = {
-      temperature: 0.86,
-      top_p: 0.98,
-      presence_penalty: 0.1,
-      frequency_penalty: 0.3,
-      max_tokens: param.maxTokens as number,
-    };
-    const getTuningParams = () => {
-      return baseTuningParams;
-    };
-    let completionRequest: ChatCompletionCreateParamsStreaming = {
-      model: param.model,
-      stream: true,
-      messages: safeMessages
-    };
-    const client = this.getOpenAIClient(param);
-    const isLocal = client.baseURL.includes("localhost");
-    if(isLocal) {
-      completionRequest["messages"] = Utils.normalizeWithBlanks(safeMessages);
-      completionRequest["stream_options"] = {
-        include_usage: true
-      };
-    } else {
-      completionRequest = {...completionRequest, ...getTuningParams()};
-    }
-    return completionRequest;
-  }
+  getStreamParams(
+    param: CommonProviderParams,
+    safeMessages: any[],
+  ): ChatCompletionCreateParamsStreaming {
+    const baseTuningParams = {
+      temperature: 0.86,
+      top_p: 0.98,
+      presence_penalty: 0.1,
+      frequency_penalty: 0.3,
+      max_tokens: param.maxTokens as number,
+    };
+    const getTuningParams = () => {
+      return baseTuningParams;
+    };
+    let completionRequest: ChatCompletionCreateParamsStreaming = {
+      model: param.model,
+      stream: true,
+      messages: safeMessages,
+    };
+    const client = this.getOpenAIClient(param);
+    const isLocal = client.baseURL.includes('localhost');
+    if (isLocal) {
+      completionRequest['messages'] = Utils.normalizeWithBlanks(safeMessages);
+      completionRequest['stream_options'] = {
+        include_usage: true,
+      };
+    } else {
+      completionRequest = { ...completionRequest, ...getTuningParams() };
+    }
+    return completionRequest;
+  }

-  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
-    const isLocal = chunk.usage !== undefined;
-    if (isLocal && chunk.usage) {
-      dataCallback({
-        type: "chat",
-        data: {
-          choices: [
-            {
-              delta: { content: "" },
-              logprobs: null,
-              finish_reason: "stop",
-            },
-          ],
-        },
-      });
-      return true; // Break the stream
-    }
-    dataCallback({ type: "chat", data: chunk });
-    return false; // Continue the stream
-  }
+  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
+    const isLocal = chunk.usage !== undefined;
+    if (isLocal && chunk.usage) {
+      dataCallback({
+        type: 'chat',
+        data: {
+          choices: [
+            {
+              delta: { content: '' },
+              logprobs: null,
+              finish_reason: 'stop',
+            },
+          ],
+        },
+      });
+      return true; // Break the stream
+    }
+    dataCallback({ type: 'chat', data: chunk });
+    return false; // Continue the stream
+  }
 }

 export class MlxOmniChatSdk {
-  private static provider = new MlxOmniChatProvider();
+  private static provider = new MlxOmniChatProvider();

-  static async handleMlxOmniStream(
-    ctx: any,
-    dataCallback: (data: any) => any,
-  ) {
-    if (!ctx.messages?.length) {
-      return new Response("No messages provided", { status: 400 });
-    }
-    return this.provider.handleStream(
-      {
-        systemPrompt: ctx.systemPrompt,
-        preprocessedContext: ctx.preprocessedContext,
-        maxTokens: ctx.maxTokens,
-        messages: Utils.normalizeWithBlanks(ctx.messages),
-        model: ctx.model,
-        env: ctx.env
-      },
-      dataCallback,
-    );
-  }
+  static async handleMlxOmniStream(ctx: any, dataCallback: (data: any) => any) {
+    if (!ctx.messages?.length) {
+      return new Response('No messages provided', { status: 400 });
+    }
+    return this.provider.handleStream(
+      {
+        systemPrompt: ctx.systemPrompt,
+        preprocessedContext: ctx.preprocessedContext,
+        maxTokens: ctx.maxTokens,
+        messages: Utils.normalizeWithBlanks(ctx.messages),
+        model: ctx.model,
+        env: ctx.env,
+      },
+      dataCallback,
+    );
+  }
 }
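
The MLX provider mirrors the OpenAI one: a localhost baseURL switches the request to normalized messages plus stream_options.include_usage, and the final usage chunk is converted into an OpenAI-style stop chunk. A hedged sketch of driving the SDK end to end (model id and prompt are placeholders, and the surrounding ChatSdk plumbing is assumed to be configured):

  const print = (event: any) => {
    if (event.type === 'chat') {
      process.stdout.write(event.data.choices?.[0]?.delta?.content ?? '');
    }
  };

  await MlxOmniChatSdk.handleMlxOmniStream(
    {
      systemPrompt: 'You are a helpful assistant.',
      preprocessedContext: undefined,
      maxTokens: 512,
      messages: [{ role: 'user', content: 'Hello!' }],
      model: 'placeholder-mlx-model', // placeholder id
      env: { MLX_API_KEY: 'unused-for-localhost' },
    },
    print,
  );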

View File

@@ -1,74 +1,75 @@
-import { OpenAI } from "openai";
-import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider.ts";
-import {ProviderRepository} from "./_ProviderRepository";
+import { OpenAI } from 'openai';
+import { ProviderRepository } from './_ProviderRepository';
+import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

 export class OllamaChatProvider extends BaseChatProvider {
-  getOpenAIClient(param: CommonProviderParams): OpenAI {
-    return new OpenAI({
-      baseURL: param.env.OLLAMA_API_ENDPOINT ?? ProviderRepository.OPENAI_COMPAT_ENDPOINTS.ollama ,
-      apiKey: param.env.OLLAMA_API_KEY,
-    });
-  }
+  getOpenAIClient(param: CommonProviderParams): OpenAI {
+    return new OpenAI({
+      baseURL: param.env.OLLAMA_API_ENDPOINT ?? ProviderRepository.OPENAI_COMPAT_ENDPOINTS.ollama,
+      apiKey: param.env.OLLAMA_API_KEY,
+    });
+  }

-  getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
-    const tuningParams = {
-      temperature: 0.75,
-    };
-    const getTuningParams = () => {
-      return tuningParams;
-    };
-    return {
-      model: param.model,
-      messages: safeMessages,
-      stream: true,
-      ...getTuningParams(),
-    };
-  }
+  getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
+    const tuningParams = {
+      temperature: 0.75,
+    };
+    const getTuningParams = () => {
+      return tuningParams;
+    };
+    return {
+      model: param.model,
+      messages: safeMessages,
+      stream: true,
+      ...getTuningParams(),
+    };
+  }

-  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
-    if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
-      dataCallback({ type: "chat", data: chunk });
-      return true;
-    }
-    dataCallback({ type: "chat", data: chunk });
-    return false;
-  }
+  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
+    if (chunk.choices && chunk.choices[0]?.finish_reason === 'stop') {
+      dataCallback({ type: 'chat', data: chunk });
+      return true;
+    }
+    dataCallback({ type: 'chat', data: chunk });
+    return false;
+  }
 }

 export class OllamaChatSdk {
-  private static provider = new OllamaChatProvider();
+  private static provider = new OllamaChatProvider();

-  static async handleOllamaStream(
-    ctx: {
-      openai: OpenAI;
-      systemPrompt: any;
-      preprocessedContext: any;
-      maxTokens: unknown | number | undefined;
-      messages: any;
-      disableWebhookGeneration: boolean;
-      model: any;
-      env: Env;
-    },
-    dataCallback: (data: any) => any,
-  ) {
-    if (!ctx.messages?.length) {
-      return new Response("No messages provided", { status: 400 });
-    }
-    return this.provider.handleStream(
-      {
-        systemPrompt: ctx.systemPrompt,
-        preprocessedContext: ctx.preprocessedContext,
-        maxTokens: ctx.maxTokens,
-        messages: ctx.messages,
-        model: ctx.model,
-        env: ctx.env,
-        disableWebhookGeneration: ctx.disableWebhookGeneration,
-      },
-      dataCallback,
-    );
-  }
+  static async handleOllamaStream(
+    ctx: {
+      openai: OpenAI;
+      systemPrompt: any;
+      preprocessedContext: any;
+      maxTokens: unknown | number | undefined;
+      messages: any;
+      disableWebhookGeneration: boolean;
+      model: any;
+      env: Env;
+    },
+    dataCallback: (data: any) => any,
+  ) {
+    if (!ctx.messages?.length) {
+      return new Response('No messages provided', { status: 400 });
+    }
+    return this.provider.handleStream(
+      {
+        systemPrompt: ctx.systemPrompt,
+        preprocessedContext: ctx.preprocessedContext,
+        maxTokens: ctx.maxTokens,
+        messages: ctx.messages,
+        model: ctx.model,
+        env: ctx.env,
+        disableWebhookGeneration: ctx.disableWebhookGeneration,
+      },
+      dataCallback,
+    );
+  }
 }

View File

@@ -1,16 +1,21 @@
-import { OpenAI } from "openai";
-import { Utils } from "../lib/utils.ts";
-import { ChatCompletionCreateParamsStreaming } from "openai/resources/chat/completions/completions";
-import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider.ts";
+import { OpenAI } from 'openai';
+import { ChatCompletionCreateParamsStreaming } from 'openai/resources/chat/completions/completions';
+import { Utils } from '../lib/utils.ts';
+import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

 export class OpenAiChatProvider extends BaseChatProvider {
   getOpenAIClient(param: CommonProviderParams): OpenAI {
     return param.openai as OpenAI;
   }

-  getStreamParams(param: CommonProviderParams, safeMessages: any[]): ChatCompletionCreateParamsStreaming {
+  getStreamParams(
+    param: CommonProviderParams,
+    safeMessages: any[],
+  ): ChatCompletionCreateParamsStreaming {
     const isO1 = () => {
-      if (param.model === "o1-preview" || param.model === "o1-mini") {
+      if (param.model === 'o1-preview' || param.model === 'o1-mini') {
         return true;
       }
     };
@@ -27,8 +32,8 @@ export class OpenAiChatProvider extends BaseChatProvider {
     const getTuningParams = () => {
       if (isO1()) {
-        tuningParams["temperature"] = 1;
-        tuningParams["max_completion_tokens"] = (param.maxTokens as number) + 10000;
+        tuningParams['temperature'] = 1;
+        tuningParams['max_completion_tokens'] = (param.maxTokens as number) + 10000;
         return tuningParams;
       }
       return gpt4oTuningParams;
@@ -37,19 +42,19 @@ export class OpenAiChatProvider extends BaseChatProvider {
     let completionRequest: ChatCompletionCreateParamsStreaming = {
       model: param.model,
       stream: true,
-      messages: safeMessages
+      messages: safeMessages,
     };

     const client = this.getOpenAIClient(param);
-    const isLocal = client.baseURL.includes("localhost");
+    const isLocal = client.baseURL.includes('localhost');

-    if(isLocal) {
-      completionRequest["messages"] = Utils.normalizeWithBlanks(safeMessages);
-      completionRequest["stream_options"] = {
-        include_usage: true
+    if (isLocal) {
+      completionRequest['messages'] = Utils.normalizeWithBlanks(safeMessages);
+      completionRequest['stream_options'] = {
+        include_usage: true,
       };
     } else {
-      completionRequest = {...completionRequest, ...getTuningParams()};
+      completionRequest = { ...completionRequest, ...getTuningParams() };
     }

     return completionRequest;
@@ -60,13 +65,13 @@ export class OpenAiChatProvider extends BaseChatProvider {
     if (isLocal && chunk.usage) {
       dataCallback({
-        type: "chat",
+        type: 'chat',
         data: {
           choices: [
             {
-              delta: { content: "" },
+              delta: { content: '' },
               logprobs: null,
-              finish_reason: "stop",
+              finish_reason: 'stop',
             },
           ],
         },
@@ -74,7 +79,7 @@ export class OpenAiChatProvider extends BaseChatProvider {
       return true; // Break the stream
     }

-    dataCallback({ type: "chat", data: chunk });
+    dataCallback({ type: 'chat', data: chunk });
     return false; // Continue the stream
   }
 }
@@ -95,7 +100,7 @@ export class OpenAiChatSdk {
     dataCallback: (data: any) => any,
   ) {
     if (!ctx.messages?.length) {
-      return new Response("No messages provided", { status: 400 });
+      return new Response('No messages provided', { status: 400 });
     }

     return this.provider.handleStream(
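
The isO1 branch encodes a real API constraint: o1-preview and o1-mini reject custom sampling parameters, so temperature is pinned to 1 and the budget moves from max_tokens to max_completion_tokens with extra headroom for reasoning tokens. For maxTokens = 2000 the non-local request shape would look roughly like this (a sketch, not captured output):

  const safeMessages: any[] = []; // produced by ChatSdk.buildMessageChain in practice

  const o1Request = {
    model: 'o1-mini',
    stream: true,
    messages: safeMessages,
    temperature: 1, // forced for o1 models
    max_completion_tokens: 2000 + 10000, // maxTokens plus reasoning-token headroom
  };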

View File

@@ -1,73 +1,74 @@
-import { OpenAI } from "openai";
-import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider.ts";
+import { OpenAI } from 'openai';
+import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

 export class XaiChatProvider extends BaseChatProvider {
-  getOpenAIClient(param: CommonProviderParams): OpenAI {
-    return new OpenAI({
-      baseURL: "https://api.x.ai/v1",
-      apiKey: param.env.XAI_API_KEY,
-    });
-  }
+  getOpenAIClient(param: CommonProviderParams): OpenAI {
+    return new OpenAI({
+      baseURL: 'https://api.x.ai/v1',
+      apiKey: param.env.XAI_API_KEY,
+    });
+  }

-  getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
-    const tuningParams = {
-      temperature: 0.75,
-    };
-    const getTuningParams = () => {
-      return tuningParams;
-    };
-    return {
-      model: param.model,
-      messages: safeMessages,
-      stream: true,
-      ...getTuningParams(),
-    };
-  }
+  getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
+    const tuningParams = {
+      temperature: 0.75,
+    };
+    const getTuningParams = () => {
+      return tuningParams;
+    };
+    return {
+      model: param.model,
+      messages: safeMessages,
+      stream: true,
+      ...getTuningParams(),
+    };
+  }

-  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
-    if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
-      dataCallback({ type: "chat", data: chunk });
-      return true;
-    }
-    dataCallback({ type: "chat", data: chunk });
-    return false;
-  }
+  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
+    if (chunk.choices && chunk.choices[0]?.finish_reason === 'stop') {
+      dataCallback({ type: 'chat', data: chunk });
+      return true;
+    }
+    dataCallback({ type: 'chat', data: chunk });
+    return false;
+  }
 }

 export class XaiChatSdk {
-  private static provider = new XaiChatProvider();
+  private static provider = new XaiChatProvider();

-  static async handleXaiStream(
-    ctx: {
-      openai: OpenAI;
-      systemPrompt: any;
-      preprocessedContext: any;
-      maxTokens: unknown | number | undefined;
-      messages: any;
-      disableWebhookGeneration: boolean;
-      model: any;
-      env: Env;
-    },
-    dataCallback: (data: any) => any,
-  ) {
-    if (!ctx.messages?.length) {
-      return new Response("No messages provided", { status: 400 });
-    }
-    return this.provider.handleStream(
-      {
-        systemPrompt: ctx.systemPrompt,
-        preprocessedContext: ctx.preprocessedContext,
-        maxTokens: ctx.maxTokens,
-        messages: ctx.messages,
-        model: ctx.model,
-        env: ctx.env,
-        disableWebhookGeneration: ctx.disableWebhookGeneration,
-      },
-      dataCallback,
-    );
-  }
+  static async handleXaiStream(
+    ctx: {
+      openai: OpenAI;
+      systemPrompt: any;
+      preprocessedContext: any;
+      maxTokens: unknown | number | undefined;
+      messages: any;
+      disableWebhookGeneration: boolean;
+      model: any;
+      env: Env;
+    },
+    dataCallback: (data: any) => any,
+  ) {
+    if (!ctx.messages?.length) {
+      return new Response('No messages provided', { status: 400 });
+    }
+    return this.provider.handleStream(
+      {
+        systemPrompt: ctx.systemPrompt,
+        preprocessedContext: ctx.preprocessedContext,
+        maxTokens: ctx.maxTokens,
+        messages: ctx.messages,
+        model: ctx.model,
+        env: ctx.env,
+        disableWebhookGeneration: ctx.disableWebhookGeneration,
+      },
+      dataCallback,
+    );
+  }
 }
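
Across the changed files the mechanical edits are consistent: double quotes become single quotes, trailing commas are added, long signatures wrap near 100 characters, and imports are sorted. The commit's actual ESLint/Prettier configuration is not part of this excerpt; a config that would produce this style looks roughly like:

  // prettier.config.mjs — assumed config; the repo's actual file is not shown here
  export default {
    singleQuote: true,
    trailingComma: 'all',
    printWidth: 100,
    semi: true,
  };

The import reordering additionally suggests something like eslint-plugin-import's order rule or simple-import-sort layered on top, though neither is visible in this excerpt.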