improves interoperability of model providers; local and remote providers can be used together seamlessly

This commit is contained in:
geoffsee
2025-06-09 13:54:37 -04:00
committed by Geoff Seemueller
parent ad7dc5c0a6
commit f29bb6779c
14 changed files with 526 additions and 234 deletions

View File

@@ -0,0 +1,76 @@
export class ProviderRepository {
  #providers: { name: string; key: string; endpoint: string }[] = [];

  constructor(env: Record<string, any>) {
    this.setProviders(env);
  }

  // OpenAI-compatible base URLs for every supported provider, local and remote.
  static OPENAI_COMPAT_ENDPOINTS = {
    xai: 'https://api.x.ai/v1',
    groq: 'https://api.groq.com/openai/v1',
    google: 'https://generativelanguage.googleapis.com/v1beta/openai',
    fireworks: 'https://api.fireworks.ai/inference/v1',
    cohere: 'https://api.cohere.ai/compatibility/v1',
    cloudflare: 'https://api.cloudflare.com/client/v4/accounts/{CLOUDFLARE_ACCOUNT_ID}/ai/v1',
    anthropic: 'https://api.anthropic.com/v1/',
    openai: 'https://api.openai.com/v1/',
    cerebras: 'https://api.cerebras.ai/v1',
    ollama: 'http://localhost:11434',
    mlx: 'http://localhost:10240',
  };

  // Resolves the provider name (model family) for a model id from the
  // supported-models list cached in KV.
  static async getModelFamily(model: string, env: Env): Promise<string> {
    const allModels = await env.KV_STORAGE.get('supportedModels');
    const models = JSON.parse(allModels ?? '[]');
    const modelData = models.filter((m) => m.id === model);
    return modelData[0].provider;
  }

  // Returns the full metadata record for the model referenced by meta.model.
  static async getModelMeta(meta, env: Env) {
    const allModels = await env.KV_STORAGE.get('supportedModels');
    const models = JSON.parse(allModels ?? '[]');
    return models.filter((m) => m.id === meta.model).pop();
  }

  getProviders(): { name: string; key: string; endpoint: string }[] {
    return this.#providers;
  }

  // Detects configured providers by scanning the env for *_KEY variables,
  // e.g. ANTHROPIC_API_KEY -> anthropic, OLLAMA_API_KEY -> ollama.
  setProviders(env: Record<string, any>) {
    const envKeys = Object.keys(env);
    for (let i = 0; i < envKeys.length; i++) {
      if (envKeys[i].endsWith('KEY')) {
        const detectedProvider = envKeys[i].split('_')[0].toLowerCase();
        switch (detectedProvider) {
          case 'anthropic':
            this.#providers.push({
              name: 'anthropic',
              key: env.ANTHROPIC_API_KEY,
              endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS['anthropic'],
            });
            break;
          case 'gemini':
            // The key is GEMINI_*, but the endpoint is registered under 'google'.
            this.#providers.push({
              name: 'google',
              key: env.GEMINI_API_KEY,
              endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS['google'],
            });
            break;
          case 'cloudflare':
            this.#providers.push({
              name: 'cloudflare',
              key: env.CLOUDFLARE_API_KEY,
              // The Cloudflare endpoint template embeds the account id.
              endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS[detectedProvider].replace(
                '{CLOUDFLARE_ACCOUNT_ID}',
                env.CLOUDFLARE_ACCOUNT_ID,
              ),
            });
            break; // without this break, the default case would add a duplicate entry
          default:
            this.#providers.push({
              name: detectedProvider,
              key: env[envKeys[i]],
              endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS[detectedProvider],
            });
        }
      }
    }
  }
}
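
For context, a minimal usage sketch (not part of this commit) showing how key detection mixes remote and local providers in one list; the env values below are hypothetical:

// Hypothetical Worker env: one hosted provider key plus one local provider key.
const env = {
  ANTHROPIC_API_KEY: 'sk-ant-xxxx',
  OLLAMA_API_KEY: 'ollama', // any value works; the *KEY suffix triggers detection
};

const repo = new ProviderRepository(env);
console.log(repo.getProviders());
// => [
//   { name: 'anthropic', key: 'sk-ant-xxxx', endpoint: 'https://api.anthropic.com/v1/' },
//   { name: 'ollama', key: 'ollama', endpoint: 'http://localhost:11434' }
// ]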

View File

@@ -1,10 +1,11 @@
 import {OpenAI} from "openai";
 import {BaseChatProvider, CommonProviderParams} from "./chat-stream-provider.ts";
+import {ProviderRepository} from "./_ProviderRepository";

 export class CerebrasChatProvider extends BaseChatProvider {
   getOpenAIClient(param: CommonProviderParams): OpenAI {
     return new OpenAI({
-      baseURL: "https://api.cerebras.ai/v1",
+      baseURL: ProviderRepository.OPENAI_COMPAT_ENDPOINTS.cerebras,
       apiKey: param.env.CEREBRAS_API_KEY,
     });
   }

View File

@@ -35,6 +35,7 @@ export abstract class BaseChatProvider implements ChatStreamProvider {
       model: param.model,
       assistantPrompt,
       toolResults: param.preprocessedContext,
+      env: param.env
     });
     const client = this.getOpenAIClient(param);

View File

@@ -69,6 +69,7 @@ export class ClaudeChatProvider extends BaseChatProvider {
       model: param.model,
       assistantPrompt,
       toolResults: param.preprocessedContext,
+      env: param.env,
     });
     const streamParams = this.getStreamParams(param, safeMessages);

View File

@@ -1,13 +1,12 @@
 import {OpenAI} from "openai";
 import {BaseChatProvider, CommonProviderParams} from "./chat-stream-provider.ts";
+import {ProviderRepository} from "./_ProviderRepository";

 export class CloudflareAiChatProvider extends BaseChatProvider {
   getOpenAIClient(param: CommonProviderParams): OpenAI {
-    const cfAiURL = `https://api.cloudflare.com/client/v4/accounts/${param.env.CLOUDFLARE_ACCOUNT_ID}/ai/v1`;
     return new OpenAI({
       apiKey: param.env.CLOUDFLARE_API_KEY,
-      baseURL: cfAiURL,
+      baseURL: ProviderRepository.OPENAI_COMPAT_ENDPOINTS.cloudflare.replace("{CLOUDFLARE_ACCOUNT_ID}", param.env.CLOUDFLARE_ACCOUNT_ID),
     });
   }

View File

@@ -11,12 +11,13 @@ import {
 import Message from "../models/Message.ts";
 import ChatSdk from "../lib/chat-sdk.ts";
 import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider.ts";
+import {ProviderRepository} from "./_ProviderRepository";

 export class FireworksAiChatProvider extends BaseChatProvider {
   getOpenAIClient(param: CommonProviderParams): OpenAI {
     return new OpenAI({
       apiKey: param.env.FIREWORKS_API_KEY,
-      baseURL: "https://api.fireworks.ai/inference/v1",
+      baseURL: ProviderRepository.OPENAI_COMPAT_ENDPOINTS.fireworks,
     });
   }

View File

@@ -2,11 +2,12 @@ import { OpenAI } from "openai";
 import ChatSdk from "../lib/chat-sdk.ts";
 import { StreamParams } from "../services/ChatService.ts";
 import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider.ts";
+import {ProviderRepository} from "./_ProviderRepository";

 export class GoogleChatProvider extends BaseChatProvider {
   getOpenAIClient(param: CommonProviderParams): OpenAI {
     return new OpenAI({
-      baseURL: "https://generativelanguage.googleapis.com/v1beta/openai",
+      baseURL: ProviderRepository.OPENAI_COMPAT_ENDPOINTS.google,
       apiKey: param.env.GEMINI_API_KEY,
     });
   }

View File

@@ -7,11 +7,12 @@ import {
   UnionStringArray,
 } from "mobx-state-tree";
 import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider.ts";
+import {ProviderRepository} from "./_ProviderRepository";

 export class GroqChatProvider extends BaseChatProvider {
   getOpenAIClient(param: CommonProviderParams): OpenAI {
     return new OpenAI({
-      baseURL: "https://api.groq.com/openai/v1",
+      baseURL: ProviderRepository.OPENAI_COMPAT_ENDPOINTS.groq,
       apiKey: param.env.GROQ_API_KEY,
     });
   }

View File

@@ -0,0 +1,73 @@
import { OpenAI } from "openai";
import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider.ts";

export class MlxOmniChatProvider extends BaseChatProvider {
  getOpenAIClient(param: CommonProviderParams): OpenAI {
    return new OpenAI({
      // Defaults to the local MLX server port when no endpoint is configured.
      baseURL: param.env.MLX_API_ENDPOINT ?? "http://localhost:10240",
      apiKey: param.env.MLX_API_KEY,
    });
  }

  getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
    const tuningParams = {
      temperature: 0.75,
    };
    const getTuningParams = () => {
      return tuningParams;
    };
    return {
      model: param.model,
      messages: safeMessages,
      stream: true,
      ...getTuningParams(),
    };
  }

  // Forwards every chunk to the callback; returns true once the stream reports "stop".
  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
    if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
      dataCallback({ type: "chat", data: chunk });
      return true;
    }
    dataCallback({ type: "chat", data: chunk });
    return false;
  }
}

export class MlxOmniChatSdk {
  private static provider = new MlxOmniChatProvider();

  static async handleMlxOmniStream(
    ctx: {
      openai: OpenAI;
      systemPrompt: any;
      preprocessedContext: any;
      maxTokens: unknown | number | undefined;
      messages: any;
      disableWebhookGeneration: boolean;
      model: any;
      env: Env;
    },
    dataCallback: (data: any) => any,
  ) {
    if (!ctx.messages?.length) {
      return new Response("No messages provided", { status: 400 });
    }
    return this.provider.handleStream(
      {
        systemPrompt: ctx.systemPrompt,
        preprocessedContext: ctx.preprocessedContext,
        maxTokens: ctx.maxTokens,
        messages: ctx.messages,
        model: ctx.model,
        env: ctx.env,
        disableWebhookGeneration: ctx.disableWebhookGeneration,
      },
      dataCallback,
    );
  }
}
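
A hypothetical call site for the MLX SDK (not in this diff; the model id and env fields are assumptions for illustration):

// Assumes env carries MLX_API_ENDPOINT / MLX_API_KEY; otherwise localhost:10240 is used.
const response = await MlxOmniChatSdk.handleMlxOmniStream(
  {
    openai: undefined as any, // present in the ctx shape but unused by this provider
    systemPrompt: "You are a helpful assistant.",
    preprocessedContext: {},
    maxTokens: 2048,
    messages: [{ role: "user", content: "Hello!" }],
    disableWebhookGeneration: true,
    model: "mlx-community/Meta-Llama-3-8B-Instruct-4bit", // hypothetical model id
    env,
  },
  (data) => console.log(data), // receives { type: "chat", data: chunk } per chunk
);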

View File

@@ -0,0 +1,73 @@
import { OpenAI } from "openai";
import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider.ts";

export class OllamaChatProvider extends BaseChatProvider {
  getOpenAIClient(param: CommonProviderParams): OpenAI {
    return new OpenAI({
      // Defaults to the standard local Ollama port when no endpoint is configured.
      baseURL: param.env.OLLAMA_API_ENDPOINT ?? "http://localhost:11434",
      apiKey: param.env.OLLAMA_API_KEY,
    });
  }

  getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
    const tuningParams = {
      temperature: 0.75,
    };
    const getTuningParams = () => {
      return tuningParams;
    };
    return {
      model: param.model,
      messages: safeMessages,
      stream: true,
      ...getTuningParams(),
    };
  }

  // Forwards every chunk to the callback; returns true once the stream reports "stop".
  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
    if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
      dataCallback({ type: "chat", data: chunk });
      return true;
    }
    dataCallback({ type: "chat", data: chunk });
    return false;
  }
}

export class OllamaChatSdk {
  private static provider = new OllamaChatProvider();

  static async handleOllamaStream(
    ctx: {
      openai: OpenAI;
      systemPrompt: any;
      preprocessedContext: any;
      maxTokens: unknown | number | undefined;
      messages: any;
      disableWebhookGeneration: boolean;
      model: any;
      env: Env;
    },
    dataCallback: (data: any) => any,
  ) {
    if (!ctx.messages?.length) {
      return new Response("No messages provided", { status: 400 });
    }
    return this.provider.handleStream(
      {
        systemPrompt: ctx.systemPrompt,
        preprocessedContext: ctx.preprocessedContext,
        maxTokens: ctx.maxTokens,
        messages: ctx.messages,
        model: ctx.model,
        env: ctx.env,
        disableWebhookGeneration: ctx.disableWebhookGeneration,
      },
      dataCallback,
    );
  }
}
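
To show how the local SDKs compose with the registry, a hypothetical dispatcher (not part of this commit) could resolve a model's provider from KV via ProviderRepository.getModelFamily and route to the matching local handler; the helper name and ctx shape are assumptions:

// Hypothetical routing helper; assumes ctx matches the SDK context shapes above.
async function routeLocalChat(ctx: any, dataCallback: (data: any) => void) {
  const family = await ProviderRepository.getModelFamily(ctx.model, ctx.env);
  switch (family) {
    case "ollama":
      return OllamaChatSdk.handleOllamaStream(ctx, dataCallback);
    case "mlx":
      return MlxOmniChatSdk.handleMlxOmniStream(ctx, dataCallback);
    default:
      // Remote OpenAI-compatible providers are handled by their own provider classes.
      throw new Error(`No local handler for provider: ${family}`);
  }
}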