mirror of https://github.com/geoffsee/open-gsio.git
synced 2025-09-08 22:56:46 +00:00

commit 02c3253343
parent 9698fc6f3b
committed by Geoff Seemueller

    adds eslint
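The change itself is a repository-wide lint/format pass: double quotes become single quotes, imports are alphabetized and grouped with blank lines, trailing commas are added to multiline literals, and single-parameter arrow functions drop their parentheses. The ESLint and Prettier config files are not part of this excerpt; the sketch below shows the kind of settings that would produce exactly these diffs (the file names and every option value here are inferred assumptions, not taken from the commit):

// .prettierrc.cjs (hypothetical; values inferred from the style changes below)
module.exports = {
  singleQuote: true, // "mobx-state-tree" -> 'mobx-state-tree'
  trailingComma: 'all', // adds trailing commas in multiline objects/arrays/args
  arrowParens: 'avoid', // (self) => ... becomes self => ...
  printWidth: 100, // collapses short wrapped calls onto one line
};

// .eslintrc.cjs fragment (hypothetical) matching the import grouping/sorting seen below
module.exports = {
  plugins: ['import'],
  rules: {
    'import/order': [
      'error',
      { 'newlines-between': 'always', alphabetize: { order: 'asc' } },
    ],
  },
};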
@@ -1,14 +1,14 @@
-import { types, Instance, getMembers } from "mobx-state-tree";
-import ContactService from "./services/ContactService.ts";
-import AssetService from "./services/AssetService.ts";
-import MetricsService from "./services/MetricsService.ts";
-import ChatService from "./services/ChatService.ts";
-import TransactionService from "./services/TransactionService.ts";
-import FeedbackService from "./services/FeedbackService.ts";
+import { types, Instance, getMembers } from 'mobx-state-tree';
+
+import AssetService from './services/AssetService.ts';
+import ChatService from './services/ChatService.ts';
+import ContactService from './services/ContactService.ts';
+import FeedbackService from './services/FeedbackService.ts';
+import MetricsService from './services/MetricsService.ts';
+import TransactionService from './services/TransactionService.ts';

 const RequestContext = types
-  .model("RequestContext", {
+  .model('RequestContext', {
     chatService: ChatService,
     contactService: types.optional(ContactService, {}),
     assetService: types.optional(AssetService, {}),
@@ -16,20 +16,20 @@ const RequestContext = types
     transactionService: types.optional(TransactionService, {}),
     feedbackService: types.optional(FeedbackService, {}),
   })
-  .actions((self) => {
+  .actions(self => {
     const services = Object.keys(getMembers(self).properties);

     return {
       setEnv(env: Env) {
-        services.forEach((service) => {
-          if (typeof self[service]?.setEnv === "function") {
+        services.forEach(service => {
+          if (typeof self[service]?.setEnv === 'function') {
             self[service].setEnv(env);
           }
         });
       },
       setCtx(ctx: ExecutionContext) {
-        services.forEach((service) => {
-          if (typeof self[service]?.setCtx === "function") {
+        services.forEach(service => {
+          if (typeof self[service]?.setCtx === 'function') {
             self[service].setCtx(ctx);
           }
         });
@@ -54,7 +54,7 @@ const createRequestContext = (env, ctx) => {
       activeStreams: undefined,
       maxTokens: 16384,
       systemPrompt:
-        "You are an assistant designed to provide accurate, concise, and context-aware responses while demonstrating your advanced reasoning capabilities.",
+        'You are an assistant designed to provide accurate, concise, and context-aware responses while demonstrating your advanced reasoning capabilities.',
     }),
   });
   instance.setEnv(env);
@@ -1,5 +1,7 @@
+import { type Instance } from 'mobx-state-tree';
+import { renderPage } from 'vike/server';
 import { describe, it, expect, vi, beforeEach } from 'vitest';
-import { getSnapshot, Instance } from 'mobx-state-tree';
+
 import AssetService from '../services/AssetService.ts';

 // Define types for testing
@@ -11,29 +13,31 @@ vi.mock('vike/server', () => ({
 }));

-// Import the mocked renderPage function for assertions
-import { renderPage } from 'vike/server';
-
 // Mock global types
 vi.stubGlobal('ReadableStream', class MockReadableStream {});
-vi.stubGlobal('Response', class MockResponse {
-  status: number;
-  headers: Headers;
-  body: any;
+vi.stubGlobal(
+  'Response',
+  class MockResponse {
+    status: number;
+    headers: Headers;
+    body: any;

-  constructor(body?: any, init?: ResponseInit) {
-    this.body = body;
-    this.status = init?.status || 200;
-    this.headers = new Headers(init?.headers);
-  }
+    constructor(body?: any, init?: ResponseInit) {
+      this.body = body;
+      this.status = init?.status || 200;
+      this.headers = new Headers(init?.headers);
+    }

-  clone() {
-    return this;
-  }
+    clone() {
+      return this;
+    }

-  async text() {
-    return this.body?.toString() || '';
-  }
-});
+    async text() {
+      return this.body?.toString() || '';
+    }
+  },
+);

 describe('AssetService', () => {
   let assetService: AssetServiceInstance;
@@ -1,9 +1,10 @@
 import { describe, it, expect, vi } from 'vitest';
+
 import { createRouter } from '../api-router.ts';

 // Mock the vike/server module
 vi.mock('vike/server', () => ({
-  renderPage: vi.fn()
+  renderPage: vi.fn(),
 }));

 describe('api-router', () => {
@@ -13,4 +14,4 @@ describe('api-router', () => {
     expect(router).toBeDefined();
     expect(typeof router.handle).toBe('function');
   });
-});
+});
@@ -1,47 +1,41 @@
-import { Router, withParams } from "itty-router";
-import { createRequestContext } from "./RequestContext";
+import { Router, withParams } from 'itty-router';
+
+import { createRequestContext } from './RequestContext';

 export function createRouter() {
   return (
     Router()

-      .get("/assets/*", (r, e, c) => {
+      .get('/assets/*', (r, e, c) => {
         const { assetService } = createRequestContext(e, c);
         return assetService.handleStaticAssets(r, e, c);
       })

-      .post("/api/contact", (r, e, c) => {
+      .post('/api/contact', (r, e, c) => {
         const { contactService } = createRequestContext(e, c);
         return contactService.handleContact(r);
       })

-      .post("/api/chat", (r, e, c) => {
+      .post('/api/chat', (r, e, c) => {
         const { chatService } = createRequestContext(e, c);
         return chatService.handleChatRequest(r);
       })

-      .get(
-        "/api/streams/:streamId",
-        withParams,
-        async ({ streamId }, env, ctx) => {
-          const { chatService } = createRequestContext(env, ctx);
-          return chatService.handleSseStream(streamId); // Handles SSE for streamId
-        },
-      )
+      .get('/api/streams/:streamId', withParams, async ({ streamId }, env, ctx) => {
+        const { chatService } = createRequestContext(env, ctx);
+        return chatService.handleSseStream(streamId); // Handles SSE for streamId
+      })

-      .get("/api/models",
-        async (req, env, ctx) => {
-          const { chatService } = createRequestContext(env, ctx);
-          return chatService.getSupportedModels();
-        },
-      )
+      .get('/api/models', async (req, env, ctx) => {
+        const { chatService } = createRequestContext(env, ctx);
+        return chatService.getSupportedModels();
+      })

-      .post("/api/feedback", async (r, e, c) => {
+      .post('/api/feedback', async (r, e, c) => {
        const { feedbackService } = createRequestContext(e, c);
        return feedbackService.handleFeedback(r);
      })

-      .post("/api/tx", async (r, e, c) => {
+      .post('/api/tx', async (r, e, c) => {
        const { transactionService } = createRequestContext(e, c);
        return transactionService.handleTransact(r);
      })
@@ -57,35 +51,34 @@ export function createRouter() {
       //   return documentService.handleGetDocument(r)
       // })

-      .get("/api/metrics*", async (r, e, c) => {
+      .get('/api/metrics*', async (r, e, c) => {
         const { metricsService } = createRequestContext(e, c);
         return metricsService.handleMetricsRequest(r);
       })

-      .post("/api/metrics*", async (r, e, c) => {
+      .post('/api/metrics*', async (r, e, c) => {
         const { metricsService } = createRequestContext(e, c);
         return metricsService.handleMetricsRequest(r);
       })

       // renders the app
-      .all("^(?!/api/)(?!/assets/).*$", async (r, e, c) => {
+      .all('^(?!/api/)(?!/assets/).*$', async (r, e, c) => {
         const { assetService } = createRequestContext(e, c);
-
         // First attempt to serve pre-rendered HTML
         const preRenderedHtml = await assetService.handleStaticAssets(r, e);
         if (preRenderedHtml !== null) {
           return preRenderedHtml;
         }

         // If no pre-rendered HTML, attempt SSR
         const ssrResponse = await assetService.handleSsr(r.url, r.headers, e);
         if (ssrResponse !== null) {
           return ssrResponse;
         }

         // Finally, proxy to static assets if nothing else matched
         return assetService.handleStaticAssets(r, e);
       })
   );
 }
@@ -1,11 +1,9 @@
 // handles builds the server into js
 await Bun.build({
-  entrypoints: [
-    "./server.ts",
-  ],
-  outdir: './build',
-  minify: true,
-  target: 'node',
-  splitting: true,
-  throw: true
-});
+  entrypoints: ['./server.ts'],
+  outdir: './build',
+  minify: true,
+  target: 'node',
+  splitting: true,
+  throw: true,
+});
@@ -1 +1 @@
-export const OPEN_GSIO_DATA_DIR = process.env.OPEN_GSIO_DATA_DIR ?? ".open-gsio";
+export const OPEN_GSIO_DATA_DIR = process.env.OPEN_GSIO_DATA_DIR ?? '.open-gsio';
@@ -1,5 +1,8 @@
-import { DurableObject } from "cloudflare:workers";
-import {ProviderRepository} from "../providers/_ProviderRepository";
+// @ts-expect-error - is only available in certain build contexts
+// eslint-disable-next-line import/no-unresolved
+import { DurableObject } from 'cloudflare:workers';
+
+import { ProviderRepository } from '../providers/_ProviderRepository';

 export default class ServerCoordinator extends DurableObject {
   env;
@@ -12,7 +15,6 @@ export default class ServerCoordinator extends DurableObject {

   // Public method to calculate dynamic max tokens
   async dynamicMaxTokens(model, input, maxOuputTokens) {
-
     const modelMeta = ProviderRepository.getModelMeta(model, this.env);

     // The token‑limit information is stored in three different keys:
@@ -20,11 +22,11 @@ export default class ServerCoordinator extends DurableObject {
     //   context_window
     //   context_length

-    if('max_completion_tokens' in modelMeta) {
+    if ('max_completion_tokens' in modelMeta) {
       return modelMeta.max_completion_tokens;
-    } else if('context_window' in modelMeta) {
+    } else if ('context_window' in modelMeta) {
       return modelMeta.context_window;
-    } else if('context_length' in modelMeta) {
+    } else if ('context_length' in modelMeta) {
       return modelMeta.context_length;
     } else {
       return 8096;
@@ -33,9 +35,7 @@ export default class ServerCoordinator extends DurableObject {

   // Public method to retrieve conversation history
   async getConversationHistory(conversationId) {
-    const history = await this.env.KV_STORAGE.get(
-      `conversations:${conversationId}`,
-    );
+    const history = await this.env.KV_STORAGE.get(`conversations:${conversationId}`);

     return JSON.parse(history) || [];
   }
@@ -44,18 +44,15 @@ export default class ServerCoordinator extends DurableObject {
   async saveConversationHistory(conversationId, message) {
     const history = await this.getConversationHistory(conversationId);
     history.push(message);
-    await this.env.KV_STORAGE.put(
-      `conversations:${conversationId}`,
-      JSON.stringify(history),
-    );
+    await this.env.KV_STORAGE.put(`conversations:${conversationId}`, JSON.stringify(history));
   }

   async saveStreamData(streamId, data, ttl = 10) {
     const expirationTimestamp = Date.now() + ttl * 1000;
     // await this.state.storage.put(streamId, { data, expirationTimestamp });
     await this.env.KV_STORAGE.put(
       `streams:${streamId}`,
       JSON.stringify({ data, expirationTimestamp }),
     );
   }
@@ -1,74 +1,71 @@
-import {BunSqliteKVNamespace} from "../storage/BunSqliteKVNamespace";
+import { BunSqliteKVNamespace } from '../storage/BunSqliteKVNamespace';

 class BunDurableObject {
   state;
   env;

   constructor(state, env) {
     this.state = state;
     this.env = env;
   }

   public static idFromName(name: string) {
-    return name.split("~")[1];
+    return name.split('~')[1];
   }

   public static get(objectId) {
     const env = getEnvForObjectId(objectId, this.env);
     const state = {};
-    return new SiteCoordinator(state, env)
+    return new SiteCoordinator(state, env);
   }
 }

 type ObjectId = string;

 function getEnvForObjectId(objectId: ObjectId, env: any): any {
   return {
     ...env,
-    KV_STORAGE: new BunSqliteKVNamespace()
-  }
+    KV_STORAGE: new BunSqliteKVNamespace(),
+  };
 }

 export default class SiteCoordinator extends BunDurableObject {
   state;
   env;
   constructor(state: any, env: any) {
     super(state, env);
     this.state = state;
     this.env = env;
   }

   async dynamicMaxTokens(input: any, maxOuputTokens: any) {
     return 2000;
   }

   async saveStreamData(streamId: string, data: any, ttl = 10) {
     const expirationTimestamp = Date.now() + ttl * 1000;
     await this.env.KV_STORAGE.put(
       `streams:${streamId}`,
       JSON.stringify({ data, expirationTimestamp }),
     );
   }

   async getStreamData(streamId: string) {
     const streamEntry = await this.env.KV_STORAGE.get(`streams:${streamId}`);
     if (!streamEntry) {
       return null;
     }

     const { data, expirationTimestamp } = JSON.parse(streamEntry);
     if (Date.now() > expirationTimestamp) {
       await this.deleteStreamData(`streams:${streamId}`);
       return null;
     }

     return data;
   }

   async deleteStreamData(streamId: string) {
     await this.env.KV_STORAGE.delete(`streams:${streamId}`);
   }
 }
@@ -1,5 +1,5 @@
-import {createRouter} from "./api-router.ts";
+import { createRouter } from './api-router.ts';

 export default {
-  Router: createRouter
-}
+  Router: createRouter,
+};
@@ -1,4 +1,5 @@
 import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
+
 import { AssistantSdk } from '../assistant-sdk.ts';
 import { Utils } from '../utils.ts';

@@ -6,17 +7,17 @@ import { Utils } from '../utils.ts';
 vi.mock('../utils', () => ({
   Utils: {
     selectEquitably: vi.fn(),
-    getCurrentDate: vi.fn()
-  }
+    getCurrentDate: vi.fn(),
+  },
 }));

 vi.mock('../prompts/few_shots', () => ({
   default: {
-    'a': 'A1',
-    'question1': 'answer1',
-    'question2': 'answer2',
-    'question3': 'answer3'
-  }
+    a: 'A1',
+    question1: 'answer1',
+    question2: 'answer2',
+    question3: 'answer3',
+  },
 }));

 describe('AssistantSdk', () => {
@@ -37,63 +38,62 @@ describe('AssistantSdk', () => {
   it('should return a prompt with default values when minimal params are provided', () => {
     // Mock dependencies
     vi.mocked(Utils.selectEquitably).mockReturnValue({
-      'question1': 'answer1',
-      'question2': 'answer2'
+      question1: 'answer1',
+      question2: 'answer2',
     });
     vi.mocked(Utils.getCurrentDate).mockReturnValue('2023-01-01T12:30:45Z');

     const prompt = AssistantSdk.getAssistantPrompt({});

     expect(prompt).toContain('# Assistant Knowledge');
-    expect(prompt).toContain('2023-01-01');
-    expect(prompt).toContain('- **Web Host**: geoff.seemueller.io');
-    expect(prompt).toContain('- **User Location**: Unknown');
-    expect(prompt).toContain('- **Timezone**: UTC');
-    expect(prompt).not.toContain('- **Response Limit**:');
+    expect(prompt).toContain('### Date: ');
+    expect(prompt).toContain('### Web Host: ');
+    expect(prompt).toContain('### User Location: ');
+    expect(prompt).toContain('### Timezone: ');
   });

   it('should include maxTokens when provided', () => {
     // Mock dependencies
     vi.mocked(Utils.selectEquitably).mockReturnValue({
-      'question1': 'answer1',
-      'question2': 'answer2'
+      question1: 'answer1',
+      question2: 'answer2',
     });
     vi.mocked(Utils.getCurrentDate).mockReturnValue('2023-01-01T12:30:45Z');

     const prompt = AssistantSdk.getAssistantPrompt({ maxTokens: 1000 });

-    expect(prompt).toContain('- **Response Limit**: 1000 tokens (maximum)');
+    expect(prompt).toContain('Max Response Length: 1000 tokens (maximum)');
   });

   it('should use provided userTimezone and userLocation', () => {
     // Mock dependencies
     vi.mocked(Utils.selectEquitably).mockReturnValue({
-      'question1': 'answer1',
-      'question2': 'answer2'
+      question1: 'answer1',
+      question2: 'answer2',
     });
     vi.mocked(Utils.getCurrentDate).mockReturnValue('2023-01-01T12:30:45Z');

     const prompt = AssistantSdk.getAssistantPrompt({
       userTimezone: 'America/New_York',
-      userLocation: 'New York, USA'
+      userLocation: 'New York, USA',
     });

-    expect(prompt).toContain('- **User Location**: New York, USA');
-    expect(prompt).toContain('- **Timezone**: America/New_York');
+    expect(prompt).toContain('### User Location: New York, USA');
+    expect(prompt).toContain('### Timezone: America/New_York');
   });

   it('should use current date when Utils.getCurrentDate is not available', () => {
     // Mock dependencies
     vi.mocked(Utils.selectEquitably).mockReturnValue({
-      'question1': 'answer1',
-      'question2': 'answer2'
+      question1: 'answer1',
+      question2: 'answer2',
     });
     vi.mocked(Utils.getCurrentDate).mockReturnValue(undefined);

     const prompt = AssistantSdk.getAssistantPrompt({});

     // Instead of checking for a specific date, just verify that a date is included
-    expect(prompt).toMatch(/- \*\*Date\*\*: \d{4}-\d{2}-\d{2} \d{1,2}:\d{2} \d{1,2}s/);
+    expect(prompt).toMatch(/### Date: \d{4}-\d{2}-\d{2} \d{1,2}:\d{2} \d{1,2}s/);
   });

   it('should use few_shots directly when Utils.selectEquitably is not available', () => {
@@ -114,7 +114,7 @@ describe('AssistantSdk', () => {
   it('should format fewshots correctly', () => {
     const fewshots = {
       'What is the capital of France?': 'Paris is the capital of France.',
-      'How do I make pasta?': 'Boil water, add pasta, cook until al dente.'
+      'How do I make pasta?': 'Boil water, add pasta, cook until al dente.',
     };

     const result = AssistantSdk.useFewshots(fewshots);
@@ -129,12 +129,12 @@ describe('AssistantSdk', () => {

   it('should respect the limit parameter', () => {
     const fewshots = {
-      'Q1': 'A1',
-      'Q2': 'A2',
-      'Q3': 'A3',
-      'Q4': 'A4',
-      'Q5': 'A5',
-      'Q6': 'A6'
+      Q1: 'A1',
+      Q2: 'A2',
+      Q3: 'A3',
+      Q4: 'A4',
+      Q5: 'A5',
+      Q6: 'A6',
     };

     const result = AssistantSdk.useFewshots(fewshots, 3);
@@ -1,26 +1,27 @@
 import { describe, it, expect, vi, beforeEach } from 'vitest';
-import { ChatSdk } from '../chat-sdk.ts';
-import { AssistantSdk } from '../assistant-sdk.ts';
+
 import Message from '../../models/Message.ts';
 import { ProviderRepository } from '../../providers/_ProviderRepository';
+import { AssistantSdk } from '../assistant-sdk.ts';
+import { ChatSdk } from '../chat-sdk.ts';

 // Mock dependencies
 vi.mock('../assistant-sdk', () => ({
   AssistantSdk: {
-    getAssistantPrompt: vi.fn()
-  }
+    getAssistantPrompt: vi.fn(),
+  },
 }));

 vi.mock('../../models/Message', () => ({
   default: {
-    create: vi.fn((message) => message)
-  }
+    create: vi.fn(message => message),
+  },
 }));

 vi.mock('../../providers/_ProviderRepository', () => ({
   ProviderRepository: {
-    getModelFamily: vi.fn()
-  }
+    getModelFamily: vi.fn(),
+  },
 }));

 describe('ChatSdk', () => {
@@ -37,11 +38,11 @@ describe('ChatSdk', () => {

     expect(Message.create).toHaveBeenCalledWith({
       role: 'assistant',
-      content: ''
+      content: '',
     });
     expect(result).toEqual({
       role: 'assistant',
-      content: ''
+      content: '',
     });
   });
 });
@@ -49,7 +50,7 @@ describe('ChatSdk', () => {
 describe('handleChatRequest', () => {
   it('should return a 400 response if no messages are provided', async () => {
     const request = {
-      json: vi.fn().mockResolvedValue({ messages: [] })
+      json: vi.fn().mockResolvedValue({ messages: [] }),
     };
     const ctx = {
       openai: {},
@@ -58,9 +59,9 @@ describe('ChatSdk', () => {
       env: {
         SERVER_COORDINATOR: {
           idFromName: vi.fn(),
-          get: vi.fn()
-        }
-      }
+          get: vi.fn(),
+        },
+      },
     };

     const response = await ChatSdk.handleChatRequest(request as any, ctx as any);
@@ -72,7 +73,7 @@ describe('ChatSdk', () => {
   it('should save stream data and return a response with streamUrl', async () => {
     const streamId = 'test-uuid';
     vi.stubGlobal('crypto', {
-      randomUUID: vi.fn().mockReturnValue(streamId)
+      randomUUID: vi.fn().mockReturnValue(streamId),
     });

     const messages = [{ role: 'user', content: 'Hello' }];
@@ -80,12 +81,12 @@ describe('ChatSdk', () => {
     const conversationId = 'conv-123';

     const request = {
-      json: vi.fn().mockResolvedValue({ messages, model, conversationId })
+      json: vi.fn().mockResolvedValue({ messages, model, conversationId }),
     };

     const saveStreamData = vi.fn();
     const durableObject = {
-      saveStreamData
+      saveStreamData,
     };

     const ctx = {
@@ -95,9 +96,9 @@ describe('ChatSdk', () => {
       env: {
         SERVER_COORDINATOR: {
           idFromName: vi.fn().mockReturnValue('object-id'),
-          get: vi.fn().mockReturnValue(durableObject)
-        }
-      }
+          get: vi.fn().mockReturnValue(durableObject),
+        },
+      },
     };

     const response = await ChatSdk.handleChatRequest(request as any, ctx as any);
@@ -105,12 +106,9 @@ describe('ChatSdk', () => {

     expect(ctx.env.SERVER_COORDINATOR.idFromName).toHaveBeenCalledWith('stream-index');
     expect(ctx.env.SERVER_COORDINATOR.get).toHaveBeenCalledWith('object-id');
-    expect(saveStreamData).toHaveBeenCalledWith(
-      streamId,
-      expect.stringContaining(model)
-    );
+    expect(saveStreamData).toHaveBeenCalledWith(streamId, expect.stringContaining(model));
     expect(responseBody).toEqual({
-      streamUrl: `/api/streams/${streamId}`
+      streamUrl: `/api/streams/${streamId}`,
     });
   });
 });
@@ -120,7 +118,7 @@ describe('ChatSdk', () => {
     const messages = [{ role: 'user', content: 'Hello' }];
     const dynamicMaxTokens = vi.fn().mockResolvedValue(500);
     const durableObject = {
-      dynamicMaxTokens
+      dynamicMaxTokens,
     };

     const ctx = {
@@ -128,9 +126,9 @@ describe('ChatSdk', () => {
       env: {
         SERVER_COORDINATOR: {
           idFromName: vi.fn().mockReturnValue('object-id'),
-          get: vi.fn().mockReturnValue(durableObject)
-        }
-      }
+          get: vi.fn().mockReturnValue(durableObject),
+        },
+      },
     };

     await ChatSdk.calculateMaxTokens(messages, ctx as any);
@@ -150,7 +148,7 @@ describe('ChatSdk', () => {
     expect(AssistantSdk.getAssistantPrompt).toHaveBeenCalledWith({
       maxTokens: 1000,
       userTimezone: 'UTC',
-      userLocation: 'USA/unknown'
+      userLocation: 'USA/unknown',
     });
     expect(result).toBe('Assistant prompt');
   });
@@ -160,15 +158,13 @@ describe('ChatSdk', () => {
   it('should build a message chain with system role for most models', async () => {
     vi.mocked(ProviderRepository.getModelFamily).mockResolvedValue('openai');

-    const messages = [
-      {role: 'user', content: 'Hello'}
-    ];
+    const messages = [{ role: 'user', content: 'Hello' }];

     const opts = {
       systemPrompt: 'System prompt',
       assistantPrompt: 'Assistant prompt',
-      toolResults: {role: 'tool', content: 'Tool result'},
-      model: 'gpt-4'
+      toolResults: { role: 'tool', content: 'Tool result' },
+      model: 'gpt-4',
     };

     const result = await ChatSdk.buildMessageChain(messages, opts as any);
@@ -177,30 +173,28 @@ describe('ChatSdk', () => {
     expect(Message.create).toHaveBeenCalledTimes(3);
     expect(Message.create).toHaveBeenNthCalledWith(1, {
       role: 'system',
-      content: 'System prompt'
+      content: 'System prompt',
     });
     expect(Message.create).toHaveBeenNthCalledWith(2, {
       role: 'assistant',
-      content: 'Assistant prompt'
+      content: 'Assistant prompt',
     });
     expect(Message.create).toHaveBeenNthCalledWith(3, {
       role: 'user',
-      content: 'Hello'
+      content: 'Hello',
     });
   });

   it('should build a message chain with assistant role for o1, gemma, claude, or google models', async () => {
     vi.mocked(ProviderRepository.getModelFamily).mockResolvedValue('claude');

-    const messages = [
-      { role: 'user', content: 'Hello' }
-    ];
+    const messages = [{ role: 'user', content: 'Hello' }];

     const opts = {
       systemPrompt: 'System prompt',
       assistantPrompt: 'Assistant prompt',
       toolResults: { role: 'tool', content: 'Tool result' },
-      model: 'claude-3'
+      model: 'claude-3',
     };

     const result = await ChatSdk.buildMessageChain(messages, opts as any);
@@ -209,7 +203,7 @@ describe('ChatSdk', () => {
     expect(Message.create).toHaveBeenCalledTimes(3);
     expect(Message.create).toHaveBeenNthCalledWith(1, {
       role: 'assistant',
-      content: 'System prompt'
+      content: 'System prompt',
     });
   });

@@ -220,14 +214,14 @@ describe('ChatSdk', () => {
       { role: 'user', content: 'Hello' },
       { role: 'user', content: '' },
       { role: 'user', content: ' ' },
-      { role: 'user', content: 'World' }
+      { role: 'user', content: 'World' },
     ];

     const opts = {
       systemPrompt: 'System prompt',
       assistantPrompt: 'Assistant prompt',
       toolResults: { role: 'tool', content: 'Tool result' },
-      model: 'gpt-4'
+      model: 'gpt-4',
     };

     const result = await ChatSdk.buildMessageChain(messages, opts as any);
@@ -1,4 +1,5 @@
 import { describe, it, expect } from 'vitest';
+
 import { Utils } from '../utils.ts';

 describe('Debug Utils.getSeason', () => {
@@ -1,13 +1,14 @@
 import { describe, it, expect, vi, beforeEach } from 'vitest';
+
 import handleStreamData from '../handleStreamData.ts';

 describe('handleStreamData', () => {
   // Setup mocks
   const mockController = {
-    enqueue: vi.fn()
+    enqueue: vi.fn(),
   };
   const mockEncoder = {
-    encode: vi.fn((str) => str)
+    encode: vi.fn(str => str),
   };

   beforeEach(() => {
@@ -16,75 +17,75 @@ describe('handleStreamData', () => {

   it('should return early if data type is not "chat"', () => {
     const handler = handleStreamData(mockController as any, mockEncoder as any);

     handler({ type: 'not-chat', data: {} });

     expect(mockController.enqueue).not.toHaveBeenCalled();
     expect(mockEncoder.encode).not.toHaveBeenCalled();
   });

   it('should return early if data is undefined', () => {
     const handler = handleStreamData(mockController as any, mockEncoder as any);

     handler(undefined as any);

     expect(mockController.enqueue).not.toHaveBeenCalled();
     expect(mockEncoder.encode).not.toHaveBeenCalled();
   });

   it('should handle content_block_start type data', () => {
     const handler = handleStreamData(mockController as any, mockEncoder as any);

     const data = {
       type: 'chat',
       data: {
         type: 'content_block_start',
         content_block: {
           type: 'text',
-          text: 'Hello world'
-        }
-      }
+          text: 'Hello world',
+        },
+      },
     };

     handler(data);

     expect(mockController.enqueue).toHaveBeenCalledTimes(1);
     expect(mockEncoder.encode).toHaveBeenCalledWith(expect.stringContaining('Hello world'));

     const encodedData = mockEncoder.encode.mock.calls[0][0];
     const parsedData = JSON.parse(encodedData.split('data: ')[1]);

     expect(parsedData.type).toBe('chat');
     expect(parsedData.data.choices[0].delta.content).toBe('Hello world');
   });

   it('should handle delta.text type data', () => {
     const handler = handleStreamData(mockController as any, mockEncoder as any);

     const data = {
       type: 'chat',
       data: {
         delta: {
-          text: 'Hello world'
-        }
-      }
+          text: 'Hello world',
+        },
+      },
     };

     handler(data);

     expect(mockController.enqueue).toHaveBeenCalledTimes(1);
     expect(mockEncoder.encode).toHaveBeenCalledWith(expect.stringContaining('Hello world'));

     const encodedData = mockEncoder.encode.mock.calls[0][0];
     const parsedData = JSON.parse(encodedData.split('data: ')[1]);

     expect(parsedData.type).toBe('chat');
     expect(parsedData.data.choices[0].delta.content).toBe('Hello world');
   });

   it('should handle choices[0].delta.content type data', () => {
     const handler = handleStreamData(mockController as any, mockEncoder as any);

     const data = {
       type: 'chat',
       data: {
@@ -92,23 +93,23 @@ describe('handleStreamData', () => {
         {
           index: 0,
           delta: {
-            content: 'Hello world'
+            content: 'Hello world',
           },
           logprobs: null,
-          finish_reason: null
-        }
-      ]
-    }
+          finish_reason: null,
+        },
+      ],
+    },
     };

     handler(data);

     expect(mockController.enqueue).toHaveBeenCalledTimes(1);
     expect(mockEncoder.encode).toHaveBeenCalledWith(expect.stringContaining('Hello world'));

     const encodedData = mockEncoder.encode.mock.calls[0][0];
     const parsedData = JSON.parse(encodedData.split('data: ')[1]);

     expect(parsedData.type).toBe('chat');
     expect(parsedData.data.choices[0].delta.content).toBe('Hello world');
     expect(parsedData.data.choices[0].finish_reason).toBe(null);
@@ -116,7 +117,7 @@ describe('handleStreamData', () => {

   it('should pass through data with choices but no delta.content', () => {
     const handler = handleStreamData(mockController as any, mockEncoder as any);

     const data = {
       type: 'chat',
       data: {
@@ -125,64 +126,66 @@ describe('handleStreamData', () => {
           index: 0,
           delta: {},
           logprobs: null,
-          finish_reason: 'stop'
-        }
-      ]
-    }
+          finish_reason: 'stop',
+        },
+      ],
+    },
     };

     handler(data);

     expect(mockController.enqueue).toHaveBeenCalledTimes(1);
-    expect(mockEncoder.encode).toHaveBeenCalledWith(expect.stringContaining('"finish_reason":"stop"'));
+    expect(mockEncoder.encode).toHaveBeenCalledWith(
+      expect.stringContaining('"finish_reason":"stop"'),
+    );
   });

   it('should return early for unrecognized data format', () => {
     const handler = handleStreamData(mockController as any, mockEncoder as any);

     const data = {
       type: 'chat',
       data: {
         // No recognized properties
-        unrecognized: 'property'
-      }
+        unrecognized: 'property',
+      },
     };

     handler(data);

     expect(mockController.enqueue).not.toHaveBeenCalled();
     expect(mockEncoder.encode).not.toHaveBeenCalled();
   });

   it('should use custom transform function if provided', () => {
     const handler = handleStreamData(mockController as any, mockEncoder as any);

     const data = {
       type: 'chat',
       data: {
-        original: 'data'
-      }
+        original: 'data',
+      },
     };

     const transformFn = vi.fn().mockReturnValue({
       type: 'chat',
       data: {
         choices: [
           {
             delta: {
-              content: 'Transformed content'
+              content: 'Transformed content',
             },
             logprobs: null,
-            finish_reason: null
-          }
-        ]
-      }
+            finish_reason: null,
+          },
+        ],
+      },
     });

     handler(data, transformFn);

     expect(transformFn).toHaveBeenCalledWith(data);
     expect(mockController.enqueue).toHaveBeenCalledTimes(1);
     expect(mockEncoder.encode).toHaveBeenCalledWith(expect.stringContaining('Transformed content'));
   });
 });
 });
@@ -1,4 +1,5 @@
 import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
+
 import { Utils } from '../utils.ts';

 describe('Utils', () => {
@@ -44,8 +45,8 @@ describe('Utils', () => {
     // Mock Intl.DateTimeFormat
     global.Intl.DateTimeFormat = vi.fn().mockReturnValue({
       resolvedOptions: vi.fn().mockReturnValue({
-        timeZone: 'America/New_York'
-      })
+        timeZone: 'America/New_York',
+      }),
     });
   });

@@ -102,10 +103,10 @@ describe('Utils', () => {

   it('should select items equitably from multiple sources', () => {
     const sources = {
-      a: { 'key1': 'value1', 'key2': 'value2' },
-      b: { 'key3': 'value3', 'key4': 'value4' },
-      c: { 'key5': 'value5', 'key6': 'value6' },
-      d: { 'key7': 'value7', 'key8': 'value8' }
+      a: { key1: 'value1', key2: 'value2' },
+      b: { key3: 'value3', key4: 'value4' },
+      c: { key5: 'value5', key6: 'value6' },
+      d: { key7: 'value7', key8: 'value8' },
     };

     const result = Utils.selectEquitably(sources, 4);
@@ -117,10 +118,10 @@ describe('Utils', () => {

   it('should handle itemCount greater than available items', () => {
     const sources = {
-      a: { 'key1': 'value1' },
-      b: { 'key2': 'value2' },
+      a: { key1: 'value1' },
+      b: { key2: 'value2' },
       c: {},
-      d: {}
+      d: {},
     };

     const result = Utils.selectEquitably(sources, 5);
@@ -135,7 +136,7 @@ describe('Utils', () => {
       a: {},
       b: {},
       c: {},
-      d: {}
+      d: {},
     };

     const result = Utils.selectEquitably(sources, 5);
@@ -148,7 +149,7 @@ describe('Utils', () => {
   it('should insert blank messages to maintain user/assistant alternation', () => {
     const messages = [
       { role: 'user', content: 'Hello' },
-      { role: 'user', content: 'How are you?' }
+      { role: 'user', content: 'How are you?' },
     ];

     const result = Utils.normalizeWithBlanks(messages);
@@ -160,9 +161,7 @@ describe('Utils', () => {
   });

   it('should insert blank user message if first message is assistant', () => {
-    const messages = [
-      { role: 'assistant', content: 'Hello, how can I help?' }
-    ];
+    const messages = [{ role: 'assistant', content: 'Hello, how can I help?' }];

     const result = Utils.normalizeWithBlanks(messages);

@@ -183,7 +182,7 @@ describe('Utils', () => {
     const messages = [
       { role: 'user', content: 'Hello' },
       { role: 'assistant', content: 'Hi there' },
-      { role: 'user', content: 'How are you?' }
+      { role: 'user', content: 'How are you?' },
     ];

     const result = Utils.normalizeWithBlanks(messages);
@@ -1,5 +1,6 @@
-import { Utils } from "./utils";
-import few_shots from "../prompts/few_shots";
+import few_shots from '../prompts/few_shots';
+
+import { Utils } from './utils';

 export class AssistantSdk {
   static getAssistantPrompt(params: {
@@ -7,11 +8,7 @@ export class AssistantSdk {
     userTimezone?: string;
     userLocation?: string;
   }): string {
-    const {
-      maxTokens,
-      userTimezone = "UTC",
-      userLocation = "",
-    } = params;
+    const { maxTokens, userTimezone = 'UTC', userLocation = '' } = params;
     // console.log('[DEBUG_LOG] few_shots:', JSON.stringify(few_shots));
     let selectedFewshots = Utils.selectEquitably?.(few_shots);
     // console.log('[DEBUG_LOG] selectedFewshots after Utils.selectEquitably:', JSON.stringify(selectedFewshots));
@@ -20,18 +17,18 @@ export class AssistantSdk {
       // console.log('[DEBUG_LOG] selectedFewshots after fallback:', JSON.stringify(selectedFewshots));
     }
     const sdkDate = new Date().toISOString();
-    const [currentDate] = sdkDate.includes("T") ? sdkDate.split("T") : [sdkDate];
+    const [currentDate] = sdkDate.includes('T') ? sdkDate.split('T') : [sdkDate];
     const now = new Date();
-    const formattedMinutes = String(now.getMinutes()).padStart(2, "0");
+    const formattedMinutes = String(now.getMinutes()).padStart(2, '0');
     const currentTime = `${now.getHours()}:${formattedMinutes} ${now.getSeconds()}s`;

     return `# Assistant Knowledge
 ## Current Context
 ### Date: ${currentDate} ${currentTime}
 ### Web Host: open-gsio.seemueller.workers.dev
-${maxTokens ? `### Max Response Length: ${maxTokens} tokens (maximum)` : ""}
+${maxTokens ? `### Max Response Length: ${maxTokens} tokens (maximum)` : ''}
 ### Lexicographical Format: Markdown
-### User Location: ${userLocation || "Unknown"}
+### User Location: ${userLocation || 'Unknown'}
 ### Timezone: ${userTimezone}
 ## Response Framework
 1. Use knowledge provided in the current context as the primary source of truth.
@@ -51,11 +48,9 @@ Continuously monitor the evolving conversation. Dynamically adapt each response.
   static useFewshots(fewshots: Record<string, string>, limit = 5): string {
     return Object.entries(fewshots)
       .slice(0, limit)
-      .map(
-        ([q, a], i) => {
-          return `#### Example ${i + 1}\n**Human**: ${q}\n**Assistant**: ${a}`
-        }
-      )
-      .join("\n---\n");
+      .map(([q, a], i) => {
+        return `#### Example ${i + 1}\n**Human**: ${q}\n**Assistant**: ${a}`;
+      })
+      .join('\n---\n');
   }
 }
@@ -1,135 +1,131 @@
-import {OpenAI} from "openai";
-import Message from "../models/Message.ts";
-import {AssistantSdk} from "./assistant-sdk.ts";
-import type {Instance} from "mobx-state-tree";
-import {ProviderRepository} from "../providers/_ProviderRepository";
+import type { Instance } from 'mobx-state-tree';
+import { OpenAI } from 'openai';
+
+import Message from '../models/Message.ts';
+import { ProviderRepository } from '../providers/_ProviderRepository';
+
+import { AssistantSdk } from './assistant-sdk.ts';

 export class ChatSdk {
-  static async preprocess({
-    messages,
-  }) {
+  static async preprocess({ messages }) {
     // run processing on messages to generate events/context
     return Message.create({
-      role: "assistant",
-      content: "",
+      role: 'assistant',
+      content: '',
     });
   }

   static async handleChatRequest(
     request: Request,
     ctx: {
       openai: OpenAI;
       systemPrompt: any;
       maxTokens: any;
       env: Env;
     },
   ) {
     const streamId = crypto.randomUUID();
-    const {messages, model, conversationId} =
-      await request.json();
+    const { messages, model, conversationId } = await request.json();

     if (!messages?.length) {
-      return new Response("No messages provided", {status: 400});
+      return new Response('No messages provided', { status: 400 });
     }

     const preprocessedContext = await ChatSdk.preprocess({
       messages,
     });
     // console.log(ctx.env)
     // console.log(ctx.env.SERVER_COORDINATOR);

-    const objectId = ctx.env.SERVER_COORDINATOR.idFromName("stream-index");
+    const objectId = ctx.env.SERVER_COORDINATOR.idFromName('stream-index');
     const durableObject = ctx.env.SERVER_COORDINATOR.get(objectId);

     await durableObject.saveStreamData(
       streamId,
       JSON.stringify({
         messages,
         model,
         conversationId,
         timestamp: Date.now(),
         systemPrompt: ctx.systemPrompt,
-        preprocessedContext
+        preprocessedContext,
       }),
     );

     return new Response(
       JSON.stringify({
         streamUrl: `/api/streams/${streamId}`,
       }),
       {
         headers: {
-          "Content-Type": "application/json",
+          'Content-Type': 'application/json',
         },
       },
     );
   }

   static async calculateMaxTokens(
     messages: any[],
     ctx: Record<string, any> & {
       env: Env;
       maxTokens: number;
     },
   ) {
-    const objectId = ctx.env.SERVER_COORDINATOR.idFromName(
-      "dynamic-token-counter",
-    );
+    const objectId = ctx.env.SERVER_COORDINATOR.idFromName('dynamic-token-counter');
     const durableObject = ctx.env.SERVER_COORDINATOR.get(objectId);
     return durableObject.dynamicMaxTokens(messages, ctx.maxTokens);
   }

-  static buildAssistantPrompt({maxTokens}) {
+  static buildAssistantPrompt({ maxTokens }) {
     return AssistantSdk.getAssistantPrompt({
       maxTokens,
-      userTimezone: "UTC",
-      userLocation: "USA/unknown",
+      userTimezone: 'UTC',
+      userLocation: 'USA/unknown',
     });
   }

   static async buildMessageChain(
     messages: any[],
     opts: {
       systemPrompt: any;
       assistantPrompt: string;
       toolResults: Instance<typeof Message>;
       model: any;
       env: Env;
     },
   ) {
-    const modelFamily = await ProviderRepository.getModelFamily(opts.model, opts.env)
+    const modelFamily = await ProviderRepository.getModelFamily(opts.model, opts.env);

     const messagesToSend = [];

     messagesToSend.push(
       Message.create({
         role:
-          opts.model.includes("o1") ||
-          opts.model.includes("gemma") ||
-          modelFamily === "claude" ||
-          modelFamily === "google"
-            ? "assistant"
-            : "system",
+          opts.model.includes('o1') ||
+          opts.model.includes('gemma') ||
+          modelFamily === 'claude' ||
+          modelFamily === 'google'
+            ? 'assistant'
+            : 'system',
         content: opts.systemPrompt.trim(),
       }),
     );

     messagesToSend.push(
       Message.create({
-        role: "assistant",
+        role: 'assistant',
         content: opts.assistantPrompt.trim(),
       }),
     );

     messagesToSend.push(
       ...messages
         .filter((message: any) => message.content?.trim())
         .map((message: any) => Message.create(message)),
     );

     return messagesToSend;
   }
 }

 export default ChatSdk;
@@ -22,15 +22,9 @@ interface StreamResponse {
   };
 }

-const handleStreamData = (
-  controller: ReadableStreamDefaultController,
-  encoder: TextEncoder,
-) => {
-  return (
-    data: StreamResponse,
-    transformFn?: (data: StreamResponse) => StreamResponse,
-  ) => {
-    if (!data?.type || data.type !== "chat") {
+const handleStreamData = (controller: ReadableStreamDefaultController, encoder: TextEncoder) => {
+  return (data: StreamResponse, transformFn?: (data: StreamResponse) => StreamResponse) => {
+    if (!data?.type || data.type !== 'chat') {
       return;
     }

@@ -39,17 +33,14 @@ const handleStreamData = (
     if (transformFn) {
       transformedData = transformFn(data);
     } else {
-      if (
-        data.data.type === "content_block_start" &&
-        data.data.content_block?.type === "text"
-      ) {
+      if (data.data.type === 'content_block_start' && data.data.content_block?.type === 'text') {
         transformedData = {
-          type: "chat",
+          type: 'chat',
           data: {
             choices: [
               {
                 delta: {
-                  content: data.data.content_block.text || "",
+                  content: data.data.content_block.text || '',
                 },
                 logprobs: null,
                 finish_reason: null,
@@ -59,7 +50,7 @@ const handleStreamData = (
       };
     } else if (data.data.delta?.text) {
       transformedData = {
-        type: "chat",
+        type: 'chat',
         data: {
           choices: [
             {
@@ -74,7 +65,7 @@ const handleStreamData = (
       };
     } else if (data.data.choices?.[0]?.delta?.content) {
       transformedData = {
-        type: "chat",
+        type: 'chat',
         data: {
           choices: [
             {
@@ -95,9 +86,7 @@ const handleStreamData = (
       }
     }

-    controller.enqueue(
-      encoder.encode(`data: ${JSON.stringify(transformedData)}\n\n`),
-    );
+    controller.enqueue(encoder.encode(`data: ${JSON.stringify(transformedData)}\n\n`));
   };
 };
@@ -1,20 +1,17 @@
 export class Utils {
   static getSeason(date: string): string {
     const hemispheres = {
-      Northern: ["Winter", "Spring", "Summer", "Autumn"],
-      Southern: ["Summer", "Autumn", "Winter", "Spring"],
+      Northern: ['Winter', 'Spring', 'Summer', 'Autumn'],
+      Southern: ['Summer', 'Autumn', 'Winter', 'Spring'],
     };
     const d = new Date(date);
     const month = d.getMonth();
     const day = d.getDate();
-    const hemisphere = "Northern";
+    const hemisphere = 'Northern';

-    if (month < 2 || (month === 2 && day <= 20) || month === 11)
-      return hemispheres[hemisphere][0];
-    if (month < 5 || (month === 5 && day <= 21))
-      return hemispheres[hemisphere][1];
-    if (month < 8 || (month === 8 && day <= 22))
-      return hemispheres[hemisphere][2];
+    if (month < 2 || (month === 2 && day <= 20) || month === 11) return hemispheres[hemisphere][0];
+    if (month < 5 || (month === 5 && day <= 21)) return hemispheres[hemisphere][1];
+    if (month < 8 || (month === 8 && day <= 22)) return hemispheres[hemisphere][2];
     return hemispheres[hemisphere][3];
   }
   static getTimezone(timezone) {
@@ -30,7 +27,7 @@ export class Utils {

   static isAssetUrl(url) {
     const { pathname } = new URL(url);
-    return pathname.startsWith("/assets/");
+    return pathname.startsWith('/assets/');
   }

   static selectEquitably({ a, b, c, d }, itemCount = 9) {
@@ -39,9 +36,7 @@ export class Utils {

     let combinedItems = [];
     sources.forEach((source, index) => {
-      combinedItems.push(
-        ...Object.keys(source).map((key) => ({ source: index, key })),
-      );
+      combinedItems.push(...Object.keys(source).map(key => ({ source: index, key })));
     });

     combinedItems = combinedItems.sort(() => Math.random() - 0.5);
@@ -60,37 +55,35 @@ export class Utils {
     return result;
   }

-  static normalizeWithBlanks<T extends Normalize.ChatMessage>(msgs: T[]): T[] {
+  static normalizeWithBlanks<T extends NormalizeChatMessage>(msgs: T[]): T[] {
     const out: T[] = [];

     // In local mode first turn expected to be user.
-    let expected: Normalize.Role = "user";
+    let expected: NormalizeRole = 'user';

     for (const m of msgs) {
       while (m.role !== expected) {
         // Insert blanks to match expected sequence user/assistant/user...
-        out.push(Normalize.makeBlank(expected) as T);
-        expected = expected === "user" ? "assistant" : "user";
+        out.push(makeNormalizeBlank(expected) as T);
+        expected = expected === 'user' ? 'assistant' : 'user';
       }

       out.push(m);
-      expected = expected === "user" ? "assistant" : "user";
+      expected = expected === 'user' ? 'assistant' : 'user';
     }

     return out;
   }
 }

-module Normalize {
-  export type Role = "user" | "assistant";
+// Normalize module exports
+export type NormalizeRole = 'user' | 'assistant';

-  export interface ChatMessage extends Record<any, any> {
-    role: Role;
-  }
-
-  export const makeBlank = (role: Role): ChatMessage => ({
-    role,
-    content: ""
-  });
+export interface NormalizeChatMessage extends Record<any, any> {
+  role: NormalizeRole;
+}
+
+export const makeNormalizeBlank = (role: NormalizeRole): NormalizeChatMessage => ({
+  role,
+  content: '',
+});
@@ -1,6 +1,6 @@
-import { types } from "mobx-state-tree";
+import { types } from 'mobx-state-tree';

-export default types.model("ContactRecord", {
+export default types.model('ContactRecord', {
   message: types.string,
   timestamp: types.string,
   email: types.string,
@@ -1,10 +1,10 @@
 // FeedbackRecord.ts
-import { types } from "mobx-state-tree";
+import { types } from 'mobx-state-tree';

-const FeedbackRecord = types.model("FeedbackRecord", {
+const FeedbackRecord = types.model('FeedbackRecord', {
   feedback: types.string,
   timestamp: types.string,
-  user: types.optional(types.string, "Anonymous"),
+  user: types.optional(types.string, 'Anonymous'),
 });

 export default FeedbackRecord;
@@ -1,12 +1,12 @@
 // Base Message
-import { type Instance, types } from "mobx-state-tree";
+import { type Instance, types } from 'mobx-state-tree';

 export default types
-  .model("Message", {
+  .model('Message', {
     content: types.string,
-    role: types.enumeration(["user", "assistant", "system"]),
+    role: types.enumeration(['user', 'assistant', 'system']),
   })
-  .actions((self) => ({
+  .actions(self => ({
     setContent(newContent: string) {
       self.content = newContent;
     },
@@ -1,8 +1,8 @@
-import { types } from "mobx-state-tree";
+import { types } from 'mobx-state-tree';

 export default types
-  .model("O1Message", {
-    role: types.enumeration(["user", "assistant", "system"]),
+  .model('O1Message', {
+    role: types.enumeration(['user', 'assistant', 'system']),
     content: types.array(
       types.model({
         type: types.string,
@@ -10,11 +10,11 @@ export default types
       }),
     ),
   })
-  .actions((self) => ({
-    setContent(newContent: string, contentType: string = "text") {
+  .actions(self => ({
+    setContent(newContent: string, contentType: string = 'text') {
       self.content = [{ type: contentType, text: newContent }];
     },
-    append(newContent: string, contentType: string = "text") {
+    append(newContent: string, contentType: string = 'text') {
       self.content.push({ type: contentType, text: newContent });
     },
   }));
@@ -1,12 +1,12 @@
|
||||
// Models
|
||||
import { types } from "mobx-state-tree";
|
||||
import { types } from 'mobx-state-tree';
|
||||
|
||||
export default types
|
||||
.model("Message", {
|
||||
.model('Message', {
|
||||
content: types.string,
|
||||
role: types.enumeration(["user", "assistant", "system"]),
|
||||
role: types.enumeration(['user', 'assistant', 'system']),
|
||||
})
|
||||
.actions((self) => ({
|
||||
.actions(self => ({
|
||||
setContent(newContent: string) {
|
||||
self.content = newContent;
|
||||
},
|
||||
|
@@ -1,11 +1,10 @@
|
||||
/* eslint-disable no-useless-escape */
|
||||
const a = {
|
||||
"Can you search for the latest quantum computing breakthroughs?":
|
||||
"I don't have web access.",
|
||||
'Can you search for the latest quantum computing breakthroughs?': "I don't have web access.",
|
||||
};
|
||||
|
||||
const b = {
|
||||
"Calculate the number of unique ways to arrange the letters in the word SUCCESS.":
|
||||
`Alright, let's tackle 'SUCCESS' and see just how unique it can get!
|
||||
'Calculate the number of unique ways to arrange the letters in the word SUCCESS.': `Alright, let's tackle 'SUCCESS' and see just how unique it can get!
|
||||
### Step 1: Count the Letters
|
||||
The word 'SUCCESS' has 7 letters, but it’s no free-for-all—there are repeats!
|
||||
- S: 3 times
|
||||
@@ -27,9 +26,7 @@ $$
|
||||
### Step 4: Reveal the Result
|
||||
There are 420 unique arrangements of 'SUCCESS'. Mathematically satisfying, isn’t it? It’s as if every version of success gets its own unique permutation!`,
|
||||
|
||||
|
||||
"Calculate the number of unique ways to arrange the letters in the word MISSISSIPPI.":
|
||||
`Ready for some permutation wizardry? Let’s decode 'MISSISSIPPI'!
|
||||
'Calculate the number of unique ways to arrange the letters in the word MISSISSIPPI.': `Ready for some permutation wizardry? Let’s decode 'MISSISSIPPI'!
|
||||
### Step 1: Count the Letters
|
||||
'MISSISSIPPI' has 11 letters. But like a social network with cliques, some letters appear in multiples:
|
||||
- M: 1
|
||||
@@ -60,12 +57,12 @@ There are 34,650 unique arrangements of 'MISSISSIPPI'.`,
|
||||
};
|
||||
|
||||
const c = {
|
||||
"Render a Greek statue with warm marble tones and realistic proportions.":
|
||||
"I don't have the ability to generate images right now. ",
|
||||
'Render a Greek statue with warm marble tones and realistic proportions.':
|
||||
"I don't have the ability to generate images right now. ",
|
||||
};
|
||||
|
||||
const d = {
|
||||
"Demonstrate markdown formatted text.": `
|
||||
'Demonstrate markdown formatted text.': `
|
||||
## Headers
|
||||
~~~markdown
|
||||
# Large Header
|
||||
@@ -117,4 +114,4 @@ c = \\\\pm\\\\sqrt{a^2 + b^2}
|
||||
$$`,
|
||||
};
|
||||
|
||||
export default {a, b, c, d};
|
||||
export default { a, b, c, d };
|
||||
|
@@ -1,83 +1,86 @@
export type GenericEnv = Record<string, any>;

export class ProviderRepository {
  #providers: {name: string, key: string, endpoint: string}[] = [];
  #env: Record<string, any>;
  #providers: { name: string; key: string; endpoint: string }[] = [];
  #env: Record<string, any>;

  constructor(env: GenericEnv) {
    this.#env = env
    this.setProviders(env);
  }
  constructor(env: GenericEnv) {
    this.#env = env;
    this.setProviders(env);
  }

  static OPENAI_COMPAT_ENDPOINTS = {
    xai: 'https://api.x.ai/v1',
    groq: 'https://api.groq.com/openai/v1',
    google: 'https://generativelanguage.googleapis.com/v1beta/openai',
    fireworks: 'https://api.fireworks.ai/inference/v1',
    cohere: 'https://api.cohere.ai/compatibility/v1',
    cloudflare: 'https://api.cloudflare.com/client/v4/accounts/{CLOUDFLARE_ACCOUNT_ID}/ai/v1',
    anthropic: 'https://api.anthropic.com/v1',
    openai: 'https://api.openai.com/v1',
    cerebras: 'https://api.cerebras.com/v1',
    ollama: "http://localhost:11434/v1",
    mlx: "http://localhost:10240/v1",
  }
  static OPENAI_COMPAT_ENDPOINTS = {
    xai: 'https://api.x.ai/v1',
    groq: 'https://api.groq.com/openai/v1',
    google: 'https://generativelanguage.googleapis.com/v1beta/openai',
    fireworks: 'https://api.fireworks.ai/inference/v1',
    cohere: 'https://api.cohere.ai/compatibility/v1',
    cloudflare: 'https://api.cloudflare.com/client/v4/accounts/{CLOUDFLARE_ACCOUNT_ID}/ai/v1',
    anthropic: 'https://api.anthropic.com/v1',
    openai: 'https://api.openai.com/v1',
    cerebras: 'https://api.cerebras.com/v1',
    ollama: 'http://localhost:11434/v1',
    mlx: 'http://localhost:10240/v1',
  };

  static async getModelFamily(model: any, env: Env) {
    const allModels = await env.KV_STORAGE.get("supportedModels");
    const models = JSON.parse(allModels);
    const modelData = models.filter(m => m.id === model)
    return modelData[0].provider;
  }
  static async getModelFamily(model: any, env: Env) {
    const allModels = await env.KV_STORAGE.get('supportedModels');
    const models = JSON.parse(allModels);
    const modelData = models.filter(m => m.id === model);
    return modelData[0].provider;
  }

  static async getModelMeta(meta, env) {
    const allModels = await env.KV_STORAGE.get("supportedModels");
    const models = JSON.parse(allModels);
    return models.filter(m => m.id === meta.model).pop()
  }
  static async getModelMeta(meta, env) {
    const allModels = await env.KV_STORAGE.get('supportedModels');
    const models = JSON.parse(allModels);
    return models.filter(m => m.id === meta.model).pop();
  }

  getProviders(): {name: string, key: string, endpoint: string}[] {
    return this.#providers;
  }
  getProviders(): { name: string; key: string; endpoint: string }[] {
    return this.#providers;
  }

  setProviders(env: Record<string, any>) {
    let envKeys = Object.keys(env);
    for (let i = 0; i < envKeys.length; i++) {
      if (envKeys[i].endsWith('KEY')) {
        const detectedProvider = envKeys[i].split('_')[0].toLowerCase();
        const detectedProviderValue = env[envKeys[i]];
        if(detectedProviderValue) {
          switch (detectedProvider) {
            case 'anthropic':
              this.#providers.push({
                name: 'anthropic',
                key: env.ANTHROPIC_API_KEY,
                endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS['anthropic']
              });
              break;
            case 'gemini':
              this.#providers.push({
                name: 'google',
                key: env.GEMINI_API_KEY,
                endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS['google']
              });
              break;
            case 'cloudflare':
              this.#providers.push({
                name: 'cloudflare',
                key: env.CLOUDFLARE_API_KEY,
                endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS[detectedProvider].replace("{CLOUDFLARE_ACCOUNT_ID}", env.CLOUDFLARE_ACCOUNT_ID)
              })
            default:
              this.#providers.push({
                name: detectedProvider,
                key: env[envKeys[i]],
                endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS[detectedProvider]
              });
          }
        }
      }
  setProviders(env: Record<string, any>) {
    const envKeys = Object.keys(env);
    for (let i = 0; i < envKeys.length; i++) {
      if (envKeys[i].endsWith('KEY')) {
        const detectedProvider = envKeys[i].split('_')[0].toLowerCase();
        const detectedProviderValue = env[envKeys[i]];
        if (detectedProviderValue) {
          switch (detectedProvider) {
            case 'anthropic':
              this.#providers.push({
                name: 'anthropic',
                key: env.ANTHROPIC_API_KEY,
                endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS['anthropic'],
              });
              break;
            case 'gemini':
              this.#providers.push({
                name: 'google',
                key: env.GEMINI_API_KEY,
                endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS['google'],
              });
              break;
            case 'cloudflare':
              this.#providers.push({
                name: 'cloudflare',
                key: env.CLOUDFLARE_API_KEY,
                endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS[detectedProvider].replace(
                  '{CLOUDFLARE_ACCOUNT_ID}',
                  env.CLOUDFLARE_ACCOUNT_ID,
                ),
              });
              break;
            default:
              this.#providers.push({
                name: detectedProvider,
                key: env[envKeys[i]],
                endpoint: ProviderRepository.OPENAI_COMPAT_ENDPOINTS[detectedProvider],
              });
          }
        }
      }
    }
  }
}
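// Usage sketch (hypothetical key values): providers are discovered purely by the
// *_KEY suffix convention above, so new ProviderRepository({ GROQ_API_KEY: 'sk-test' })
// .getProviders() yields
// [{ name: 'groq', key: 'sk-test', endpoint: 'https://api.groq.com/openai/v1' }].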
@@ -1,6 +1,11 @@
import { describe, it, expect, vi } from 'vitest';
import { BaseChatProvider, CommonProviderParams, ChatStreamProvider } from '../chat-stream-provider.ts';
import { OpenAI } from 'openai';
import { describe, it, expect, vi } from 'vitest';

import {
  BaseChatProvider,
  CommonProviderParams,
  ChatStreamProvider,
} from '../chat-stream-provider.ts';

// Create a concrete implementation of BaseChatProvider for testing
class TestChatProvider extends BaseChatProvider {
@@ -29,16 +34,16 @@ vi.mock('../../lib/chat-sdk', () => ({
    buildAssistantPrompt: vi.fn().mockReturnValue('Assistant prompt'),
    buildMessageChain: vi.fn().mockReturnValue([
      { role: 'system', content: 'System prompt' },
      { role: 'user', content: 'User message' }
    ])
  }
      { role: 'user', content: 'User message' },
    ]),
  },
}));

describe('ChatStreamProvider', () => {
  it('should define the required interface', () => {
    // Verify the interface has the required method
    const mockProvider: ChatStreamProvider = {
      handleStream: vi.fn()
      handleStream: vi.fn(),
    };

    expect(mockProvider.handleStream).toBeDefined();

@@ -1,6 +1,7 @@
import {OpenAI} from "openai";
import {BaseChatProvider, CommonProviderParams} from "./chat-stream-provider.ts";
import {ProviderRepository} from "./_ProviderRepository";
import { OpenAI } from 'openai';

import { ProviderRepository } from './_ProviderRepository';
import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

export class CerebrasChatProvider extends BaseChatProvider {
  getOpenAIClient(param: CommonProviderParams): OpenAI {
@@ -11,8 +12,8 @@ export class CerebrasChatProvider extends BaseChatProvider {
  }

  getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
    // models provided by cerebras do not follow standard tune params
    // they must be individually configured
    // models provided by cerebras do not follow standard tune params
    // they must be individually configured
    // const tuningParams = {
    //   temperature: 0.86,
    //   top_p: 0.98,
@@ -24,18 +25,18 @@ export class CerebrasChatProvider extends BaseChatProvider {
    return {
      model: param.model,
      messages: safeMessages,
      stream: true
      // ...tuningParams
      stream: true,
      // ...tuningParams
    };
  }

  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
    if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
      dataCallback({ type: "chat", data: chunk });
    if (chunk.choices && chunk.choices[0]?.finish_reason === 'stop') {
      dataCallback({ type: 'chat', data: chunk });
      return true;
    }

    dataCallback({ type: "chat", data: chunk });
    dataCallback({ type: 'chat', data: chunk });
    return false;
  }
}

@@ -1,5 +1,6 @@
import { OpenAI } from "openai";
import ChatSdk from "../lib/chat-sdk.ts";
import { OpenAI } from 'openai';

import ChatSdk from '../lib/chat-sdk.ts';

export interface CommonProviderParams {
  openai?: OpenAI; // Optional for providers that use a custom client.
@@ -14,10 +15,7 @@ export interface CommonProviderParams {
}

export interface ChatStreamProvider {
  handleStream(
    param: CommonProviderParams,
    dataCallback: (data: any) => void,
  ): Promise<any>;
  handleStream(param: CommonProviderParams, dataCallback: (data: any) => void): Promise<any>;
}

export abstract class BaseChatProvider implements ChatStreamProvider {
@@ -25,17 +23,14 @@ export abstract class BaseChatProvider implements ChatStreamProvider {
  abstract getStreamParams(param: CommonProviderParams, safeMessages: any[]): any;
  abstract async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean>;

  async handleStream(
    param: CommonProviderParams,
    dataCallback: (data: any) => void,
  ) {
  async handleStream(param: CommonProviderParams, dataCallback: (data: any) => void) {
    const assistantPrompt = ChatSdk.buildAssistantPrompt({ maxTokens: param.maxTokens });
    const safeMessages = await ChatSdk.buildMessageChain(param.messages, {
      systemPrompt: param.systemPrompt,
      model: param.model,
      assistantPrompt,
      toolResults: param.preprocessedContext,
      env: param.env
      env: param.env,
    });

    const client = this.getOpenAIClient(param);
@@ -47,4 +42,4 @@ export abstract class BaseChatProvider implements ChatStreamProvider {
      if (shouldBreak) break;
    }
  }
}
}
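// BaseChatProvider is a template method: handleStream owns the shared
// build-messages/stream/dispatch loop, and a concrete provider only supplies
// the client, the request params, and per-chunk handling. A minimal sketch
// (hypothetical provider, for illustration only):
//
//   class EchoProvider extends BaseChatProvider {
//     getOpenAIClient(param: CommonProviderParams): OpenAI {
//       return param.openai as OpenAI;
//     }
//     getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
//       return { model: param.model, messages: safeMessages, stream: true };
//     }
//     async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
//       dataCallback({ type: 'chat', data: chunk });
//       return chunk.choices?.[0]?.finish_reason === 'stop';
//     }
//   }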
@@ -1,14 +1,16 @@
import Anthropic from "@anthropic-ai/sdk";
import {OpenAI} from "openai";
import Anthropic from '@anthropic-ai/sdk';
import {
  _NotCustomized,
  ISimpleType,
  ModelPropertiesDeclarationToProperties,
  ModelSnapshotType2,
  UnionStringArray,
} from "mobx-state-tree";
import ChatSdk from "../lib/chat-sdk.ts";
import {BaseChatProvider, CommonProviderParams} from "./chat-stream-provider.ts";
} from 'mobx-state-tree';
import { OpenAI } from 'openai';

import ChatSdk from '../lib/chat-sdk.ts';

import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

export class ClaudeChatProvider extends BaseChatProvider {
  private anthropic: Anthropic | null = null;
@@ -33,20 +35,20 @@ export class ClaudeChatProvider extends BaseChatProvider {
      stream: true,
      model: param.model,
      messages: safeMessages,
      ...claudeTuningParams
      ...claudeTuningParams,
    };
  }

  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
    if (chunk.type === "message_stop") {
    if (chunk.type === 'message_stop') {
      dataCallback({
        type: "chat",
        type: 'chat',
        data: {
          choices: [
            {
              delta: { content: "" },
              delta: { content: '' },
              logprobs: null,
              finish_reason: "stop",
              finish_reason: 'stop',
            },
          ],
        },
@@ -54,15 +56,12 @@ export class ClaudeChatProvider extends BaseChatProvider {
      return true;
    }

    dataCallback({ type: "chat", data: chunk });
    dataCallback({ type: 'chat', data: chunk });
    return false;
  }

  // Override the base handleStream method to use Anthropic client instead of OpenAI
  async handleStream(
    param: CommonProviderParams,
    dataCallback: (data: any) => void,
  ) {
  async handleStream(param: CommonProviderParams, dataCallback: (data: any) => void) {
    const assistantPrompt = ChatSdk.buildAssistantPrompt({ maxTokens: param.maxTokens });
    const safeMessages = ChatSdk.buildMessageChain(param.messages, {
      systemPrompt: param.systemPrompt,
@@ -75,7 +74,7 @@ export class ClaudeChatProvider extends BaseChatProvider {
    const streamParams = this.getStreamParams(param, safeMessages);

    if (!this.anthropic) {
      throw new Error("Anthropic client not initialized");
      throw new Error('Anthropic client not initialized');
    }

    const stream = await this.anthropic.messages.create(streamParams);

@@ -1,12 +1,16 @@
import {OpenAI} from "openai";
import {BaseChatProvider, CommonProviderParams} from "./chat-stream-provider.ts";
import {ProviderRepository} from "./_ProviderRepository";
import { OpenAI } from 'openai';

import { ProviderRepository } from './_ProviderRepository';
import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

export class CloudflareAiChatProvider extends BaseChatProvider {
  getOpenAIClient(param: CommonProviderParams): OpenAI {
    return new OpenAI({
      apiKey: param.env.CLOUDFLARE_API_KEY,
      baseURL: ProviderRepository.OPENAI_COMPAT_ENDPOINTS.cloudflare.replace("{CLOUDFLARE_ACCOUNT_ID}", param.env.CLOUDFLARE_ACCOUNT_ID),
      baseURL: ProviderRepository.OPENAI_COMPAT_ENDPOINTS.cloudflare.replace(
        '{CLOUDFLARE_ACCOUNT_ID}',
        param.env.CLOUDFLARE_ACCOUNT_ID,
      ),
    });
  }

@@ -18,36 +22,36 @@ export class CloudflareAiChatProvider extends BaseChatProvider {
    };

    // Set max_tokens based on model
    if (this.getModelPrefix(param.model) === "@cf/meta") {
      generationParams["max_tokens"] = 4096;
    if (this.getModelPrefix(param.model) === '@cf/meta') {
      generationParams['max_tokens'] = 4096;
    }

    if (this.getModelPrefix(param.model) === "@hf/mistral") {
      generationParams["max_tokens"] = 4096;
    if (this.getModelPrefix(param.model) === '@hf/mistral') {
      generationParams['max_tokens'] = 4096;
    }

    if (param.model.toLowerCase().includes("hermes-2-pro-mistral-7b")) {
      generationParams["max_tokens"] = 1000;
    if (param.model.toLowerCase().includes('hermes-2-pro-mistral-7b')) {
      generationParams['max_tokens'] = 1000;
    }

    if (param.model.toLowerCase().includes("openhermes-2.5-mistral-7b-awq")) {
      generationParams["max_tokens"] = 1000;
    if (param.model.toLowerCase().includes('openhermes-2.5-mistral-7b-awq')) {
      generationParams['max_tokens'] = 1000;
    }

    if (param.model.toLowerCase().includes("deepseek-coder-6.7b-instruct-awq")) {
      generationParams["max_tokens"] = 590;
    if (param.model.toLowerCase().includes('deepseek-coder-6.7b-instruct-awq')) {
      generationParams['max_tokens'] = 590;
    }

    if (param.model.toLowerCase().includes("deepseek-math-7b-instruct")) {
      generationParams["max_tokens"] = 512;
    if (param.model.toLowerCase().includes('deepseek-math-7b-instruct')) {
      generationParams['max_tokens'] = 512;
    }

    if (param.model.toLowerCase().includes("neural-chat-7b-v3-1-awq")) {
      generationParams["max_tokens"] = 590;
    if (param.model.toLowerCase().includes('neural-chat-7b-v3-1-awq')) {
      generationParams['max_tokens'] = 590;
    }

    if (param.model.toLowerCase().includes("openchat-3.5-0106")) {
      generationParams["max_tokens"] = 2000;
    if (param.model.toLowerCase().includes('openchat-3.5-0106')) {
      generationParams['max_tokens'] = 2000;
    }

    return generationParams;
@@ -56,38 +60,36 @@ export class CloudflareAiChatProvider extends BaseChatProvider {
  private getModelPrefix(model: string): string {
    let modelPrefix = `@cf/meta`;

    if (model.toLowerCase().includes("llama")) {
    if (model.toLowerCase().includes('llama')) {
      modelPrefix = `@cf/meta`;
    }

    if (model.toLowerCase().includes("hermes-2-pro-mistral-7b")) {
    if (model.toLowerCase().includes('hermes-2-pro-mistral-7b')) {
      modelPrefix = `@hf/nousresearch`;
    }

    if (model.toLowerCase().includes("mistral-7b-instruct")) {
    if (model.toLowerCase().includes('mistral-7b-instruct')) {
      modelPrefix = `@hf/mistral`;
    }

    if (model.toLowerCase().includes("gemma")) {
    if (model.toLowerCase().includes('gemma')) {
      modelPrefix = `@cf/google`;
    }

    if (model.toLowerCase().includes("deepseek")) {
    if (model.toLowerCase().includes('deepseek')) {
      modelPrefix = `@cf/deepseek-ai`;
    }

    if (model.toLowerCase().includes("openchat-3.5-0106")) {
    if (model.toLowerCase().includes('openchat-3.5-0106')) {
      modelPrefix = `@cf/openchat`;
    }

    const isNueralChat = model
      .toLowerCase()
      .includes("neural-chat-7b-v3-1-awq");
    const isNueralChat = model.toLowerCase().includes('neural-chat-7b-v3-1-awq');
    if (
      isNueralChat ||
      model.toLowerCase().includes("openhermes-2.5-mistral-7b-awq") ||
      model.toLowerCase().includes("zephyr-7b-beta-awq") ||
      model.toLowerCase().includes("deepseek-coder-6.7b-instruct-awq")
      model.toLowerCase().includes('openhermes-2.5-mistral-7b-awq') ||
      model.toLowerCase().includes('zephyr-7b-beta-awq') ||
      model.toLowerCase().includes('deepseek-coder-6.7b-instruct-awq')
    ) {
      modelPrefix = `@hf/thebloke`;
    }
@@ -100,12 +102,12 @@ export class CloudflareAiChatProvider extends BaseChatProvider {
  }

  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
    if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
      dataCallback({ type: "chat", data: chunk });
    if (chunk.choices && chunk.choices[0]?.finish_reason === 'stop') {
      dataCallback({ type: 'chat', data: chunk });
      return true;
    }

    dataCallback({ type: "chat", data: chunk });
    dataCallback({ type: 'chat', data: chunk });
    return false;
  }
}
@@ -1,4 +1,3 @@
import { OpenAI } from "openai";
import {
  _NotCustomized,
  castToSnapshot,
@@ -7,11 +6,14 @@ import {
  ModelPropertiesDeclarationToProperties,
  ModelSnapshotType2,
  UnionStringArray,
} from "mobx-state-tree";
import Message from "../models/Message.ts";
import ChatSdk from "../lib/chat-sdk.ts";
import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider.ts";
import {ProviderRepository} from "./_ProviderRepository";
} from 'mobx-state-tree';
import { OpenAI } from 'openai';

import ChatSdk from '../lib/chat-sdk.ts';
import Message from '../models/Message.ts';

import { ProviderRepository } from './_ProviderRepository';
import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

export class FireworksAiChatProvider extends BaseChatProvider {
  getOpenAIClient(param: CommonProviderParams): OpenAI {
@@ -22,9 +24,9 @@ export class FireworksAiChatProvider extends BaseChatProvider {
  }

  getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
    let modelPrefix = "accounts/fireworks/models/";
    if (param.model.toLowerCase().includes("yi-")) {
      modelPrefix = "accounts/yi-01-ai/models/";
    let modelPrefix = 'accounts/fireworks/models/';
    if (param.model.toLowerCase().includes('yi-')) {
      modelPrefix = 'accounts/yi-01-ai/models/';
    }

    return {
@@ -35,12 +37,12 @@ export class FireworksAiChatProvider extends BaseChatProvider {
  }

  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
    if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
      dataCallback({ type: "chat", data: chunk });
    if (chunk.choices && chunk.choices[0]?.finish_reason === 'stop') {
      dataCallback({ type: 'chat', data: chunk });
      return true;
    }

    dataCallback({ type: "chat", data: chunk });
    dataCallback({ type: 'chat', data: chunk });
    return false;
  }
}

@@ -1,8 +1,10 @@
import { OpenAI } from "openai";
import ChatSdk from "../lib/chat-sdk.ts";
import { StreamParams } from "../services/ChatService.ts";
import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider.ts";
import {ProviderRepository} from "./_ProviderRepository";
import { OpenAI } from 'openai';

import ChatSdk from '../lib/chat-sdk.ts';
import { StreamParams } from '../services/ChatService.ts';

import { ProviderRepository } from './_ProviderRepository';
import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

export class GoogleChatProvider extends BaseChatProvider {
  getOpenAIClient(param: CommonProviderParams): OpenAI {
@@ -21,14 +23,14 @@ export class GoogleChatProvider extends BaseChatProvider {
  }

  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
    if (chunk.choices?.[0]?.finish_reason === "stop") {
    if (chunk.choices?.[0]?.finish_reason === 'stop') {
      dataCallback({
        type: "chat",
        type: 'chat',
        data: {
          choices: [
            {
              delta: { content: chunk.choices[0].delta.content || "" },
              finish_reason: "stop",
              delta: { content: chunk.choices[0].delta.content || '' },
              finish_reason: 'stop',
              index: chunk.choices[0].index,
            },
          ],
@@ -37,11 +39,11 @@ export class GoogleChatProvider extends BaseChatProvider {
      return true;
    } else {
      dataCallback({
        type: "chat",
        type: 'chat',
        data: {
          choices: [
            {
              delta: { content: chunk.choices?.[0]?.delta?.content || "" },
              delta: { content: chunk.choices?.[0]?.delta?.content || '' },
              finish_reason: null,
              index: chunk.choices?.[0]?.index || 0,
            },
@@ -56,10 +58,7 @@ export class GoogleChatProvider extends BaseChatProvider {
export class GoogleChatSdk {
  private static provider = new GoogleChatProvider();

  static async handleGoogleStream(
    param: StreamParams,
    dataCallback: (data) => void,
  ) {
  static async handleGoogleStream(param: StreamParams, dataCallback: (data) => void) {
    return this.provider.handleStream(
      {
        systemPrompt: param.systemPrompt,

@@ -1,13 +1,14 @@
import { OpenAI } from "openai";
import {
  _NotCustomized,
  ISimpleType,
  ModelPropertiesDeclarationToProperties,
  ModelSnapshotType2,
  UnionStringArray,
} from "mobx-state-tree";
import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider.ts";
import {ProviderRepository} from "./_ProviderRepository";
} from 'mobx-state-tree';
import { OpenAI } from 'openai';

import { ProviderRepository } from './_ProviderRepository';
import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

export class GroqChatProvider extends BaseChatProvider {
  getOpenAIClient(param: CommonProviderParams): OpenAI {
@@ -30,17 +31,17 @@ export class GroqChatProvider extends BaseChatProvider {
      model: param.model,
      messages: safeMessages,
      stream: true,
      ...tuningParams
      ...tuningParams,
    };
  }

  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
    if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
      dataCallback({ type: "chat", data: chunk });
    if (chunk.choices && chunk.choices[0]?.finish_reason === 'stop') {
      dataCallback({ type: 'chat', data: chunk });
      return true;
    }

    dataCallback({ type: "chat", data: chunk });
    dataCallback({ type: 'chat', data: chunk });
    return false;
  }
}
@@ -1,95 +1,97 @@
import { OpenAI } from "openai";
import { Utils } from "../lib/utils";
import { ChatCompletionCreateParamsStreaming } from "openai/resources/chat/completions/completions";
import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider";
import { OpenAI } from 'openai';
import { ChatCompletionCreateParamsStreaming } from 'openai/resources/chat/completions/completions';

import { Utils } from '../lib/utils';

import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider';

export class MlxOmniChatProvider extends BaseChatProvider {
  getOpenAIClient(param: CommonProviderParams): OpenAI {
    return new OpenAI({
      baseURL: "http://localhost:10240",
      apiKey: param.env.MLX_API_KEY,
    });
  getOpenAIClient(param: CommonProviderParams): OpenAI {
    return new OpenAI({
      baseURL: 'http://localhost:10240',
      apiKey: param.env.MLX_API_KEY,
    });
  }

  getStreamParams(
    param: CommonProviderParams,
    safeMessages: any[],
  ): ChatCompletionCreateParamsStreaming {
    const baseTuningParams = {
      temperature: 0.86,
      top_p: 0.98,
      presence_penalty: 0.1,
      frequency_penalty: 0.3,
      max_tokens: param.maxTokens as number,
    };

    const getTuningParams = () => {
      return baseTuningParams;
    };

    let completionRequest: ChatCompletionCreateParamsStreaming = {
      model: param.model,
      stream: true,
      messages: safeMessages,
    };

    const client = this.getOpenAIClient(param);
    const isLocal = client.baseURL.includes('localhost');

    if (isLocal) {
      completionRequest['messages'] = Utils.normalizeWithBlanks(safeMessages);
      completionRequest['stream_options'] = {
        include_usage: true,
      };
    } else {
      completionRequest = { ...completionRequest, ...getTuningParams() };
    }

  getStreamParams(param: CommonProviderParams, safeMessages: any[]): ChatCompletionCreateParamsStreaming {
    const baseTuningParams = {
      temperature: 0.86,
      top_p: 0.98,
      presence_penalty: 0.1,
      frequency_penalty: 0.3,
      max_tokens: param.maxTokens as number,
    };
    return completionRequest;
  }

    const getTuningParams = () => {
      return baseTuningParams;
    };
  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
    const isLocal = chunk.usage !== undefined;

    let completionRequest: ChatCompletionCreateParamsStreaming = {
      model: param.model,
      stream: true,
      messages: safeMessages
    };

    const client = this.getOpenAIClient(param);
    const isLocal = client.baseURL.includes("localhost");

    if(isLocal) {
      completionRequest["messages"] = Utils.normalizeWithBlanks(safeMessages);
      completionRequest["stream_options"] = {
        include_usage: true
      };
    } else {
      completionRequest = {...completionRequest, ...getTuningParams()};
    }

    return completionRequest;
    if (isLocal && chunk.usage) {
      dataCallback({
        type: 'chat',
        data: {
          choices: [
            {
              delta: { content: '' },
              logprobs: null,
              finish_reason: 'stop',
            },
          ],
        },
      });
      return true; // Break the stream
    }

  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
    const isLocal = chunk.usage !== undefined;

    if (isLocal && chunk.usage) {
      dataCallback({
        type: "chat",
        data: {
          choices: [
            {
              delta: { content: "" },
              logprobs: null,
              finish_reason: "stop",
            },
          ],
        },
      });
      return true; // Break the stream
    }

    dataCallback({ type: "chat", data: chunk });
    return false; // Continue the stream
  }
    dataCallback({ type: 'chat', data: chunk });
    return false; // Continue the stream
  }
}

export class MlxOmniChatSdk {
  private static provider = new MlxOmniChatProvider();
  private static provider = new MlxOmniChatProvider();

  static async handleMlxOmniStream(
    ctx: any,
    dataCallback: (data: any) => any,
  ) {
    if (!ctx.messages?.length) {
      return new Response("No messages provided", { status: 400 });
    }

    return this.provider.handleStream(
      {
        systemPrompt: ctx.systemPrompt,
        preprocessedContext: ctx.preprocessedContext,
        maxTokens: ctx.maxTokens,
        messages: Utils.normalizeWithBlanks(ctx.messages),
        model: ctx.model,
        env: ctx.env
      },
      dataCallback,
    );
  static async handleMlxOmniStream(ctx: any, dataCallback: (data: any) => any) {
    if (!ctx.messages?.length) {
      return new Response('No messages provided', { status: 400 });
    }

    return this.provider.handleStream(
      {
        systemPrompt: ctx.systemPrompt,
        preprocessedContext: ctx.preprocessedContext,
        maxTokens: ctx.maxTokens,
        messages: Utils.normalizeWithBlanks(ctx.messages),
        model: ctx.model,
        env: ctx.env,
      },
      dataCallback,
    );
  }
}
@@ -1,74 +1,75 @@
import { OpenAI } from "openai";
import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider.ts";
import {ProviderRepository} from "./_ProviderRepository";
import { OpenAI } from 'openai';

import { ProviderRepository } from './_ProviderRepository';
import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

export class OllamaChatProvider extends BaseChatProvider {
  getOpenAIClient(param: CommonProviderParams): OpenAI {
    return new OpenAI({
      baseURL: param.env.OLLAMA_API_ENDPOINT ?? ProviderRepository.OPENAI_COMPAT_ENDPOINTS.ollama ,
      apiKey: param.env.OLLAMA_API_KEY,
    });
  getOpenAIClient(param: CommonProviderParams): OpenAI {
    return new OpenAI({
      baseURL: param.env.OLLAMA_API_ENDPOINT ?? ProviderRepository.OPENAI_COMPAT_ENDPOINTS.ollama,
      apiKey: param.env.OLLAMA_API_KEY,
    });
  }

  getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
    const tuningParams = {
      temperature: 0.75,
    };

    const getTuningParams = () => {
      return tuningParams;
    };

    return {
      model: param.model,
      messages: safeMessages,
      stream: true,
      ...getTuningParams(),
    };
  }

  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
    if (chunk.choices && chunk.choices[0]?.finish_reason === 'stop') {
      dataCallback({ type: 'chat', data: chunk });
      return true;
    }

  getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
    const tuningParams = {
      temperature: 0.75,
    };

    const getTuningParams = () => {
      return tuningParams;
    };

    return {
      model: param.model,
      messages: safeMessages,
      stream: true,
      ...getTuningParams(),
    };
  }

  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
    if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
      dataCallback({ type: "chat", data: chunk });
      return true;
    }

    dataCallback({ type: "chat", data: chunk });
    return false;
  }
    dataCallback({ type: 'chat', data: chunk });
    return false;
  }
}

export class OllamaChatSdk {
  private static provider = new OllamaChatProvider();
  private static provider = new OllamaChatProvider();

  static async handleOllamaStream(
    ctx: {
      openai: OpenAI;
      systemPrompt: any;
      preprocessedContext: any;
      maxTokens: unknown | number | undefined;
      messages: any;
      disableWebhookGeneration: boolean;
      model: any;
      env: Env;
    },
    dataCallback: (data: any) => any,
  ) {
    if (!ctx.messages?.length) {
      return new Response("No messages provided", { status: 400 });
    }

    return this.provider.handleStream(
      {
        systemPrompt: ctx.systemPrompt,
        preprocessedContext: ctx.preprocessedContext,
        maxTokens: ctx.maxTokens,
        messages: ctx.messages,
        model: ctx.model,
        env: ctx.env,
        disableWebhookGeneration: ctx.disableWebhookGeneration,
      },
      dataCallback,
    );
  static async handleOllamaStream(
    ctx: {
      openai: OpenAI;
      systemPrompt: any;
      preprocessedContext: any;
      maxTokens: unknown | number | undefined;
      messages: any;
      disableWebhookGeneration: boolean;
      model: any;
      env: Env;
    },
    dataCallback: (data: any) => any,
  ) {
    if (!ctx.messages?.length) {
      return new Response('No messages provided', { status: 400 });
    }

    return this.provider.handleStream(
      {
        systemPrompt: ctx.systemPrompt,
        preprocessedContext: ctx.preprocessedContext,
        maxTokens: ctx.maxTokens,
        messages: ctx.messages,
        model: ctx.model,
        env: ctx.env,
        disableWebhookGeneration: ctx.disableWebhookGeneration,
      },
      dataCallback,
    );
  }
}
@@ -1,16 +1,21 @@
import { OpenAI } from "openai";
import { Utils } from "../lib/utils.ts";
import { ChatCompletionCreateParamsStreaming } from "openai/resources/chat/completions/completions";
import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider.ts";
import { OpenAI } from 'openai';
import { ChatCompletionCreateParamsStreaming } from 'openai/resources/chat/completions/completions';

import { Utils } from '../lib/utils.ts';

import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

export class OpenAiChatProvider extends BaseChatProvider {
  getOpenAIClient(param: CommonProviderParams): OpenAI {
    return param.openai as OpenAI;
  }

  getStreamParams(param: CommonProviderParams, safeMessages: any[]): ChatCompletionCreateParamsStreaming {
  getStreamParams(
    param: CommonProviderParams,
    safeMessages: any[],
  ): ChatCompletionCreateParamsStreaming {
    const isO1 = () => {
      if (param.model === "o1-preview" || param.model === "o1-mini") {
      if (param.model === 'o1-preview' || param.model === 'o1-mini') {
        return true;
      }
    };
@@ -27,8 +32,8 @@ export class OpenAiChatProvider extends BaseChatProvider {

    const getTuningParams = () => {
      if (isO1()) {
        tuningParams["temperature"] = 1;
        tuningParams["max_completion_tokens"] = (param.maxTokens as number) + 10000;
        tuningParams['temperature'] = 1;
        tuningParams['max_completion_tokens'] = (param.maxTokens as number) + 10000;
        return tuningParams;
      }
      return gpt4oTuningParams;
@@ -37,19 +42,19 @@ export class OpenAiChatProvider extends BaseChatProvider {
    let completionRequest: ChatCompletionCreateParamsStreaming = {
      model: param.model,
      stream: true,
      messages: safeMessages
      messages: safeMessages,
    };

    const client = this.getOpenAIClient(param);
    const isLocal = client.baseURL.includes("localhost");
    const isLocal = client.baseURL.includes('localhost');

    if(isLocal) {
      completionRequest["messages"] = Utils.normalizeWithBlanks(safeMessages);
      completionRequest["stream_options"] = {
        include_usage: true
    if (isLocal) {
      completionRequest['messages'] = Utils.normalizeWithBlanks(safeMessages);
      completionRequest['stream_options'] = {
        include_usage: true,
      };
    } else {
      completionRequest = {...completionRequest, ...getTuningParams()};
      completionRequest = { ...completionRequest, ...getTuningParams() };
    }

    return completionRequest;
@@ -60,13 +65,13 @@ export class OpenAiChatProvider extends BaseChatProvider {

    if (isLocal && chunk.usage) {
      dataCallback({
        type: "chat",
        type: 'chat',
        data: {
          choices: [
            {
              delta: { content: "" },
              delta: { content: '' },
              logprobs: null,
              finish_reason: "stop",
              finish_reason: 'stop',
            },
          ],
        },
@@ -74,7 +79,7 @@ export class OpenAiChatProvider extends BaseChatProvider {
      return true; // Break the stream
    }

    dataCallback({ type: "chat", data: chunk });
    dataCallback({ type: 'chat', data: chunk });
    return false; // Continue the stream
  }
}
@@ -95,7 +100,7 @@ export class OpenAiChatSdk {
    dataCallback: (data: any) => any,
  ) {
    if (!ctx.messages?.length) {
      return new Response("No messages provided", { status: 400 });
      return new Response('No messages provided', { status: 400 });
    }

    return this.provider.handleStream(
@@ -1,73 +1,74 @@
import { OpenAI } from "openai";
import { BaseChatProvider, CommonProviderParams } from "./chat-stream-provider.ts";
import { OpenAI } from 'openai';

import { BaseChatProvider, CommonProviderParams } from './chat-stream-provider.ts';

export class XaiChatProvider extends BaseChatProvider {
  getOpenAIClient(param: CommonProviderParams): OpenAI {
    return new OpenAI({
      baseURL: "https://api.x.ai/v1",
      apiKey: param.env.XAI_API_KEY,
    });
  getOpenAIClient(param: CommonProviderParams): OpenAI {
    return new OpenAI({
      baseURL: 'https://api.x.ai/v1',
      apiKey: param.env.XAI_API_KEY,
    });
  }

  getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
    const tuningParams = {
      temperature: 0.75,
    };

    const getTuningParams = () => {
      return tuningParams;
    };

    return {
      model: param.model,
      messages: safeMessages,
      stream: true,
      ...getTuningParams(),
    };
  }

  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
    if (chunk.choices && chunk.choices[0]?.finish_reason === 'stop') {
      dataCallback({ type: 'chat', data: chunk });
      return true;
    }

  getStreamParams(param: CommonProviderParams, safeMessages: any[]): any {
    const tuningParams = {
      temperature: 0.75,
    };

    const getTuningParams = () => {
      return tuningParams;
    };

    return {
      model: param.model,
      messages: safeMessages,
      stream: true,
      ...getTuningParams(),
    };
  }

  async processChunk(chunk: any, dataCallback: (data: any) => void): Promise<boolean> {
    if (chunk.choices && chunk.choices[0]?.finish_reason === "stop") {
      dataCallback({ type: "chat", data: chunk });
      return true;
    }

    dataCallback({ type: "chat", data: chunk });
    return false;
  }
    dataCallback({ type: 'chat', data: chunk });
    return false;
  }
}

export class XaiChatSdk {
  private static provider = new XaiChatProvider();
  private static provider = new XaiChatProvider();

  static async handleXaiStream(
    ctx: {
      openai: OpenAI;
      systemPrompt: any;
      preprocessedContext: any;
      maxTokens: unknown | number | undefined;
      messages: any;
      disableWebhookGeneration: boolean;
      model: any;
      env: Env;
    },
    dataCallback: (data: any) => any,
  ) {
    if (!ctx.messages?.length) {
      return new Response("No messages provided", { status: 400 });
    }

    return this.provider.handleStream(
      {
        systemPrompt: ctx.systemPrompt,
        preprocessedContext: ctx.preprocessedContext,
        maxTokens: ctx.maxTokens,
        messages: ctx.messages,
        model: ctx.model,
        env: ctx.env,
        disableWebhookGeneration: ctx.disableWebhookGeneration,
      },
      dataCallback,
    );
  static async handleXaiStream(
    ctx: {
      openai: OpenAI;
      systemPrompt: any;
      preprocessedContext: any;
      maxTokens: unknown | number | undefined;
      messages: any;
      disableWebhookGeneration: boolean;
      model: any;
      env: Env;
    },
    dataCallback: (data: any) => any,
  ) {
    if (!ctx.messages?.length) {
      return new Response('No messages provided', { status: 400 });
    }

    return this.provider.handleStream(
      {
        systemPrompt: ctx.systemPrompt,
        preprocessedContext: ctx.preprocessedContext,
        maxTokens: ctx.maxTokens,
        messages: ctx.messages,
        model: ctx.model,
        env: ctx.env,
        disableWebhookGeneration: ctx.disableWebhookGeneration,
      },
      dataCallback,
    );
  }
}
@@ -1,103 +1,99 @@
import {BunSqliteKVNamespace} from "./storage/BunSqliteKVNamespace";
import {readdir} from 'node:fs/promises';
import type { RequestLike } from "itty-router";
import { readdir } from 'node:fs/promises';

import ServerCoordinator from "./durable-objects/ServerCoordinatorBun";
import Server from ".";
import { config } from 'dotenv';
import type { RequestLike } from 'itty-router';

import {config} from "dotenv";
import ServerCoordinator from './durable-objects/ServerCoordinatorBun';
import { BunSqliteKVNamespace } from './storage/BunSqliteKVNamespace';

import Server from '.';

const router = Server.Router();

config({
  path: ".env",
  debug: true,
  // defaults: {
  //   EVENTSOURCE_HOST: "https://eventsource.seemueller.io",
  // }
})
  path: '.env',
  debug: true,
  // defaults: {
  //   EVENTSOURCE_HOST: "https://eventsource.seemueller.io",
  // }
});

export default {
  port: 3003,
  fetch: async (request: RequestLike, env: { [key: string]: any; }, ctx: any) =>{
    // console.log("[trace] request: ", request.method, request.url, "headers: ", request.headers.get("referer"), "body: ", request.body, "env: ", env, "ctx: ", ctx, "")
  port: 3003,
  fetch: async (request: RequestLike, env: { [key: string]: any }, ctx: any) => {
    // console.log("[trace] request: ", request.method, request.url, "headers: ", request.headers.get("referer"), "body: ", request.body, "env: ", env, "ctx: ", ctx, "")

    env["SERVER_COORDINATOR"] = ServerCoordinator;
    env["ASSETS"] = assetHandler.ASSETS;
    env["EVENTSOURCE_HOST"] = process.env.EVENTSOURCE_HOST;
    env["GROQ_API_KEY"] = process.env.GROQ_API_KEY;
    env["ANTHROPIC_API_KEY"] = process.env.ANTHROPIC_API_KEY;
    env["FIREWORKS_API_KEY"] = process.env.FIREWORKS_API_KEY;
    env["XAI_API_KEY"] = process.env.XAI_API_KEY;
    env["CEREBRAS_API_KEY"] = process.env.CEREBRAS_API_KEY;
    env["CLOUDFLARE_API_KEY"] = process.env.CLOUDFLARE_API_KEY;
    env["CLOUDFLARE_ACCOUNT_ID"] = process.env.CLOUDFLARE_ACCOUNT_ID;
    env["MLX_API_KEY"] = process.env.MLX_API_KEY;
    env["OLLAMA_API_KEY"] = process.env.OLLAMA_API_KEY;
    env["KV_STORAGE"] = new BunSqliteKVNamespace({namespace: "open-gsio"});
    env['SERVER_COORDINATOR'] = ServerCoordinator;
    env['ASSETS'] = assetHandler.ASSETS;
    env['EVENTSOURCE_HOST'] = process.env.EVENTSOURCE_HOST;
    env['GROQ_API_KEY'] = process.env.GROQ_API_KEY;
    env['ANTHROPIC_API_KEY'] = process.env.ANTHROPIC_API_KEY;
    env['FIREWORKS_API_KEY'] = process.env.FIREWORKS_API_KEY;
    env['XAI_API_KEY'] = process.env.XAI_API_KEY;
    env['CEREBRAS_API_KEY'] = process.env.CEREBRAS_API_KEY;
    env['CLOUDFLARE_API_KEY'] = process.env.CLOUDFLARE_API_KEY;
    env['CLOUDFLARE_ACCOUNT_ID'] = process.env.CLOUDFLARE_ACCOUNT_ID;
    env['MLX_API_KEY'] = process.env.MLX_API_KEY;
    env['OLLAMA_API_KEY'] = process.env.OLLAMA_API_KEY;
    env['KV_STORAGE'] = new BunSqliteKVNamespace({ namespace: 'open-gsio' });

    try {
      const controller = new AbortController();
      const timeout = new Promise((_, reject) =>
        setTimeout(() => {
          controller.abort();
          reject(new Error('Request timeout after 5s'));
        }, 5000),
      );

    try {
      const controller = new AbortController();
      const timeout = new Promise((_, reject) =>
        setTimeout(() => {
          controller.abort();
          reject(new Error('Request timeout after 5s'));
        }, 5000)
      );

      return await Promise.race([
        router.fetch(request, env, ctx),
        timeout
      ]);
    } catch (e) {
      console.error("Error handling request:", e);
      return new Response("Server Error", { status: 500 });
    }

      return await Promise.race([router.fetch(request, env, ctx), timeout]);
    } catch (e) {
      console.error('Error handling request:', e);
      return new Response('Server Error', { status: 500 });
    }
  }
  },
};

export const assetHandler = {
  ASSETS: {
    /**
     * Fetches the requested static asset from local dist
     *
     * @param {Request} request - The incoming Fetch API Request object.
     * @returns {Promise<Response>} A Promise that resolves with the Response for the requested asset,
     * or a 404 Response if the asset is not found or an error occurs.
     */
    async fetch(request: Request): Promise<Response> {
      // Serialize incoming request URL
      const originalUrl = new URL(request.url);
      const url = new URL(request.url);
  ASSETS: {
    /**
     * Fetches the requested static asset from local dist
     *
     * @param {Request} request - The incoming Fetch API Request object.
     * @returns {Promise<Response>} A Promise that resolves with the Response for the requested asset,
     * or a 404 Response if the asset is not found or an error occurs.
     */
    async fetch(request: Request): Promise<Response> {
      // Serialize incoming request URL
      const originalUrl = new URL(request.url);
      const url = new URL(request.url);

      // List all files in the public directory
      const PUBLIC_DIR = new URL('../client/public/', import.meta.url).pathname;
      const publicFiles = await readdir(PUBLIC_DIR, {recursive: true});
      // List all files in the public directory
      const PUBLIC_DIR = new URL('../client/public/', import.meta.url).pathname;
      const publicFiles = await readdir(PUBLIC_DIR, { recursive: true });

      // Get the filename from pathname and remove any path traversal attempts
      const filename = url.pathname.split('/').pop()?.replace(/\.\./g, '') || '';
      // Get the filename from pathname and remove any path traversal attempts
      const filename = url.pathname.split('/').pop()?.replace(/\.\./g, '') || '';

      const isStatic = publicFiles.some(file => file === filename);
      const isStatic = publicFiles.some(file => file === filename);

      if (url.pathname === "/") {
        url.pathname = "/index.html";
      } else if (isStatic && !url.pathname.startsWith('/static')) {
        // leave it alone
      } else if (isStatic) {
        url.pathname = `/static${url.pathname}`;
      }
      if (url.pathname === '/') {
        url.pathname = '/index.html';
      } else if (isStatic && !url.pathname.startsWith('/static')) {
        // leave it alone
      } else if (isStatic) {
        url.pathname = `/static${url.pathname}`;
      }

      const dist = new URL('../client/dist/client', import.meta.url).pathname;
      const dist = new URL('../client/dist/client', import.meta.url).pathname;

      try {
        return new Response(Bun.file(`${dist}${url.pathname}`));
      } catch (error) {
        // Log the error with the original requested path
        console.error(`Error reading asset from path ${originalUrl.pathname}:`, error);
        return new Response(null, { status: 404 });
      }
    }
  }
}
      try {
        return new Response(Bun.file(`${dist}${url.pathname}`));
      } catch (error) {
        // Log the error with the original requested path
        console.error(`Error reading asset from path ${originalUrl.pathname}:`, error);
        return new Response(null, { status: 404 });
      }
    },
  },
};
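// Behavior sketch, derived from the handler above: '/' is rewritten to
// '/index.html', other paths are checked against the filenames in
// client/public, and the resulting pathname is resolved against
// client/dist/client, e.g. a request for '/index.html' is answered with
// Bun.file(`${dist}/index.html`).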
@@ -1,24 +1,20 @@
import { types } from "mobx-state-tree";
import renderPage from "@open-gsio/client/server";
import renderPage from '@open-gsio/client/server';
import { types } from 'mobx-state-tree';

export default types
  .model("StaticAssetStore", {})
  .volatile((self) => ({
  .model('StaticAssetStore', {})
  .volatile(self => ({
    env: {} as Env,
    ctx: {} as ExecutionContext,
  }))
  .actions((self) => ({
  .actions(self => ({
    setEnv(env: Env) {
      self.env = env;
    },
    setCtx(ctx: ExecutionContext) {
      self.ctx = ctx;
    },
    async handleSsr(
      url: string,
      headers: Headers,
      env: Vike.PageContext.env,
    ) {
    async handleSsr(url: string, headers: Headers, env: Vike.PageContext.env) {
      const pageContextInit = {
        urlOriginal: url,
        headersOriginal: headers,
@@ -29,7 +25,6 @@ export default types
      const pageContext = await renderPage(pageContextInit);
      const { httpResponse } = pageContext;

      if (!httpResponse) {
        return null;
      } else {
@@ -41,8 +36,8 @@ export default types
      try {
        return await env.ASSETS.fetch(request);
      } catch (error) {
        console.error("Error serving static asset:", error);
        return new Response("Asset not found", { status: 404 });
        console.error('Error serving static asset:', error);
        return new Response('Asset not found', { status: 404 });
      }
    },
  }));
@@ -1,442 +1,440 @@
import {flow, getSnapshot, types} from 'mobx-state-tree';
/* eslint-disable no-irregular-whitespace */
import { flow, getSnapshot, types } from 'mobx-state-tree';
import OpenAI from 'openai';

import ChatSdk from '../lib/chat-sdk';
import Message from "../models/Message";
import O1Message from "../models/O1Message";
import {OpenAiChatSdk} from "../providers/openai";
import {GroqChatSdk} from "../providers/groq";
import {ClaudeChatSdk} from "../providers/claude";
import {FireworksAiChatSdk} from "../providers/fireworks";
import handleStreamData from "../lib/handleStreamData";
import {GoogleChatSdk} from "../providers/google";
import {XaiChatSdk} from "../providers/xai";
import {CerebrasSdk} from "../providers/cerebras";
import {CloudflareAISdk} from "../providers/cloudflareAi";
import {OllamaChatSdk} from "../providers/ollama";
import {MlxOmniChatProvider, MlxOmniChatSdk} from "../providers/mlx-omni";
import {ProviderRepository} from "../providers/_ProviderRepository";
import handleStreamData from '../lib/handleStreamData';
import Message from '../models/Message';
import O1Message from '../models/O1Message';
import { ProviderRepository } from '../providers/_ProviderRepository';
import { CerebrasSdk } from '../providers/cerebras';
import { ClaudeChatSdk } from '../providers/claude';
import { CloudflareAISdk } from '../providers/cloudflareAi';
import { FireworksAiChatSdk } from '../providers/fireworks';
import { GoogleChatSdk } from '../providers/google';
import { GroqChatSdk } from '../providers/groq';
import { MlxOmniChatProvider, MlxOmniChatSdk } from '../providers/mlx-omni';
import { OllamaChatSdk } from '../providers/ollama';
import { OpenAiChatSdk } from '../providers/openai';
import { XaiChatSdk } from '../providers/xai';

export interface StreamParams {
env: Env;
openai: OpenAI;
messages: any[];
model: string;
systemPrompt: string;
preprocessedContext: any;
maxTokens: number;
}

const activeStreamType = types.model({
name: types.optional(types.string, ""),
maxTokens: types.optional(types.number, 0),
systemPrompt: types.optional(types.string, ""),
model: types.optional(types.string, ""),
messages: types.optional(types.array(types.frozen()), []),
name: types.optional(types.string, ''),
maxTokens: types.optional(types.number, 0),
systemPrompt: types.optional(types.string, ''),
model: types.optional(types.string, ''),
messages: types.optional(types.array(types.frozen()), []),
});

const activeStreamsMap = types.map(
activeStreamType,
);
const activeStreamsMap = types.map(activeStreamType);

const ChatService = types
.model('ChatService', {
openAIApiKey: types.optional(types.string, ""),
openAIBaseURL: types.optional(types.string, ""),
activeStreams: types.optional(
activeStreamsMap,
{}
),
maxTokens: types.number,
systemPrompt: types.string
})
.volatile(self => ({
openai: {} as OpenAI,
env: {} as Env,
}))
.actions(self => {
// Helper functions
const createMessageInstance = (message: any) => {
if (typeof message.content === 'string') {
return Message.create({
role: message.role,
content: message.content,
});
}
if (Array.isArray(message.content)) {
const m = O1Message.create({
role: message.role,
content: message.content.map(item => ({
type: item.type,
text: item.text
})),
});
return m;
}
throw new Error('Unsupported message format');
.model('ChatService', {
openAIApiKey: types.optional(types.string, ''),
openAIBaseURL: types.optional(types.string, ''),
activeStreams: types.optional(activeStreamsMap, {}),
maxTokens: types.number,
systemPrompt: types.string,
})
.volatile(self => ({
openai: {} as OpenAI,
env: {} as Env,
}))
.actions(self => {
// Helper functions
const createMessageInstance = (message: any) => {
if (typeof message.content === 'string') {
return Message.create({
role: message.role,
content: message.content,
});
}
if (Array.isArray(message.content)) {
const m = O1Message.create({
role: message.role,
content: message.content.map(item => ({
type: item.type,
text: item.text,
})),
});
return m;
}
throw new Error('Unsupported message format');
};

const createStreamParams = async (
streamConfig: any,
dynamicContext: any,
durableObject: any,
): Promise<StreamParams> => {
return {
env: self.env,
openai: self.openai,
messages: streamConfig.messages.map(createMessageInstance),
model: streamConfig.model,
systemPrompt: streamConfig.systemPrompt,
preprocessedContext: getSnapshot(dynamicContext),
maxTokens: await durableObject.dynamicMaxTokens(streamConfig.messages, 2000),
};
};

const modelHandlers = {
openai: (params: StreamParams, dataHandler: (data: any) => any) =>
OpenAiChatSdk.handleOpenAiStream(params, dataHandler),
groq: (params: StreamParams, dataHandler: (data: any) => any) =>
GroqChatSdk.handleGroqStream(params, dataHandler),
claude: (params: StreamParams, dataHandler: (data: any) => any) =>
ClaudeChatSdk.handleClaudeStream(params, dataHandler),
fireworks: (params: StreamParams, dataHandler: (data: any) => any) =>
FireworksAiChatSdk.handleFireworksStream(params, dataHandler),
google: (params: StreamParams, dataHandler: (data: any) => any) =>
GoogleChatSdk.handleGoogleStream(params, dataHandler),
xai: (params: StreamParams, dataHandler: (data: any) => any) =>
XaiChatSdk.handleXaiStream(params, dataHandler),
cerebras: (params: StreamParams, dataHandler: (data: any) => any) =>
CerebrasSdk.handleCerebrasStream(params, dataHandler),
cloudflareAI: (params: StreamParams, dataHandler: (data: any) => any) =>
CloudflareAISdk.handleCloudflareAIStream(params, dataHandler),
ollama: (params: StreamParams, dataHandler: (data: any) => any) =>
OllamaChatSdk.handleOllamaStream(params, dataHandler),
mlx: (params: StreamParams, dataHandler: (data: any) => any) =>
MlxOmniChatSdk.handleMlxOmniStream(params, dataHandler),
};

return {
getSupportedModels: flow(function* (): Generator<Promise<unknown>, Response, unknown> {
// ----- Helpers ----------------------------------------------------------
const logger = console;

const useCache = true;

if (useCache) {
// ----- 1. Try cached value ---------------------------------------------
try {
const cached = yield self.env.KV_STORAGE.get('supportedModels');
if (cached) {
const parsed = JSON.parse(cached as string);
if (Array.isArray(parsed) && parsed.length > 0) {
logger.info('Cache hit – returning supportedModels from KV');
return new Response(JSON.stringify(parsed), { status: 200 });
}
logger.warn('Cache entry malformed – refreshing');
throw new Error('Malformed cache entry');
}
} catch (err) {
logger.warn('Error reading/parsing supportedModels cache', err);
}
}

// ----- 2. Build fresh list ---------------------------------------------
const providerRepo = new ProviderRepository(self.env);
const providers = providerRepo.getProviders();

const providerModels = new Map<string, any[]>();
const modelMeta = new Map<string, any>();

for (const provider of providers) {
if (!provider.key) continue;

logger.info(`Fetching models from «${provider.endpoint}»`);

const openai = new OpenAI({ apiKey: provider.key, baseURL: provider.endpoint });

// 2‑a. List models
try {
const listResp = yield openai.models.list(); // <‑‑ async
const models = 'data' in listResp ? listResp.data : listResp;
providerModels.set(provider.name, models);

// 2‑b. Retrieve metadata
for (const mdl of models) {
try {
const meta = yield openai.models.retrieve(mdl.id); // <‑‑ async
modelMeta.set(mdl.id, { ...mdl, ...meta });
} catch (err) {
// logger.error(`Metadata fetch failed for ${mdl.id}`, err);
modelMeta.set(mdl.id, { provider: provider.name, mdl });
}
}
} catch (err) {
logger.error(`Model list failed for provider «${provider.name}»`, err);
}
}

// ----- 3. Merge results -------------------------------------------------
const resultMap = new Map<string, any>();
for (const [provName, models] of providerModels) {
for (const mdl of models) {
resultMap.set(mdl.id, {
id: mdl.id,
provider: provName,
...(modelMeta.get(mdl.id) ?? mdl),
});
}
}
const resultArr = Array.from(resultMap.values());

// ----- 4. Cache fresh list ---------------------------------------------
try {
yield self.env.KV_STORAGE.put(
'supportedModels',
JSON.stringify(resultArr),
{ expirationTtl: 60 * 60 * 24 }, // 24 h
);
logger.info('supportedModels cache refreshed');
} catch (err) {
logger.error('KV put failed for supportedModels', err);
}

// ----- 5. Return --------------------------------------------------------
return new Response(JSON.stringify(resultArr), { status: 200 });
}),
setActiveStream(streamId: string, stream: any) {
const validStream = {
name: stream?.name || 'Unnamed Stream',
maxTokens: stream?.maxTokens || 0,
systemPrompt: stream?.systemPrompt || '',
model: stream?.model || '',
messages: stream?.messages || [],
};

self.activeStreams.set(streamId, validStream);
},
removeActiveStream(streamId: string) {
self.activeStreams.delete(streamId);
},
setEnv(env: Env) {
self.env = env;

if (env.OPENAI_API_ENDPOINT && env.OPENAI_API_ENDPOINT.includes('localhost')) {
self.openai = new OpenAI({
apiKey: self.env.OPENAI_API_KEY,
baseURL: self.env.OPENAI_API_ENDPOINT,
});
} else {
self.openai = new OpenAI({
apiKey: self.openAIApiKey,
baseURL: self.openAIBaseURL,
});
}
},

handleChatRequest: async (request: Request) => {
return ChatSdk.handleChatRequest(request, {
openai: self.openai,
env: self.env,
systemPrompt: self.systemPrompt,
maxTokens: self.maxTokens,
});
},

async runModelHandler(params: {
streamConfig: any;
streamParams: any;
controller: ReadableStreamDefaultController;
encoder: TextEncoder;
streamId: string;
}) {
const { streamConfig, streamParams, controller, encoder, streamId } = params;

const modelFamily = await ProviderRepository.getModelFamily(streamConfig.model, self.env);

const useModelHandler = () => {
return modelHandlers[modelFamily];
};

const handler = useModelHandler();

if (handler) {
try {
await handler(streamParams, handleStreamData(controller, encoder));
} catch (error) {
const message = error.message.toLowerCase();

if (
message.includes('413 ') ||
message.includes('maximum') ||
message.includes('too long') ||
message.includes('too large')
) {
throw new ClientError(
`Error! Content length exceeds limits. Try shortening your message or editing an earlier message.`,
413,
{
model: streamConfig.model,
maxTokens: streamParams.maxTokens,
},
);
}
if (message.includes('429 ')) {
throw new ClientError(
`Error! Rate limit exceeded. Wait a few minutes before trying again.`,
429,
{
model: streamConfig.model,
maxTokens: streamParams.maxTokens,
},
);
}
if (message.includes('404')) {
throw new ClientError(`Something went wrong, try again.`, 413, {});
}
throw error;
}
}
},
const createStreamParams = async (
streamConfig: any,
dynamicContext: any,
durableObject: any
): Promise<StreamParams> => {
return {
env: self.env,
openai: self.openai,
messages: streamConfig.messages.map(createMessageInstance),
model: streamConfig.model,
systemPrompt: streamConfig.systemPrompt,
preprocessedContext: getSnapshot(dynamicContext),
maxTokens: await durableObject.dynamicMaxTokens(
streamConfig.messages,
2000
),
};

const modelHandlers = {
openai: (params: StreamParams, dataHandler: Function) =>
OpenAiChatSdk.handleOpenAiStream(params, dataHandler),
groq: (params: StreamParams, dataHandler: Function) =>
GroqChatSdk.handleGroqStream(params, dataHandler),
claude: (params: StreamParams, dataHandler: Function) =>
ClaudeChatSdk.handleClaudeStream(params, dataHandler),
fireworks: (params: StreamParams, dataHandler: Function) =>
FireworksAiChatSdk.handleFireworksStream(params, dataHandler),
google: (params: StreamParams, dataHandler: Function) =>
GoogleChatSdk.handleGoogleStream(params, dataHandler),
xai: (params: StreamParams, dataHandler: Function) =>
XaiChatSdk.handleXaiStream(params, dataHandler),
cerebras: (params: StreamParams, dataHandler: Function) =>
CerebrasSdk.handleCerebrasStream(params, dataHandler),
cloudflareAI: (params: StreamParams, dataHandler: Function) =>
CloudflareAISdk.handleCloudflareAIStream(params, dataHandler),
ollama: (params: StreamParams, dataHandler: Function) =>
OllamaChatSdk.handleOllamaStream(params, dataHandler),
mlx: (params: StreamParams, dataHandler: Function) =>
MlxOmniChatSdk.handleMlxOmniStream(params, dataHandler),
};
return {
getSupportedModels: flow(function* ():
Generator<Promise<unknown>, Response, unknown> {
// ----- Helpers ----------------------------------------------------------
const logger = console;

const useCache = true;

if(useCache) {
// ----- 1. Try cached value ---------------------------------------------
try {
const cached = yield self.env.KV_STORAGE.get('supportedModels');
if (cached) {
const parsed = JSON.parse(cached as string);
if (Array.isArray(parsed) && parsed.length > 0) {
logger.info('Cache hit – returning supportedModels from KV');
return new Response(JSON.stringify(parsed), { status: 200 });
}
logger.warn('Cache entry malformed – refreshing');
throw new Error('Malformed cache entry');
}
} catch (err) {
logger.warn('Error reading/parsing supportedModels cache', err);
}
}

// ----- 2. Build fresh list ---------------------------------------------
const providerRepo = new ProviderRepository(self.env);
const providers = providerRepo.getProviders();

const providerModels = new Map<string, any[]>();
const modelMeta = new Map<string, any>();

for (const provider of providers) {
if (!provider.key) continue;

logger.info(`Fetching models from «${provider.endpoint}»`);

const openai = new OpenAI({ apiKey: provider.key, baseURL: provider.endpoint });

// 2‑a. List models
try {
const listResp = yield openai.models.list(); // <‑‑ async
const models = ('data' in listResp) ? listResp.data : listResp;
providerModels.set(provider.name, models);

// 2‑b. Retrieve metadata
for (const mdl of models) {
try {
const meta = yield openai.models.retrieve(mdl.id); // <‑‑ async
modelMeta.set(mdl.id, { ...mdl, ...meta });
} catch (err) {
// logger.error(`Metadata fetch failed for ${mdl.id}`, err);
modelMeta.set(mdl.id, {provider: provider.name, mdl});
}
}
} catch (err) {
logger.error(`Model list failed for provider «${provider.name}»`, err);
}
}

// ----- 3. Merge results -------------------------------------------------
const resultMap = new Map<string, any>();
for (const [provName, models] of providerModels) {
for (const mdl of models) {
resultMap.set(mdl.id, {
id: mdl.id,
provider: provName,
...(modelMeta.get(mdl.id) ?? mdl),
});
}
}
const resultArr = Array.from(resultMap.values());

// ----- 4. Cache fresh list ---------------------------------------------
try {
yield self.env.KV_STORAGE.put(
'supportedModels',
JSON.stringify(resultArr),
{ expirationTtl: 60 * 60 * 24 }, // 24 h
);
logger.info('supportedModels cache refreshed');
} catch (err) {
logger.error('KV put failed for supportedModels', err);
}

// ----- 5. Return --------------------------------------------------------
return new Response(JSON.stringify(resultArr), { status: 200 });
}),
setActiveStream(streamId: string, stream: any) {
const validStream = {
name: stream?.name || "Unnamed Stream",
maxTokens: stream?.maxTokens || 0,
systemPrompt: stream?.systemPrompt || "",
model: stream?.model || "",
messages: stream?.messages || [],
};

self.activeStreams.set(streamId, validStream);
},

removeActiveStream(streamId: string) {
self.activeStreams.delete(streamId);
},
setEnv(env: Env) {
self.env = env;

if(env.OPENAI_API_ENDPOINT && env.OPENAI_API_ENDPOINT.includes("localhost")) {
self.openai = new OpenAI({
apiKey: self.env.OPENAI_API_KEY,
baseURL: self.env.OPENAI_API_ENDPOINT,
});
} else{
self.openai = new OpenAI({
apiKey: self.openAIApiKey,
baseURL: self.openAIBaseURL,
});
}
},

handleChatRequest: async (request: Request) => {
return ChatSdk.handleChatRequest(request, {
openai: self.openai,
env: self.env,
systemPrompt: self.systemPrompt,
maxTokens: self.maxTokens
});
},

async runModelHandler(params: {
streamConfig: any;
streamParams: any;
controller: ReadableStreamDefaultController;
encoder: TextEncoder;
streamId: string;
}) {
const {streamConfig, streamParams, controller, encoder, streamId} = params;

const modelFamily = await ProviderRepository.getModelFamily(streamConfig.model, self.env);

const useModelHandler = () => {
return modelHandlers[modelFamily]
}

const handler = useModelHandler();

if (handler) {
try {
await handler(streamParams, handleStreamData(controller, encoder));
} catch (error) {
const message = error.message.toLowerCase();

if (message.includes("413 ") || (message.includes("maximum") || message.includes("too long") || message.includes("too large"))) {
throw new ClientError(`Error! Content length exceeds limits. Try shortening your message or editing an earlier message.`, 413, {
model: streamConfig.model,
maxTokens: streamParams.maxTokens
})
}
if (message.includes("429 ")) {
throw new ClientError(`Error! Rate limit exceeded. Wait a few minutes before trying again.`, 429, {
model: streamConfig.model,
maxTokens: streamParams.maxTokens
})
}
if (message.includes("404")) {
throw new ClientError(`Something went wrong, try again.`, 413, {})
}
throw error;
}
}
},

createSseReadableStream(params: {
streamId: string;
streamConfig: any;
savedStreamConfig: string;
durableObject: any;
}) {
const {streamId, streamConfig, savedStreamConfig, durableObject} = params;

return new ReadableStream({
async start(controller) {
const encoder = new TextEncoder();

try {
const dynamicContext = Message.create(streamConfig.preprocessedContext);

// Process the stream data using the appropriate handler
const streamParams = await createStreamParams(
streamConfig,
dynamicContext,
durableObject
);

try {
await self.runModelHandler({
streamConfig,
streamParams,
controller,
encoder,
streamId,
});
} catch (e) {
throw e;
}

} catch (error) {
console.error(`chatService::handleSseStream::${streamId}::Error`, error);

if (error instanceof ClientError) {
controller.enqueue(
encoder.encode(`data: ${JSON.stringify({type: 'error', error: error.message})}\n\n`)
);
} else {
controller.enqueue(
encoder.encode(`data: ${JSON.stringify({
type: 'error',
error: "Server error"
})}\n\n`)
);
}
controller.close();
} finally {
try {
controller.close();
} catch (_) {
}
}
},
});
},

handleSseStream: flow(function* (streamId: string): Generator<Promise<string>, Response, unknown> {
// Check if a stream is already active for this ID
if (self.activeStreams.has(streamId)) {
return new Response('Stream already active', {status: 409});
}

// Retrieve the stream configuration from the durable object
const objectId = self.env.SERVER_COORDINATOR.idFromName('stream-index');
const durableObject = self.env.SERVER_COORDINATOR.get(objectId);
const savedStreamConfig = yield durableObject.getStreamData(streamId);

if (!savedStreamConfig) {
return new Response('Stream not found', {status: 404});
}

const streamConfig = JSON.parse(savedStreamConfig);

const stream = self.createSseReadableStream({
streamId,
streamConfig,
savedStreamConfig,
durableObject,
});

// Use `tee()` to create two streams: one for processing and one for the response
const [processingStream, responseStream] = stream.tee();

self.setActiveStream(streamId, {
...streamConfig,
});

processingStream.pipeTo(
new WritableStream({
close() {
self.removeActiveStream(streamId);
},
})

// Return the second stream as the response
return new Response(responseStream, {
headers: {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
},
});
}),

};
});
createSseReadableStream(params: {
streamId: string;
streamConfig: any;
savedStreamConfig: string;
durableObject: any;
}) {
const { streamId, streamConfig, savedStreamConfig, durableObject } = params;

return new ReadableStream({
async start(controller) {
const encoder = new TextEncoder();

try {
const dynamicContext = Message.create(streamConfig.preprocessedContext);

// Process the stream data using the appropriate handler
const streamParams = await createStreamParams(
streamConfig,
dynamicContext,
durableObject,
);

await self.runModelHandler({
streamConfig,
streamParams,
controller,
encoder,
streamId,
});
} catch (error) {
console.error(`chatService::handleSseStream::${streamId}::Error`, error);

if (error instanceof ClientError) {
controller.enqueue(
encoder.encode(
`data: ${JSON.stringify({ type: 'error', error: error.message })}\n\n`,
),
);
} else {
controller.enqueue(
encoder.encode(
`data: ${JSON.stringify({
type: 'error',
error: 'Server error',
})}\n\n`,
),
);
}
controller.close();
} finally {
try {
controller.close();
} catch (_) {
// Ignore errors when closing the controller, as it might already be closed
}
}
},
});
},

handleSseStream: flow(function* (
streamId: string,
): Generator<Promise<string>, Response, unknown> {
// Check if a stream is already active for this ID
if (self.activeStreams.has(streamId)) {
return new Response('Stream already active', { status: 409 });
}

// Retrieve the stream configuration from the durable object
const objectId = self.env.SERVER_COORDINATOR.idFromName('stream-index');
const durableObject = self.env.SERVER_COORDINATOR.get(objectId);
const savedStreamConfig = yield durableObject.getStreamData(streamId);

if (!savedStreamConfig) {
return new Response('Stream not found', { status: 404 });
}

const streamConfig = JSON.parse(savedStreamConfig);

const stream = self.createSseReadableStream({
streamId,
streamConfig,
savedStreamConfig,
durableObject,
});

// Use `tee()` to create two streams: one for processing and one for the response
const [processingStream, responseStream] = stream.tee();

self.setActiveStream(streamId, {
...streamConfig,
});

processingStream.pipeTo(
new WritableStream({
close() {
self.removeActiveStream(streamId);
},
}),
);

// Return the second stream as the response
return new Response(responseStream, {
headers: {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
Connection: 'keep-alive',
},
});
}),
};
});

/**
 * ClientError
 * A custom construct for sending client-friendly errors via the controller in a structured and controlled manner.
 */
export class ClientError extends Error {
public statusCode: number;
public details: Record<string, any>;

constructor(message: string, statusCode: number, details: Record<string, any> = {}) {
super(message);
this.name = 'ClientError';
this.statusCode = statusCode;
this.details = details;
Object.setPrototypeOf(this, ClientError.prototype);
}

/**
 * Formats the error for SSE-compatible data transmission.
 */
public formatForSSE(): string {
return JSON.stringify({
type: 'error',
message: this.message,
details: this.details,
statusCode: this.statusCode,
});
}
}

export default ChatService;
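
For reference, a client of `handleSseStream` reads `text/event-stream` frames and watches for the `{ type: 'error' }` payloads the service enqueues. A hedged sketch; the `/api/streams/:id` route is hypothetical, and only the `data: {...}\n\n` frame format comes from the code above:

// Hypothetical consumer of the SSE response returned by handleSseStream.
async function consumeStream(streamId: string): Promise<void> {
  const res = await fetch(`/api/streams/${streamId}`); // route is an assumption
  const reader = res.body!.pipeThrough(new TextDecoderStream()).getReader();

  for (;;) {
    const { value, done } = await reader.read();
    if (done) break;
    // Naive framing for illustration; production code should buffer
    // partial frames that straddle chunk boundaries.
    for (const frame of value.split('\n\n')) {
      if (!frame.startsWith('data: ')) continue;
      const payload = JSON.parse(frame.slice('data: '.length));
      if (payload.type === 'error') {
        console.error('stream error:', payload.error);
      }
    }
  }
}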
@@ -1,14 +1,15 @@
// ContactService.ts
import { types, flow, getSnapshot } from "mobx-state-tree";
import ContactRecord from "../models/ContactRecord.ts";
import { types, flow, getSnapshot } from 'mobx-state-tree';

import ContactRecord from '../models/ContactRecord.ts';

export default types
.model("ContactStore", {})
.volatile((self) => ({
.model('ContactStore', {})
.volatile(self => ({
env: {} as Env,
ctx: {} as ExecutionContext,
}))
.actions((self) => ({
.actions(self => ({
setEnv(env: Env) {
self.env = env;
},
@@ -17,12 +18,7 @@ export default types
},
handleContact: flow(function* (request: Request) {
try {
const {
markdown: message,
email,
firstname,
lastname,
} = yield request.json();
const { markdown: message, email, firstname, lastname } = yield request.json();
const contactRecord = ContactRecord.create({
message,
timestamp: new Date().toISOString(),
@@ -37,19 +33,19 @@ export default types
);

yield self.env.EMAIL_SERVICE.sendMail({
to: "geoff@seemueller.io",
to: 'geoff@seemueller.io',
plaintextMessage: `WEBSITE CONTACT FORM SUBMISSION
${firstname} ${lastname}
${email}
${message}`,
});

return new Response("Contact record saved successfully", {
return new Response('Contact record saved successfully', {
status: 200,
});
} catch (error) {
console.error("Error processing contact request:", error);
return new Response("Failed to process contact request", {
console.error('Error processing contact request:', error);
return new Response('Failed to process contact request', {
status: 500,
});
}
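
The handler destructures `markdown`, `email`, `firstname`, and `lastname` from the request body, so a caller would post JSON shaped like the sketch below; the `/api/contact` path is an assumption, only the field names come from the code above:

// Hypothetical client call against the contact handler.
await fetch('/api/contact', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    markdown: 'Hello — interested in the project.',
    email: 'jane@example.com',
    firstname: 'Jane',
    lastname: 'Doe',
  }),
});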
@@ -1,13 +1,14 @@
import { types, flow, getSnapshot } from "mobx-state-tree";
import FeedbackRecord from "../models/FeedbackRecord.ts";
import { types, flow, getSnapshot } from 'mobx-state-tree';

import FeedbackRecord from '../models/FeedbackRecord.ts';

export default types
.model("FeedbackStore", {})
.volatile((self) => ({
.model('FeedbackStore', {})
.volatile(self => ({
env: {} as Env,
ctx: {} as ExecutionContext,
}))
.actions((self) => ({
.actions(self => ({
setEnv(env: Env) {
self.env = env;
},
@@ -19,7 +20,7 @@ export default types
const {
feedback,
timestamp = new Date().toISOString(),
user = "Anonymous",
user = 'Anonymous',
} = yield request.json();

const feedbackRecord = FeedbackRecord.create({
@@ -35,17 +36,17 @@ export default types
);

yield self.env.EMAIL_SERVICE.sendMail({
to: "geoff@seemueller.io",
to: 'geoff@seemueller.io',
plaintextMessage: `NEW FEEDBACK SUBMISSION
User: ${user}
Feedback: ${feedback}
Timestamp: ${timestamp}`,
});

return new Response("Feedback saved successfully", { status: 200 });
return new Response('Feedback saved successfully', { status: 200 });
} catch (error) {
console.error("Error processing feedback request:", error);
return new Response("Failed to process feedback request", {
console.error('Error processing feedback request:', error);
return new Response('Failed to process feedback request', {
status: 500,
});
}
@@ -1,14 +1,14 @@
import { types, flow } from "mobx-state-tree";
import { types, flow } from 'mobx-state-tree';

const MetricsService = types
.model("MetricsService", {
.model('MetricsService', {
isCollectingMetrics: types.optional(types.boolean, true),
})
.volatile((self) => ({
.volatile(self => ({
env: {} as Env,
ctx: {} as ExecutionContext,
}))
.actions((self) => ({
.actions(self => ({
setEnv(env: Env) {
self.env = env;
},
@@ -17,35 +17,35 @@ const MetricsService = types
},
handleMetricsRequest: flow(function* (request: Request) {
const url = new URL(request.url);
let proxyUrl = "";
if(self.env.METRICS_HOST) {
let proxyUrl = '';
if (self.env.METRICS_HOST) {
proxyUrl = new URL(`${self.env.METRICS_HOST}${url.pathname}${url.search}`).toString();
}

if(proxyUrl) {
if (proxyUrl) {
try {
const response = yield fetch(proxyUrl, {
method: request.method,
headers: request.headers,
body: ["GET", "HEAD"].includes(request.method) ? null : request.body,
redirect: "follow",
body: ['GET', 'HEAD'].includes(request.method) ? null : request.body,
redirect: 'follow',
});

return response;
} catch (error) {
console.error("Failed to proxy metrics request:", error);
return new Response("metrics misconfigured", { status: 200 });
console.error('Failed to proxy metrics request:', error);
return new Response('metrics misconfigured', { status: 200 });
}
} else {
const event = {
method: request.method,
headers: request.headers,
body: ["GET", "HEAD"].includes(request.method) ? null : request.body,
}
if(self.env?.KV_STORAGE?.put) {
body: ['GET', 'HEAD'].includes(request.method) ? null : request.body,
};
if (self.env?.KV_STORAGE?.put) {
self.env.KV_STORAGE.put(`metrics_events::${crypto.randomUUID()}`, JSON.stringify(event));
} else {
console.log("Detected metrics misconfiguration...not storing")
console.log('Detected metrics misconfiguration...not storing');
}
}
}),
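
One detail in the fallback branch: the `KV_STORAGE.put` is fired without `yield`, so on some runtimes the write can be cancelled when the request settles. If that matters, a common Workers pattern is to pin the promise with `ctx.waitUntil`. A sketch, under the assumption that the service's volatile `ctx` has been populated via `setCtx`:

// Sketch: keep the event write alive past the response.
const put = self.env.KV_STORAGE.put(
  `metrics_events::${crypto.randomUUID()}`,
  JSON.stringify(event),
);
self.ctx?.waitUntil?.(put); // no-op if ctx was never provided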
@@ -1,12 +1,12 @@
import { types } from "mobx-state-tree";
import { types } from 'mobx-state-tree';

const TransactionService = types
.model("TransactionService", {})
.volatile((self) => ({
.model('TransactionService', {})
.volatile(self => ({
env: {} as Env,
ctx: {} as ExecutionContext,
}))
.actions((self) => ({
.actions(self => ({
setEnv(env: Env) {
self.env = env;
},
@@ -15,7 +15,7 @@ const TransactionService = types
},

routeAction: async function (action: string, requestBody: any) {
const actionHandlers: Record<string, Function> = {
const actionHandlers: Record<string, (data: any) => Promise<any>> = {
PREPARE_TX: self.handlePrepareTransaction,
};

@@ -30,9 +30,9 @@ const TransactionService = types
handlePrepareTransaction: async function (data: []) {
const [donerId, currency, amount] = data;
const CreateWalletEndpoints = {
bitcoin: "/api/btc/create",
ethereum: "/api/eth/create",
dogecoin: "/api/doge/create",
bitcoin: '/api/btc/create',
ethereum: '/api/eth/create',
dogecoin: '/api/doge/create',
};

const walletRequest = await fetch(
@@ -40,8 +40,7 @@ const TransactionService = types
);
const walletResponse = await walletRequest.text();
// console.log({ walletRequest: walletResponse });
const [address, privateKey, publicKey, phrase] =
JSON.parse(walletResponse);
const [address, privateKey, publicKey, phrase] = JSON.parse(walletResponse);

const txKey = crypto.randomUUID();

@@ -73,19 +72,19 @@ const TransactionService = types
try {
const raw = await request.text();
// console.log({ raw });
const [action, ...payload] = raw.split(",");
const [action, ...payload] = raw.split(',');

const response = await self.routeAction(action, payload);

return new Response(JSON.stringify(response), {
status: 200,
headers: { "Content-Type": "application/json" },
headers: { 'Content-Type': 'application/json' },
});
} catch (error) {
console.error("Error handling transaction:", error);
return new Response(JSON.stringify({ error: "Transaction failed" }), {
console.error('Error handling transaction:', error);
return new Response(JSON.stringify({ error: 'Transaction failed' }), {
status: 500,
headers: { "Content-Type": "application/json" },
headers: { 'Content-Type': 'application/json' },
});
}
},
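
The transact handler speaks a small comma-separated protocol: the first field is the action, the rest is the payload, and for `PREPARE_TX` the payload order is `[donorId, currency, amount]`, matching the destructuring in `handlePrepareTransaction`. A sketch of a round trip; the `/api/tx` path is an assumption, while the `depositAddress`/`txKey` response shape matches the tests later in this commit:

// Hypothetical round trip against the CSV-style action protocol.
const res = await fetch('/api/tx', {
  method: 'POST',
  body: ['PREPARE_TX', 'donor123', 'bitcoin', '0.01'].join(','),
});
const { depositAddress, txKey } = await res.json();
console.log(depositAddress, txKey);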
@@ -1,5 +1,5 @@
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { getSnapshot } from 'mobx-state-tree';

import AssetService from '../AssetService.ts';

// Mock the vike/server module
@@ -8,26 +8,27 @@ vi.mock('vike/server', () => ({
}));

// Import the mocked renderPage function for assertions
// eslint-disable-next-line import/order
import { renderPage } from 'vike/server';

describe('AssetService', () => {
let assetService;

beforeEach(() => {
// Create a new instance of the service before each test
assetService = AssetService.create();

// Reset mocks
vi.resetAllMocks();
});

describe('Initial state', () => {
it('should have empty env and ctx objects initially', () => {
expect(assetService.env).toEqual({});
expect(assetService.ctx).toEqual({});
});
});

describe('setEnv', () => {
it('should set the environment', () => {
const mockEnv = { ASSETS: { fetch: vi.fn() } };
@@ -35,7 +36,7 @@ describe('AssetService', () => {
expect(assetService.env).toEqual(mockEnv);
});
});

describe('setCtx', () => {
it('should set the execution context', () => {
const mockCtx = { waitUntil: vi.fn() };
@@ -43,18 +44,18 @@ describe('AssetService', () => {
expect(assetService.ctx).toEqual(mockCtx);
});
});

describe('handleSsr', () => {
it('should return null when httpResponse is not available', async () => {
// Setup mock to return a pageContext without httpResponse
vi.mocked(renderPage).mockResolvedValue({});

const url = 'https://example.com';
const headers = new Headers();
const env = {};

const result = await assetService.handleSsr(url, headers, env);

// Verify renderPage was called with correct arguments
expect(renderPage).toHaveBeenCalledWith({
urlOriginal: url,
@@ -62,15 +63,15 @@ describe('AssetService', () => {
fetch: expect.any(Function),
env,
});

// Verify result is null
expect(result).toBeNull();
});

it('should return a Response when httpResponse is available', async () => {
// Create mock stream
const mockStream = new ReadableStream();

// Setup mock to return a pageContext with httpResponse
vi.mocked(renderPage).mockResolvedValue({
httpResponse: {
@@ -79,13 +80,13 @@ describe('AssetService', () => {
getReadableWebStream: () => mockStream,
},
});

const url = 'https://example.com';
const headers = new Headers();
const env = {};

const result = await assetService.handleSsr(url, headers, env);

// Verify renderPage was called with correct arguments
expect(renderPage).toHaveBeenCalledWith({
urlOriginal: url,
@@ -93,72 +94,72 @@ describe('AssetService', () => {
fetch: expect.any(Function),
env,
});

// Verify result is a Response with correct properties
expect(result).toBeInstanceOf(Response);
expect(result.status).toBe(200);
expect(result.headers.get('Content-Type')).toBe('text/html');
});
});

describe('handleStaticAssets', () => {
it('should fetch assets from the environment', async () => {
// Create mock request
const request = new Request('https://example.com/static/image.png');

// Create mock response
const mockResponse = new Response('Mock asset content', {
status: 200,
headers: { 'Content-Type': 'image/png' },
});

// Create mock environment with ASSETS.fetch
const mockEnv = {
ASSETS: {
fetch: vi.fn().mockResolvedValue(mockResponse),
},
};

// Set the environment
assetService.setEnv(mockEnv);

// Call the method
const result = await assetService.handleStaticAssets(request, mockEnv);

// Verify ASSETS.fetch was called with the request
expect(mockEnv.ASSETS.fetch).toHaveBeenCalledWith(request);

// Verify result is the expected response
expect(result).toBe(mockResponse);
});

it('should return a 404 response when an error occurs', async () => {
// Create mock request
const request = new Request('https://example.com/static/not-found.png');

// Create mock environment with ASSETS.fetch that throws an error
const mockEnv = {
ASSETS: {
fetch: vi.fn().mockRejectedValue(new Error('Asset not found')),
},
};

// Set the environment
assetService.setEnv(mockEnv);

// Call the method
const result = await assetService.handleStaticAssets(request, mockEnv);

// Verify ASSETS.fetch was called with the request
expect(mockEnv.ASSETS.fetch).toHaveBeenCalledWith(request);

// Verify result is a 404 Response
expect(result).toBeInstanceOf(Response);
expect(result.status).toBe(404);

// Verify response body
const text = await result.clone().text();
expect(text).toBe('Asset not found');
});
});
});
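
The `// eslint-disable-next-line import/order` above the `renderPage` import exists because Vitest hoists `vi.mock` calls above all imports at transform time, so the import safely resolves to the mocked module even though `import/order` would normally flag its placement. The pattern in isolation:

import { vi } from 'vitest';

// vi.mock is hoisted above imports at transform time, so this runs first.
vi.mock('vike/server', () => ({
  renderPage: vi.fn(),
}));

// eslint-disable-next-line import/order
import { renderPage } from 'vike/server'; // resolves to the mocked module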
@@ -1,31 +1,28 @@
import {afterEach, beforeEach, describe, expect, it, vi} from 'vitest';
import {getSnapshot} from 'mobx-state-tree';
import ChatService, {ClientError} from '../ChatService.ts';
import { getSnapshot } from 'mobx-state-tree';
import OpenAI from 'openai';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';

import ChatSdk from '../../lib/chat-sdk.ts';
import ChatService, { ClientError } from '../ChatService.ts';
// Create mock OpenAI instance
const mockOpenAIInstance = {
models: {
list: vi.fn().mockResolvedValue({
data: [
{ id: 'mlx-model-1' },
{ id: 'mlx-model-2' },
{ id: 'other-model' }
]
})
data: [{ id: 'mlx-model-1' }, { id: 'mlx-model-2' }, { id: 'other-model' }],
}),
},
chat: {
completions: {
create: vi.fn()
}
create: vi.fn(),
},
},
baseURL: 'http://localhost:8000'
baseURL: 'http://localhost:8000',
};

// Mock dependencies
vi.mock('openai', () => {
return {
default: vi.fn().mockImplementation(() => mockOpenAIInstance)
default: vi.fn().mockImplementation(() => mockOpenAIInstance),
};
});

@@ -33,12 +30,12 @@ vi.mock('../../lib/chat-sdk', () => ({
default: {
handleChatRequest: vi.fn(),
buildAssistantPrompt: vi.fn(),
buildMessageChain: vi.fn()
}
buildMessageChain: vi.fn(),
},
}));

vi.mock('../../lib/handleStreamData', () => ({
default: vi.fn().mockReturnValue(() => {})
default: vi.fn().mockReturnValue(() => {}),
}));

describe('ChatService', () => {
@@ -51,7 +48,7 @@ describe('ChatService', () => {
maxTokens: 2000,
systemPrompt: 'You are a helpful assistant.',
openAIApiKey: 'test-api-key',
openAIBaseURL: 'https://api.openai.com/v1'
openAIBaseURL: 'https://api.openai.com/v1',
});

// Create mock environment
@@ -61,14 +58,16 @@ describe('ChatService', () => {
SERVER_COORDINATOR: {
idFromName: vi.fn().mockReturnValue('test-id'),
get: vi.fn().mockReturnValue({
getStreamData: vi.fn().mockResolvedValue(JSON.stringify({
messages: [],
model: 'gpt-4',
systemPrompt: 'You are a helpful assistant.',
preprocessedContext: {}
}))
})
}
getStreamData: vi.fn().mockResolvedValue(
JSON.stringify({
messages: [],
model: 'gpt-4',
systemPrompt: 'You are a helpful assistant.',
preprocessedContext: {},
}),
),
}),
},
};

// Set the environment using the action
@@ -86,7 +85,7 @@ describe('ChatService', () => {
it('should have the correct initial state', () => {
const freshService = ChatService.create({
maxTokens: 2000,
systemPrompt: 'You are a helpful assistant.'
systemPrompt: 'You are a helpful assistant.',
});

expect(freshService.maxTokens).toBe(2000);
@@ -101,7 +100,7 @@ describe('ChatService', () => {
it('should set the environment and initialize OpenAI client with local endpoint', () => {
const localEnv = {
...mockEnv,
OPENAI_API_ENDPOINT: 'http://localhost:8000'
OPENAI_API_ENDPOINT: 'http://localhost:8000',
};

// Reset the mock to track new calls
@@ -112,7 +111,7 @@ describe('ChatService', () => {
expect(chatService.env).toEqual(localEnv);
expect(OpenAI).toHaveBeenCalledWith({
apiKey: localEnv.OPENAI_API_KEY,
baseURL: localEnv.OPENAI_API_ENDPOINT
baseURL: localEnv.OPENAI_API_ENDPOINT,
});
});

@@ -122,7 +121,7 @@ describe('ChatService', () => {
maxTokens: 2000,
systemPrompt: 'You are a helpful assistant.',
openAIApiKey: 'test-api-key',
openAIBaseURL: 'https://api.openai.com/v1'
openAIBaseURL: 'https://api.openai.com/v1',
});

// Reset the mock to track new calls
@@ -133,7 +132,7 @@ describe('ChatService', () => {
expect(service.env).toEqual(mockEnv);
expect(OpenAI).toHaveBeenCalledWith({
apiKey: 'test-api-key',
baseURL: 'https://api.openai.com/v1'
baseURL: 'https://api.openai.com/v1',
});
});
});
@@ -146,7 +145,7 @@ describe('ChatService', () => {
maxTokens: 1000,
systemPrompt: 'You are a helpful assistant.',
model: 'gpt-4',
messages: []
messages: [],
};

// Set active stream
@@ -170,7 +169,7 @@ describe('ChatService', () => {
maxTokens: 0,
systemPrompt: '',
model: '',
messages: []
messages: [],
});

// Set active stream with partial data
@@ -181,7 +180,7 @@ describe('ChatService', () => {
maxTokens: 0,
systemPrompt: '',
model: '',
messages: []
messages: [],
});
});
});
@@ -189,21 +188,21 @@ describe('ChatService', () => {
describe('getSupportedModels', () => {
it('should return local models when using localhost endpoint', async () => {
const originalResponseJson = Response.json;
Response.json = vi.fn().mockImplementation((data) => {
Response.json = vi.fn().mockImplementation(data => {
return {
json: async () => data
json: async () => data,
};
});

const localEnv = {
...mockEnv,
OPENAI_API_ENDPOINT: 'http://localhost:8000'
OPENAI_API_ENDPOINT: 'http://localhost:8000',
};

// Create a new service instance for this test
const localService = ChatService.create({
maxTokens: 2000,
systemPrompt: 'You are a helpful assistant.'
systemPrompt: 'You are a helpful assistant.',
});

localService.setEnv(localEnv);
@@ -211,7 +210,7 @@ describe('ChatService', () => {
// Mock the implementation of getSupportedModels for this test
const originalGetSupportedModels = localService.getSupportedModels;
localService.getSupportedModels = vi.fn().mockResolvedValueOnce({
json: async () => ['mlx-model-1', 'mlx-model-2']
json: async () => ['mlx-model-1', 'mlx-model-2'],
});

const response = await localService.getSupportedModels();
@@ -238,7 +237,7 @@ describe('ChatService', () => {
openai: chatService.openai,
env: mockEnv,
systemPrompt: chatService.systemPrompt,
maxTokens: chatService.maxTokens
maxTokens: chatService.maxTokens,
});

expect(result).toBe(mockResponse);
@@ -263,7 +262,7 @@ describe('ChatService', () => {

// Mock the SERVER_COORDINATOR.get() to return an object with getStreamData
const mockDurableObject = {
getStreamData: vi.fn().mockResolvedValue(null)
getStreamData: vi.fn().mockResolvedValue(null),
};

// Update the mockEnv to use our mock
@@ -271,8 +270,8 @@ describe('ChatService', () => {
...mockEnv,
SERVER_COORDINATOR: {
idFromName: vi.fn().mockReturnValue('test-id'),
get: vi.fn().mockReturnValue(mockDurableObject)
}
get: vi.fn().mockReturnValue(mockDurableObject),
},
};

// Set the environment
@@ -290,15 +289,15 @@ describe('ChatService', () => {
// Create a new service instance for this test
const testService = ChatService.create({
maxTokens: 2000,
systemPrompt: 'You are a helpful assistant.'
systemPrompt: 'You are a helpful assistant.',
});

// Set up minimal environment
testService.setEnv({
SERVER_COORDINATOR: {
idFromName: vi.fn(),
get: vi.fn()
}
get: vi.fn(),
},
});

// Save the original method
@@ -310,10 +309,10 @@ describe('ChatService', () => {
headers: {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive'
Connection: 'keep-alive',
},
status: 200,
text: vi.fn().mockResolvedValue('')
text: vi.fn().mockResolvedValue(''),
});

const result = await testService.handleSseStream(streamId);
@@ -349,7 +348,7 @@ describe('ChatService', () => {
type: 'error',
message: 'Test error',
details: { detail: 'test' },
statusCode: 400
statusCode: 400,
});
});
});
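
A general note on these service tests: MST actions defined with `flow(function* ...)` return promises when invoked, so the tests simply await them; no generator plumbing is needed. A minimal illustration (the model shape here is an assumption for the example):

import { flow, types } from 'mobx-state-tree';
import { expect, it } from 'vitest';

const Svc = types.model('Svc', {}).actions(self => ({
  load: flow(function* () {
    // Inside an MST flow, yield behaves like await.
    const value = yield Promise.resolve(42);
    return value;
  }),
}));

it('awaits a flow action like a promise', async () => {
  await expect(Svc.create().load()).resolves.toBe(42);
});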
@@ -1,7 +1,8 @@
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { getSnapshot } from 'mobx-state-tree';
import ContactService from '../ContactService.ts';
import { describe, it, expect, vi, beforeEach } from 'vitest';

import ContactRecord from '../../models/ContactRecord.ts';
import ContactService from '../ContactService.ts';

describe('ContactService', () => {
let contactService;
@@ -1,7 +1,8 @@
import { describe, it, expect, vi, beforeEach } from 'vitest';
import { getSnapshot } from 'mobx-state-tree';
import FeedbackService from '../FeedbackService.ts';
import { describe, it, expect, vi, beforeEach } from 'vitest';

import FeedbackRecord from '../../models/FeedbackRecord.ts';
import FeedbackService from '../FeedbackService.ts';

describe('FeedbackService', () => {
let feedbackService;
@@ -1,9 +1,10 @@
import {describe, expect, it} from 'vitest';
import MetricsService from "../MetricsService";
import { describe, expect, it } from 'vitest';

import MetricsService from '../MetricsService';

describe('MetricsService', () => {
it("should create a metrics service", () => {
it('should create a metrics service', () => {
const metricsService = MetricsService.create();
expect(metricsService).toBeTruthy();
})
})
});
});
@@ -1,34 +1,38 @@
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
import { getSnapshot, Instance } from 'mobx-state-tree';
|
||||
import { describe, it, expect, vi, beforeEach } from 'vitest';
|
||||
|
||||
import TransactionService from '../TransactionService.ts';
|
||||
|
||||
// Define types for testing
|
||||
type TransactionServiceInstance = Instance<typeof TransactionService>;
|
||||
|
||||
// Mock global types
|
||||
vi.stubGlobal('Response', class MockResponse {
|
||||
status: number;
|
||||
headers: Headers;
|
||||
body: any;
|
||||
vi.stubGlobal(
|
||||
'Response',
|
||||
class MockResponse {
|
||||
status: number;
|
||||
headers: Headers;
|
||||
body: any;
|
||||
|
||||
constructor(body?: any, init?: ResponseInit) {
|
||||
this.body = body;
|
||||
this.status = init?.status || 200;
|
||||
this.headers = new Headers(init?.headers);
|
||||
}
|
||||
constructor(body?: any, init?: ResponseInit) {
|
||||
this.body = body;
|
||||
this.status = init?.status || 200;
|
||||
this.headers = new Headers(init?.headers);
|
||||
}
|
||||
|
||||
clone() {
|
||||
return this;
|
||||
}
|
||||
clone() {
|
||||
return this;
|
||||
}
|
||||
|
||||
async text() {
|
||||
return this.body?.toString() || '';
|
||||
}
|
||||
async text() {
|
||||
return this.body?.toString() || '';
|
||||
}
|
||||
|
||||
async json() {
|
||||
return typeof this.body === 'string' ? JSON.parse(this.body) : this.body;
|
||||
}
|
||||
});
|
||||
async json() {
|
||||
return typeof this.body === 'string' ? JSON.parse(this.body) : this.body;
|
||||
}
|
||||
},
|
||||
);
|
||||
|
||||
describe('TransactionService', () => {
|
||||
let transactionService: TransactionServiceInstance;
|
||||
@@ -83,8 +87,9 @@ describe('TransactionService', () => {
|
||||
|
||||
it('should throw an error for unknown actions', async () => {
|
||||
// Call routeAction with an invalid action
|
||||
await expect(transactionService.routeAction('UNKNOWN_ACTION', ['data']))
|
||||
.rejects.toThrow('No handler for action: UNKNOWN_ACTION');
|
||||
await expect(transactionService.routeAction('UNKNOWN_ACTION', ['data'])).rejects.toThrow(
|
||||
'No handler for action: UNKNOWN_ACTION',
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
@@ -96,8 +101,8 @@ describe('TransactionService', () => {
|
||||
// Mock KV_STORAGE
|
||||
const mockEnv = {
|
||||
KV_STORAGE: {
|
||||
put: vi.fn().mockResolvedValue(undefined)
|
||||
}
|
||||
put: vi.fn().mockResolvedValue(undefined),
|
||||
},
|
||||
};
|
||||
transactionService.setEnv(mockEnv);
|
||||
});
|
||||
@@ -108,31 +113,33 @@ describe('TransactionService', () => {
|
||||
'mock-address',
|
||||
'mock-private-key',
|
||||
'mock-public-key',
|
||||
'mock-phrase'
|
||||
'mock-phrase',
|
||||
]);
|
||||
|
||||
global.fetch.mockResolvedValue({
|
||||
text: vi.fn().mockResolvedValue(mockWalletResponse)
|
||||
text: vi.fn().mockResolvedValue(mockWalletResponse),
|
||||
});
|
||||
|
||||
// Call the method with test data
|
||||
const result = await transactionService.handlePrepareTransaction(['donor123', 'bitcoin', '0.01']);
|
||||
const result = await transactionService.handlePrepareTransaction([
|
||||
'donor123',
|
||||
'bitcoin',
|
||||
'0.01',
|
||||
]);
|
||||
|
||||
// Verify fetch was called with the correct URL
|
||||
expect(global.fetch).toHaveBeenCalledWith(
|
||||
'https://wallets.seemueller.io/api/btc/create'
|
||||
);
|
||||
expect(global.fetch).toHaveBeenCalledWith('https://wallets.seemueller.io/api/btc/create');
|
||||
|
||||
// Verify KV_STORAGE.put was called with the correct data
|
||||
expect(transactionService.env.KV_STORAGE.put).toHaveBeenCalledWith(
|
||||
'transactions::prepared::mock-uuid',
|
||||
expect.stringContaining('mock-address')
|
||||
expect.stringContaining('mock-address'),
|
||||
);
|
||||
|
||||
// Verify the returned data
|
||||
expect(result).toEqual({
|
||||
depositAddress: 'mock-address',
|
||||
txKey: 'mock-uuid'
|
||||
txKey: 'mock-uuid',
|
||||
});
|
||||
});
|
||||
|
||||
@@ -142,29 +149,25 @@ describe('TransactionService', () => {
'mock-address',
'mock-private-key',
'mock-public-key',
'mock-phrase'
'mock-phrase',
]);

global.fetch.mockResolvedValue({
text: vi.fn().mockResolvedValue(mockWalletResponse)
text: vi.fn().mockResolvedValue(mockWalletResponse),
});

// Test with ethereum
await transactionService.handlePrepareTransaction(['donor123', 'ethereum', '0.01']);
expect(global.fetch).toHaveBeenCalledWith(
'https://wallets.seemueller.io/api/eth/create'
);
expect(global.fetch).toHaveBeenCalledWith('https://wallets.seemueller.io/api/eth/create');

// Reset mock and test with dogecoin
vi.resetAllMocks();
global.fetch.mockResolvedValue({
text: vi.fn().mockResolvedValue(mockWalletResponse)
text: vi.fn().mockResolvedValue(mockWalletResponse),
});

await transactionService.handlePrepareTransaction(['donor123', 'dogecoin', '0.01']);
expect(global.fetch).toHaveBeenCalledWith(
'https://wallets.seemueller.io/api/doge/create'
);
expect(global.fetch).toHaveBeenCalledWith('https://wallets.seemueller.io/api/doge/create');
});
});
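Across the three cases the only variation in the asserted URL is the path segment, so the implementation presumably maps currency names to API slugs. A sketch of that mapping inferred from the asserted URLs (walletCreateUrl and CURRENCY_SLUGS are illustrative names, not the service's actual internals):

const WALLET_API_BASE = 'https://wallets.seemueller.io/api';

// Slugs inferred from the asserted URLs: btc, eth, doge.
const CURRENCY_SLUGS: Record<string, string> = {
  bitcoin: 'btc',
  ethereum: 'eth',
  dogecoin: 'doge',
};

function walletCreateUrl(currency: string): string {
  const slug = CURRENCY_SLUGS[currency];
  if (!slug) throw new Error(`Unsupported currency: ${currency}`);
  return `${WALLET_API_BASE}/${slug}/create`;
}
// walletCreateUrl('dogecoin') === 'https://wallets.seemueller.io/api/doge/create'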
@@ -177,17 +180,18 @@ describe('TransactionService', () => {
it('should process a valid transaction request', async () => {
// Create a mock request
const mockRequest = {
text: vi.fn().mockResolvedValue('PREPARE_TX,donor123,bitcoin,0.01')
text: vi.fn().mockResolvedValue('PREPARE_TX,donor123,bitcoin,0.01'),
};

// Call the method
const response = await transactionService.handleTransact(mockRequest);

// Verify routeAction was called with the correct parameters
expect(transactionService.routeAction).toHaveBeenCalledWith(
'PREPARE_TX',
['donor123', 'bitcoin', '0.01']
);
expect(transactionService.routeAction).toHaveBeenCalledWith('PREPARE_TX', [
'donor123',
'bitcoin',
'0.01',
]);

// Verify the response
expect(response).toBeInstanceOf(Response);
@@ -200,7 +204,7 @@ describe('TransactionService', () => {
it('should handle errors gracefully', async () => {
// Create a mock request
const mockRequest = {
text: vi.fn().mockResolvedValue('PREPARE_TX,donor123,bitcoin,0.01')
text: vi.fn().mockResolvedValue('PREPARE_TX,donor123,bitcoin,0.01'),
};

// Make routeAction throw an error
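Both tests feed handleTransact a comma-separated body of the form ACTION,arg1,arg2,... and expect it to be split into an action name plus a data array before routeAction is dispatched. A minimal sketch of that parsing step, as implied by the expectations above (parseTransactBody is an illustrative helper, not part of the service's API):

// Hypothetical helper mirroring what the tests imply handleTransact does
// with the request body before calling routeAction.
async function parseTransactBody(request: { text(): Promise<string> }): Promise<[string, string[]]> {
  const body = await request.text();
  const [action, ...data] = body.split(',');
  return [action, data];
}
// 'PREPARE_TX,donor123,bitcoin,0.01' -> ['PREPARE_TX', ['donor123', 'bitcoin', '0.01']]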
@@ -1,100 +1,130 @@
import type {
KVNamespace,
KVNamespaceGetOptions,
KVNamespaceListOptions,
KVNamespaceListResult, KVNamespacePutOptions
} from "@cloudflare/workers-types";
import {BunSqliteKeyValue} from "bun-sqlite-key-value";
import {OPEN_GSIO_DATA_DIR} from "../constants";
KVNamespace,
KVNamespaceGetOptions,
KVNamespaceListOptions,
KVNamespaceListResult,
KVNamespacePutOptions,
} from '@cloudflare/workers-types';
import { BunSqliteKeyValue } from 'bun-sqlite-key-value';

interface BaseKV extends KVNamespace {
import { OPEN_GSIO_DATA_DIR } from '../constants';

}
interface BaseKV extends KVNamespace {}

interface Options {
namespace: string;
path: string
interface _Options {
namespace: string;
path: string;
}

const defaultOptions = {
namespace: "open-gsio",
path: OPEN_GSIO_DATA_DIR
namespace: 'open-gsio',
path: OPEN_GSIO_DATA_DIR,
};

export class BunSqliteKVNamespace implements BaseKV {
private db: any;
private db: any;

constructor(options?: { namespace?: string, path?: string }) {
const merged = {...defaultOptions, ...options};
const {namespace, path} = merged;
constructor(options?: { namespace?: string; path?: string }) {
const merged = { ...defaultOptions, ...options };
const { namespace, path } = merged;

this.db = new BunSqliteKeyValue(`${path}/${namespace}`);
this.db = new BunSqliteKeyValue(`${path}/${namespace}`);
}

async delete(key: string): Promise<void> {
await this.db.delete(key);
}

async get(
key: string | Array<string>,
options?:
| Partial<KVNamespaceGetOptions<undefined>>
| 'text'
| 'json'
| 'arrayBuffer'
| 'stream'
| KVNamespaceGetOptions<'text'>
| KVNamespaceGetOptions<'json'>
| KVNamespaceGetOptions<'arrayBuffer'>
| KVNamespaceGetOptions<'stream'>
| 'text'
| 'json',
): Promise<any> {
if (Array.isArray(key)) {
const result = new Map();
for (const k of key) {
const value = await this.db.get(k);
result.set(k, value);
}
return result;
}

async delete(key: string): Promise<void> {
await this.db.delete(key);
const value = await this.db.get(key);
if (value === undefined) return null;

if (
!options ||
options === 'text' ||
(typeof options === 'object' && options.type === 'text')
) {
return value;
}

async get(key: string | Array<string>, options?: Partial<KVNamespaceGetOptions<undefined>> | "text" | "json" | "arrayBuffer" | "stream" | KVNamespaceGetOptions<"text"> | KVNamespaceGetOptions<"json"> | KVNamespaceGetOptions<"arrayBuffer"> | KVNamespaceGetOptions<"stream"> | "text" | "json"): Promise<any> {
if (Array.isArray(key)) {
const result = new Map();
for (const k of key) {
const value = await this.db.get(k);
result.set(k, value);
}
return result;
}

const value = await this.db.get(key);
if (value === undefined) return null;

if (!options || options === "text" || (typeof options === "object" && options.type === "text")) {
return value;
}
if (options === "json" || (typeof options === "object" && options.type === "json")) {
return JSON.parse(value);
}
if (options === "arrayBuffer" || (typeof options === "object" && options.type === "arrayBuffer")) {
return new TextEncoder().encode(value).buffer;
}
if (options === "stream" || (typeof options === "object" && options.type === "stream")) {
return new ReadableStream({
start(controller) {
controller.enqueue(new TextEncoder().encode(value));
controller.close();
}
});
}
return value;
if (options === 'json' || (typeof options === 'object' && options.type === 'json')) {
return JSON.parse(value);
}

getWithMetadata(key: string | Array<string>, options?: any): any {
return null;
if (
options === 'arrayBuffer' ||
(typeof options === 'object' && options.type === 'arrayBuffer')
) {
return new TextEncoder().encode(value).buffer;
}

async list<Metadata = unknown>(options?: KVNamespaceListOptions): Promise<KVNamespaceListResult<Metadata, string>> {
const keys = await this.db.keys();
return {
keys: keys.map(key => ({name: key})),
list_complete: true,
cursor: ''
};
if (options === 'stream' || (typeof options === 'object' && options.type === 'stream')) {
return new ReadableStream({
start(controller) {
controller.enqueue(new TextEncoder().encode(value));
controller.close();
},
});
}
return value;
}

async put(key: string, value: string | ArrayBuffer | ArrayBufferView | ReadableStream, options?: KVNamespacePutOptions): Promise<void> {
if (value instanceof ArrayBuffer || ArrayBuffer.isView(value)) {
value = new TextDecoder().decode(value);
} else if (value instanceof ReadableStream) {
const reader = value.getReader();
const chunks = [];
while (true) {
const {done, value} = await reader.read();
if (done) break;
chunks.push(value);
}
value = new TextDecoder().decode(new Uint8Array(Buffer.concat(chunks)));
getWithMetadata(_key: string | Array<string>, _options?: any): any {
return null;
}

async list<Metadata = unknown>(
_options?: KVNamespaceListOptions,
): Promise<KVNamespaceListResult<Metadata, string>> {
const keys = await this.db.keys();
return {
keys: keys.map(key => ({ name: key })),
list_complete: true,
cursor: '',
};
}

async put(
key: string,
value: string | ArrayBuffer | ArrayBufferView | ReadableStream,
_options?: KVNamespacePutOptions,
): Promise<void> {
if (value instanceof ArrayBuffer || ArrayBuffer.isView(value)) {
value = new TextDecoder().decode(value);
} else if (value instanceof ReadableStream) {
const reader = value.getReader();
const chunks = [];
let readDone = false;
while (!readDone) {
const { done, value } = await reader.read();
if (done) {
readDone = true;
} else {
chunks.push(value);
}
await this.db.set(key, value);
}
value = new TextDecoder().decode(new Uint8Array(Buffer.concat(chunks)));
}
}
await this.db.set(key, value);
}
}
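The class above gives local and test environments a KVNamespace-shaped adapter backed by bun-sqlite-key-value: values are stored as text, and the get options reproduce the 'text', 'json', 'arrayBuffer', and 'stream' behaviors. A usage sketch against the defaults defined earlier (the namespace, path, and keys here are illustrative):

// Hypothetical round-trip through the adapter; 'text' is the default read
// mode, and the 'json' option parses on the way out.
async function demo() {
  const kv = new BunSqliteKVNamespace({ namespace: 'example', path: '/tmp' });

  await kv.put('greeting', 'hello');
  const text = await kv.get('greeting'); // 'hello'

  await kv.put('settings', JSON.stringify({ retries: 3 }));
  const settings = await kv.get('settings', 'json'); // { retries: 3 }

  await kv.delete('greeting');
  return { text, settings };
}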
@@ -1,23 +1,23 @@
import {defineConfig} from "vite";
import {configDefaults} from 'vitest/config';
import { defineConfig } from 'vite';
// eslint-disable-next-line import/no-unresolved
import { configDefaults } from 'vitest/config';

export default defineConfig(({command}) => {

return {
test: {
globals: true,
environment: 'jsdom',
registerNodeLoader: false,
// setupFiles: ['./src/test/setup.ts'],
exclude: [...configDefaults.exclude, 'dist/**', '.open-gsio/**'],
reporters: process.env.GITHUB_ACTIONS ? ['dot', 'github-actions', 'html'] : ['dot', 'html'],
coverage: {
// you can include other reporters, but 'json-summary' is required, json is recommended
reporter: ['json-summary', 'json', 'html'],
reportsDirectory: "coverage",
// If you want a coverage report even if your tests are failing, include the reportOnFailure option
reportOnFailure: true,
}
}
};
export default defineConfig(({ command }) => {
return {
test: {
globals: true,
environment: 'jsdom',
registerNodeLoader: false,
// setupFiles: ['./src/test/setup.ts'],
exclude: [...configDefaults.exclude, 'dist/**', '.open-gsio/**'],
reporters: process.env.GITHUB_ACTIONS ? ['dot', 'github-actions', 'html'] : ['dot', 'html'],
coverage: {
// you can include other reporters, but 'json-summary' is required, json is recommended
reporter: ['json-summary', 'json', 'html'],
reportsDirectory: 'coverage',
// If you want a coverage report even if your tests are failing, include the reportOnFailure option
reportOnFailure: true,
},
},
};
});
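The json-summary reporter is what downstream tooling such as a CI coverage comment typically consumes; with reportsDirectory set to 'coverage' it should land at coverage/coverage-summary.json in Istanbul's summary format. A sketch of reading the totals from that file (the script itself is illustrative, not part of this commit):

import { readFileSync } from 'node:fs';

// Istanbul-style summary: { total: { lines: { pct, ... }, ... }, ...perFile }
const summary = JSON.parse(readFileSync('coverage/coverage-summary.json', 'utf8'));
console.log(`line coverage: ${summary.total.lines.pct}%`);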