refactor(ux): consolidate BMAD skills, update design system, and clean up Prisma generated client

This commit is contained in:
Sepehr Ramezani
2026-04-19 19:21:27 +02:00
parent 5296c4da2c
commit 25529a24b8
2476 changed files with 127934 additions and 101962 deletions

View File

@@ -1,11 +1,13 @@
import { createOpenAI } from '@ai-sdk/openai';
import { generateObject, generateText, embed } from 'ai';
import { generateObject, generateText as aiGenerateText, embed, stepCountIs } from 'ai';
import { z } from 'zod';
import { AIProvider, TagSuggestion, TitleSuggestion } from '../types';
import { AIProvider, TagSuggestion, TitleSuggestion, ToolUseOptions, ToolCallResult } from '../types';
export class CustomOpenAIProvider implements AIProvider {
private model: any;
private embeddingModel: any;
private apiKey: string;
private baseUrl: string;
constructor(
apiKey: string,
@@ -13,13 +15,22 @@ export class CustomOpenAIProvider implements AIProvider {
modelName: string = 'gpt-4o-mini',
embeddingModelName: string = 'text-embedding-3-small'
) {
this.apiKey = apiKey;
this.baseUrl = baseUrl.endsWith('/') ? baseUrl.slice(0, -1) : baseUrl;
// Create OpenAI-compatible client with custom base URL
// Use .chat() to force /chat/completions endpoint (avoids Responses API)
const customClient = createOpenAI({
baseURL: baseUrl,
apiKey: apiKey,
fetch: async (url, options) => {
const headers = new Headers(options?.headers);
headers.set('HTTP-Referer', 'https://localhost:3000');
headers.set('X-Title', 'Memento AI');
return fetch(url, { ...options, headers });
}
});
this.model = customClient(modelName);
this.model = customClient.chat(modelName);
this.embeddingModel = customClient.embedding(embeddingModelName);
}
@@ -79,7 +90,7 @@ export class CustomOpenAIProvider implements AIProvider {
async generateText(prompt: string): Promise<string> {
try {
const { text } = await generateText({
const { text } = await aiGenerateText({
model: this.model,
prompt: prompt,
});
@@ -90,4 +101,47 @@ export class CustomOpenAIProvider implements AIProvider {
throw e;
}
}
/**
 * Runs a single chat completion against the configured model.
 *
 * @param messages - conversation history, forwarded verbatim to the AI SDK
 * @param systemPrompt - optional system instruction for this request
 * @returns an object carrying the trimmed completion text
 * @throws re-raises any provider error after logging it with context
 */
async chat(messages: any[], systemPrompt?: string): Promise<any> {
  try {
    const response = await aiGenerateText({
      model: this.model,
      system: systemPrompt,
      messages,
    });
    return { text: response.text.trim() };
  } catch (e) {
    console.error('Erreur chat Custom OpenAI:', e);
    throw e;
  }
}
/**
 * Runs a multi-step tool-calling loop against the configured model.
 *
 * Mirrors the error-handling style of chat()/generateText(): provider
 * failures are logged with context and then re-thrown to the caller.
 *
 * @param options - tools, step budget, and either a prompt or a message list
 * @returns normalized tool calls, tool results, final text, and per-step traces
 * @throws re-raises any provider error after logging it
 */
async generateWithTools(options: ToolUseOptions): Promise<ToolCallResult> {
  const { tools, maxSteps = 10, systemPrompt, messages, prompt } = options;
  const opts: Record<string, any> = {
    model: this.model,
    tools,
    // Cap the agent loop so tool calling cannot run unbounded.
    stopWhen: stepCountIs(maxSteps),
  };
  if (systemPrompt) opts.system = systemPrompt;
  // messages takes precedence over prompt when both are supplied.
  if (messages) opts.messages = messages;
  else if (prompt) opts.prompt = prompt;
  try {
    const result = await aiGenerateText(opts as any);
    // Normalize SDK shapes into the provider-agnostic ToolCallResult contract.
    const mapCall = (tc: any) => ({ toolName: tc.toolName, input: tc.input });
    const mapResult = (tr: any) => ({ toolName: tr.toolName, input: tr.input, output: tr.output });
    return {
      toolCalls: result.toolCalls?.map(mapCall) ?? [],
      toolResults: result.toolResults?.map(mapResult) ?? [],
      text: result.text,
      steps: result.steps?.map((step: any) => ({
        text: step.text,
        toolCalls: step.toolCalls?.map(mapCall) ?? [],
        toolResults: step.toolResults?.map(mapResult) ?? [],
      })) ?? [],
    };
  } catch (e) {
    // Consistent with chat(): log with context, then propagate.
    console.error('Erreur generateWithTools Custom OpenAI:', e);
    throw e;
  }
}
/**
 * Exposes the underlying chat-model handle created in the constructor,
 * letting callers drive the AI SDK directly when needed.
 *
 * @returns the configured model instance
 */
getModel() {
  const { model } = this;
  return model;
}
}