refactor(ux): consolidate BMAD skills, update design system, and clean up Prisma generated client

This commit is contained in:
Sepehr Ramezani
2026-04-19 19:21:27 +02:00
parent 5296c4da2c
commit 25529a24b8
2476 changed files with 127934 additions and 101962 deletions

View File

@@ -1,9 +1,12 @@
import { AIProvider, TagSuggestion, TitleSuggestion } from '../types';
import { createOpenAI } from '@ai-sdk/openai';
import { generateText as aiGenerateText, stepCountIs } from 'ai';
import { AIProvider, TagSuggestion, TitleSuggestion, ToolUseOptions, ToolCallResult } from '../types';
export class OllamaProvider implements AIProvider {
private baseUrl: string;
private modelName: string;
private embeddingModelName: string;
private model: any;
constructor(baseUrl: string, modelName: string = 'llama3', embeddingModelName?: string) {
if (!baseUrl) {
@@ -13,6 +16,15 @@ export class OllamaProvider implements AIProvider {
this.baseUrl = baseUrl.endsWith('/api') ? baseUrl : `${baseUrl}/api`;
this.modelName = modelName;
this.embeddingModelName = embeddingModelName || modelName;
// Create OpenAI-compatible model for streaming support
// Ollama exposes /v1/chat/completions which is compatible with the OpenAI SDK
const cleanUrl = this.baseUrl.replace(/\/api$/, '');
const ollamaClient = createOpenAI({
baseURL: `${cleanUrl}/v1`,
apiKey: 'ollama',
});
this.model = ollamaClient.chat(modelName);
}
async generateTags(content: string, language: string = "en"): Promise<TagSuggestion[]> {
@@ -148,4 +160,63 @@ Note content: "${content}"`;
throw e;
}
}
async chat(messages: any[], systemPrompt?: string): Promise<any> {
try {
const ollamaMessages = messages.map(m => ({
role: m.role,
content: m.content
}));
if (systemPrompt) {
ollamaMessages.unshift({ role: 'system', content: systemPrompt });
}
const response = await fetch(`${this.baseUrl}/chat`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
model: this.modelName,
messages: ollamaMessages,
stream: false,
}),
});
if (!response.ok) throw new Error(`Ollama error: ${response.statusText}`);
const data = await response.json();
return { text: data.message?.content?.trim() || '' };
} catch (e) {
console.error('Erreur chat Ollama:', e);
throw e;
}
}
/** Returns the OpenAI-compatible chat model instance built in the constructor. */
getModel() {
  const { model } = this;
  return model;
}
/**
 * Runs a multi-step, tool-using generation through the AI SDK's generateText.
 *
 * @param options - Tool definitions, optional step cap (default 10), and either
 *                  a message list or a plain prompt, plus an optional system prompt.
 * @returns Normalized top-level tool calls/results, the final text, and a
 *          per-step breakdown with the same normalized shape.
 */
async generateWithTools(options: ToolUseOptions): Promise<ToolCallResult> {
  const { tools, maxSteps = 10, systemPrompt, messages, prompt } = options;

  // Shared normalizers: the same mapping is applied to the top-level result
  // and to every intermediate step, so define it once.
  const mapCalls = (calls: any[] | undefined) =>
    calls?.map((tc: any) => ({ toolName: tc.toolName, input: tc.input })) || [];
  const mapResults = (results: any[] | undefined) =>
    results?.map((tr: any) => ({ toolName: tr.toolName, input: tr.input, output: tr.output })) || [];

  const opts: Record<string, any> = {
    model: this.model,
    tools,
    // Stop the agentic loop once the model has taken maxSteps steps.
    stopWhen: stepCountIs(maxSteps),
  };
  if (systemPrompt) opts.system = systemPrompt;
  // messages takes precedence over prompt when both are supplied.
  if (messages) opts.messages = messages;
  else if (prompt) opts.prompt = prompt;

  const result = await aiGenerateText(opts as any);
  return {
    toolCalls: mapCalls(result.toolCalls),
    toolResults: mapResults(result.toolResults),
    text: result.text,
    steps: result.steps?.map((step: any) => ({
      text: step.text,
      toolCalls: mapCalls(step.toolCalls),
      toolResults: mapResults(step.toolResults),
    })) || [],
  };
}
}