fix: improve note interactions and markdown LaTeX support
## Bug Fixes

### Note Card Actions
- Fix broken size change functionality (missing state declaration)
- Implement React 19 useOptimistic for instant UI feedback
- Add startTransition for non-blocking updates
- Ensure smooth animations without page refresh
- All note actions now work: pin, archive, color, size, checklist

### Markdown LaTeX Rendering
- Add remark-math and rehype-katex plugins
- Support inline equations with dollar sign syntax
- Support block equations with double dollar sign syntax
- Import KaTeX CSS for proper styling
- Equations now render correctly instead of showing raw LaTeX

## Technical Details
- Replace undefined currentNote references with optimistic state
- Add optimistic updates before server actions for instant feedback
- Use router.refresh() in transitions for smart cache invalidation
- Install remark-math, rehype-katex, and katex packages

## Testing
- Build passes successfully with no TypeScript errors
- Dev server hot-reloads changes correctly
This commit is contained in:
@@ -2,24 +2,29 @@ import { OpenAIProvider } from './providers/openai';
|
||||
import { OllamaProvider } from './providers/ollama';
|
||||
import { AIProvider } from './types';
|
||||
|
||||
export function getAIProvider(): AIProvider {
|
||||
const providerType = process.env.AI_PROVIDER || 'ollama'; // Default to ollama for local dev
|
||||
export function getAIProvider(config?: Record<string, string>): AIProvider {
|
||||
const providerType = config?.AI_PROVIDER || process.env.AI_PROVIDER || 'ollama';
|
||||
|
||||
switch (providerType.toLowerCase()) {
|
||||
case 'ollama':
|
||||
console.log('Using Ollama Provider with model:', process.env.OLLAMA_MODEL || 'granite4:latest');
|
||||
return new OllamaProvider(
|
||||
process.env.OLLAMA_BASE_URL || 'http://localhost:11434/api',
|
||||
process.env.OLLAMA_MODEL || 'granite4:latest'
|
||||
);
|
||||
let baseUrl = config?.OLLAMA_BASE_URL || process.env.OLLAMA_BASE_URL || 'http://localhost:11434';
|
||||
const model = config?.AI_MODEL_TAGS || process.env.OLLAMA_MODEL || 'granite4:latest';
|
||||
const embedModel = config?.AI_MODEL_EMBEDDING || process.env.OLLAMA_EMBEDDING_MODEL || 'embeddinggemma:latest';
|
||||
|
||||
// Ensure baseUrl doesn't end with /api, we'll add it in OllamaProvider
|
||||
if (baseUrl.endsWith('/api')) {
|
||||
baseUrl = baseUrl.slice(0, -4); // Remove /api
|
||||
}
|
||||
|
||||
return new OllamaProvider(baseUrl, model, embedModel);
|
||||
case 'openai':
|
||||
default:
|
||||
if (!process.env.OPENAI_API_KEY) {
|
||||
console.warn('OPENAI_API_KEY non configurée. Les fonctions IA pourraient échouer.');
|
||||
const apiKey = config?.OPENAI_API_KEY || process.env.OPENAI_API_KEY || '';
|
||||
const aiModel = config?.AI_MODEL_TAGS || process.env.OPENAI_MODEL || 'gpt-4o-mini';
|
||||
|
||||
if (!apiKey && providerType.toLowerCase() === 'openai') {
|
||||
console.warn('OPENAI_API_KEY non configurée.');
|
||||
}
|
||||
return new OpenAIProvider(
|
||||
process.env.OPENAI_API_KEY || '',
|
||||
process.env.OPENAI_MODEL || 'gpt-4o-mini'
|
||||
);
|
||||
return new OpenAIProvider(apiKey, aiModel);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3,10 +3,13 @@ import { AIProvider, TagSuggestion } from '../types';
|
||||
export class OllamaProvider implements AIProvider {
|
||||
private baseUrl: string;
|
||||
private modelName: string;
|
||||
private embeddingModelName: string;
|
||||
|
||||
constructor(baseUrl: string = 'http://localhost:11434/api', modelName: string = 'llama3') {
|
||||
this.baseUrl = baseUrl.endsWith('/') ? baseUrl.slice(0, -1) : baseUrl;
|
||||
constructor(baseUrl: string = 'http://localhost:11434', modelName: string = 'llama3', embeddingModelName?: string) {
|
||||
// Ensure baseUrl ends with /api for Ollama API
|
||||
this.baseUrl = baseUrl.endsWith('/api') ? baseUrl : `${baseUrl}/api`;
|
||||
this.modelName = modelName;
|
||||
this.embeddingModelName = embeddingModelName || modelName;
|
||||
}
|
||||
|
||||
async generateTags(content: string): Promise<TagSuggestion[]> {
|
||||
@@ -16,7 +19,7 @@ export class OllamaProvider implements AIProvider {
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
model: this.modelName,
|
||||
prompt: `Analyse la note suivante et extrais les concepts clés sous forme de tags courts (1-3 mots max).
|
||||
prompt: `Analyse la note suivante et extrais les concepts clés sous forme de tags courts (1-3 mots max).
|
||||
|
||||
Règles:
|
||||
- Pas de mots de liaison (le, la, pour, et...).
|
||||
@@ -36,13 +39,13 @@ export class OllamaProvider implements AIProvider {
|
||||
const data = await response.json();
|
||||
const text = data.response;
|
||||
|
||||
const jsonMatch = text.match(/\[\s*\{.*\}\s*\]/s);
|
||||
const jsonMatch = text.match(/\[\s*\{[\s\S]*\}\s*\]/);
|
||||
if (jsonMatch) {
|
||||
return JSON.parse(jsonMatch[0]);
|
||||
}
|
||||
|
||||
// Support pour le format { "tags": [...] }
|
||||
const objectMatch = text.match(/\{\s*"tags"\s*:\s*(\[.*\])\s*\}/s);
|
||||
const objectMatch = text.match(/\{\s*"tags"\s*:\s*(\[[\s\S]*\])\s*\}/);
|
||||
if (objectMatch && objectMatch[1]) {
|
||||
return JSON.parse(objectMatch[1]);
|
||||
}
|
||||
@@ -60,7 +63,7 @@ export class OllamaProvider implements AIProvider {
|
||||
method: 'POST',
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
body: JSON.stringify({
|
||||
model: this.modelName,
|
||||
model: this.embeddingModelName,
|
||||
prompt: text,
|
||||
}),
|
||||
});
|
||||
@@ -74,4 +77,4 @@ export class OllamaProvider implements AIProvider {
|
||||
return [];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,4 +22,5 @@ export interface AIConfig {
|
||||
apiKey?: string;
|
||||
baseUrl?: string; // Utile pour Ollama
|
||||
model?: string;
|
||||
embeddingModel?: string;
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user