fix: improve note interactions and markdown LaTeX support
## Bug Fixes

### Note Card Actions

- Fix broken size change functionality (missing state declaration)
- Implement React 19 useOptimistic for instant UI feedback
- Add startTransition for non-blocking updates
- Ensure smooth animations without page refresh
- All note actions now work: pin, archive, color, size, checklist

### Markdown LaTeX Rendering

- Add remark-math and rehype-katex plugins
- Support inline equations with dollar sign syntax
- Support block equations with double dollar sign syntax
- Import KaTeX CSS for proper styling
- Equations now render correctly instead of showing raw LaTeX

## Technical Details

- Replace undefined currentNote references with optimistic state
- Add optimistic updates before server actions for instant feedback
- Use router.refresh() in transitions for smart cache invalidation
- Install remark-math, rehype-katex, and katex packages

## Testing

- Build passes successfully with no TypeScript errors
- Dev server hot-reloads changes correctly
This commit is contained in:
@@ -2,24 +2,29 @@ import { OpenAIProvider } from './providers/openai';
|
||||
import { OllamaProvider } from './providers/ollama';
|
||||
import { AIProvider } from './types';
|
||||
|
||||
export function getAIProvider(): AIProvider {
|
||||
const providerType = process.env.AI_PROVIDER || 'ollama'; // Default to ollama for local dev
|
||||
export function getAIProvider(config?: Record<string, string>): AIProvider {
|
||||
const providerType = config?.AI_PROVIDER || process.env.AI_PROVIDER || 'ollama';
|
||||
|
||||
switch (providerType.toLowerCase()) {
|
||||
case 'ollama':
|
||||
console.log('Using Ollama Provider with model:', process.env.OLLAMA_MODEL || 'granite4:latest');
|
||||
return new OllamaProvider(
|
||||
process.env.OLLAMA_BASE_URL || 'http://localhost:11434/api',
|
||||
process.env.OLLAMA_MODEL || 'granite4:latest'
|
||||
);
|
||||
let baseUrl = config?.OLLAMA_BASE_URL || process.env.OLLAMA_BASE_URL || 'http://localhost:11434';
|
||||
const model = config?.AI_MODEL_TAGS || process.env.OLLAMA_MODEL || 'granite4:latest';
|
||||
const embedModel = config?.AI_MODEL_EMBEDDING || process.env.OLLAMA_EMBEDDING_MODEL || 'embeddinggemma:latest';
|
||||
|
||||
// Ensure baseUrl doesn't end with /api, we'll add it in OllamaProvider
|
||||
if (baseUrl.endsWith('/api')) {
|
||||
baseUrl = baseUrl.slice(0, -4); // Remove /api
|
||||
}
|
||||
|
||||
return new OllamaProvider(baseUrl, model, embedModel);
|
||||
case 'openai':
|
||||
default:
|
||||
if (!process.env.OPENAI_API_KEY) {
|
||||
console.warn('OPENAI_API_KEY non configurée. Les fonctions IA pourraient échouer.');
|
||||
const apiKey = config?.OPENAI_API_KEY || process.env.OPENAI_API_KEY || '';
|
||||
const aiModel = config?.AI_MODEL_TAGS || process.env.OPENAI_MODEL || 'gpt-4o-mini';
|
||||
|
||||
if (!apiKey && providerType.toLowerCase() === 'openai') {
|
||||
console.warn('OPENAI_API_KEY non configurée.');
|
||||
}
|
||||
return new OpenAIProvider(
|
||||
process.env.OPENAI_API_KEY || '',
|
||||
process.env.OPENAI_MODEL || 'gpt-4o-mini'
|
||||
);
|
||||
return new OpenAIProvider(apiKey, aiModel);
|
||||
}
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user