Keep/keep-notes/lib/ai/factory.ts
sepehr 640fcb26f7 fix: improve note interactions and markdown LaTeX support
## Bug Fixes

### Note Card Actions
- Fix broken size change functionality (missing state declaration)
- Implement React 19 useOptimistic for instant UI feedback (see the sketch after this list)
- Add startTransition for non-blocking updates
- Ensure smooth animations without page refresh
- All note actions now work: pin, archive, color, size, checklist
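
A minimal sketch of this pattern, assuming a client component that receives the note and a server action as props; the names (`NoteCard`, `togglePinAction`) are illustrative, not the repo's actual identifiers:

```tsx
'use client';

import { useOptimistic, startTransition } from 'react';

interface Note { id: string; pinned: boolean; }

export function NoteCard({ note, togglePinAction }: {
  note: Note;
  togglePinAction: (id: string) => Promise<void>;
}) {
  // Merge pending patches into the server-provided note so the UI updates instantly.
  const [optimisticNote, applyPatch] = useOptimistic(
    note,
    (current, patch: Partial<Note>) => ({ ...current, ...patch })
  );

  function togglePin() {
    startTransition(async () => {
      applyPatch({ pinned: !optimisticNote.pinned }); // instant, non-blocking UI change
      await togglePinAction(note.id);                 // server action completes in the background
    });
  }

  return (
    <button onClick={togglePin}>
      {optimisticNote.pinned ? 'Unpin' : 'Pin'}
    </button>
  );
}
```

Because the optimistic patch is applied inside the transition, React reverts it automatically if the server action fails, and no page refresh is needed for the change to appear.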

### Markdown LaTeX Rendering
- Add remark-math and rehype-katex plugins (wiring sketched after this list)
- Support inline equations with `$...$` syntax
- Support block equations with `$$...$$` syntax
- Import KaTeX CSS for proper styling
- Equations now render correctly instead of showing raw LaTeX
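
A sketch of the plugin wiring, assuming the notes are rendered with react-markdown; the component name and prop are illustrative:

```tsx
import ReactMarkdown from 'react-markdown';
import remarkMath from 'remark-math';
import rehypeKatex from 'rehype-katex';
import 'katex/dist/katex.min.css'; // KaTeX stylesheet so equations get proper styling

export function NoteContent({ markdown }: { markdown: string }) {
  return (
    <ReactMarkdown remarkPlugins={[remarkMath]} rehypePlugins={[rehypeKatex]}>
      {markdown}
    </ReactMarkdown>
  );
}
```

With this in place, `$E = mc^2$` renders inline and a `$$ ... $$` block renders as a display equation instead of raw LaTeX source.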

## Technical Details

- Replace undefined currentNote references with optimistic state
- Add optimistic updates before server actions for instant feedback
- Use router.refresh() inside the transition to revalidate server-rendered data without a full reload (see the helper sketched after this list)
- Install remark-math, rehype-katex, and katex packages
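
The first three bullets combine into one flow: optimistic patch first, then the server action, then a refresh inside the same transition. A hedged sketch of that flow as a small helper; the hook name and its shape are hypothetical, not code from this repo:

```tsx
'use client';

import { startTransition } from 'react';
import { useRouter } from 'next/navigation';

// Hypothetical helper: apply an optimistic patch, run the server action,
// then refresh so server components re-render against fresh data.
export function useOptimisticAction(applyPatch: (patch: Record<string, unknown>) => void) {
  const router = useRouter();

  return (patch: Record<string, unknown>, action: () => Promise<void>) => {
    startTransition(async () => {
      applyPatch(patch);   // optimistic update lands before the round trip
      await action();      // server action persists the change
      router.refresh();    // revalidate cached server data, no full page reload
    });
  };
}
```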

## Testing

- Build passes successfully with no TypeScript errors
- Dev server hot-reloads changes correctly
2026-01-09 22:13:49 +01:00

import { OpenAIProvider } from './providers/openai';
import { OllamaProvider } from './providers/ollama';
import { AIProvider } from './types';
export function getAIProvider(config?: Record<string, string>): AIProvider {
  const providerType = config?.AI_PROVIDER || process.env.AI_PROVIDER || 'ollama';

  switch (providerType.toLowerCase()) {
    case 'ollama': {
      let baseUrl = config?.OLLAMA_BASE_URL || process.env.OLLAMA_BASE_URL || 'http://localhost:11434';
      const model = config?.AI_MODEL_TAGS || process.env.OLLAMA_MODEL || 'granite4:latest';
      const embedModel = config?.AI_MODEL_EMBEDDING || process.env.OLLAMA_EMBEDDING_MODEL || 'embeddinggemma:latest';
      // Ensure baseUrl doesn't end with /api; OllamaProvider appends it itself.
      if (baseUrl.endsWith('/api')) {
        baseUrl = baseUrl.slice(0, -4);
      }
      return new OllamaProvider(baseUrl, model, embedModel);
    }
    case 'openai':
    default: {
      const apiKey = config?.OPENAI_API_KEY || process.env.OPENAI_API_KEY || '';
      const aiModel = config?.AI_MODEL_TAGS || process.env.OPENAI_MODEL || 'gpt-4o-mini';
      if (!apiKey && providerType.toLowerCase() === 'openai') {
        console.warn('OPENAI_API_KEY is not configured.');
      }
      return new OpenAIProvider(apiKey, aiModel);
    }
  }
}