feat: add reminders page, BMad skills upgrade, MCP server refactor

- Add reminders page with navigation support
- Upgrade BMad builder module to skills-based architecture
- Refactor MCP server: extract tools and auth into separate modules
- Add connections cache, custom AI provider support
- Update prisma schema and generated client
- Various UI/UX improvements and i18n updates
- Add service worker for PWA support

Made-with: Cursor
This commit is contained in:
Sepehr Ramezani
2026-04-13 21:02:53 +02:00
parent 18ed116e0d
commit fa7e166f3e
3099 changed files with 397228 additions and 14584 deletions

View File

@@ -0,0 +1,114 @@
'use server'
/**
 * Single entry in an OpenAI-compatible `GET /v1/models` response.
 * Only `id` is consumed downstream; the other fields mirror the
 * OpenAI wire format and are optional where providers omit them.
 */
interface OpenAIModel {
  id: string
  object: string
  created?: number
  owned_by?: string
}
/** Envelope returned by `GET /v1/models` (`{ object, data: [...] }`). */
interface OpenAIModelsResponse {
  object: string
  data: OpenAIModel[]
}
/**
 * Fetch a model list from an OpenAI-compatible endpoint.
 *
 * @param endpoint - Full URL of the models listing endpoint.
 * @param apiKey - Optional bearer token sent as an Authorization header.
 * @returns On success, the sorted model ids; on any failure (HTTP error,
 *          timeout, bad JSON, unreachable host) a `{ success: false }`
 *          result with a human-readable `error` — this function never throws.
 */
async function fetchModelsFromEndpoint(
  endpoint: string,
  apiKey?: string
): Promise<{ success: boolean; models: string[]; error?: string }> {
  try {
    const headers: Record<string, string> = {
      'Content-Type': 'application/json',
    }
    if (apiKey) {
      headers['Authorization'] = `Bearer ${apiKey}`
    }
    const response = await fetch(endpoint, {
      method: 'GET',
      headers,
      // Abort slow providers instead of hanging the server action.
      signal: AbortSignal.timeout(8000),
    })
    if (!response.ok) {
      throw new Error(`API returned ${response.status}: ${response.statusText}`)
    }
    const data = (await response.json()) as OpenAIModelsResponse
    // Tolerate a missing `data` array; drop empty ids and sort for stable UI.
    const modelIds = (data.data || [])
      .map((m) => m.id)
      .filter(Boolean)
      .sort()
    return { success: true, models: modelIds }
  } catch (error: unknown) {
    // `unknown` (not `any`): narrow before touching `.message` so a
    // non-Error throw (string, DOMException, etc.) cannot slip through untyped.
    console.error('Failed to fetch provider models:', error)
    const message = error instanceof Error ? error.message : String(error)
    return {
      success: false,
      models: [],
      error: message || 'Failed to connect to provider',
    }
  }
}
/**
 * List all models offered by a custom OpenAI-compatible provider via the
 * standard `GET /v1/models` endpoint.
 *
 * @param baseUrl - Provider base URL; a trailing `/` and/or `/v1` suffix
 *                  is stripped so `https://host`, `https://host/v1` and
 *                  `https://host/v1/` all resolve to the same endpoint.
 * @param apiKey - Optional bearer token for the provider.
 * @returns `{ success, models, error? }` — never throws.
 */
export async function getCustomModels(
  baseUrl: string,
  apiKey?: string
): Promise<{ success: boolean; models: string[]; error?: string }> {
  if (!baseUrl) {
    return { success: false, models: [], error: 'Base URL is required' }
  }
  // Normalize: drop one trailing slash, then a trailing "/v1" segment.
  const root = baseUrl.replace(/\/$/, '').replace(/\/v1$/, '')
  return fetchModelsFromEndpoint(`${root}/v1/models`, apiKey)
}
/**
 * List embedding models offered by a custom OpenAI-compatible provider.
 *
 * Strategy: try `GET /v1/embeddings/models` (an OpenRouter-specific endpoint
 * that returns only embedding models); if it is unavailable or empty, fall
 * back to `GET /v1/models` filtered by common embedding-model name patterns.
 *
 * @param baseUrl - Provider base URL; trailing `/` and `/v1` are stripped.
 * @param apiKey - Optional bearer token for the provider.
 * @returns `{ success, models, error? }` — never throws.
 */
export async function getCustomEmbeddingModels(
  baseUrl: string,
  apiKey?: string
): Promise<{ success: boolean; models: string[]; error?: string }> {
  if (!baseUrl) {
    return { success: false, models: [], error: 'Base URL is required' }
  }
  const root = baseUrl.replace(/\/$/, '').replace(/\/v1$/, '')

  // Prefer the dedicated embeddings listing when the provider supports it.
  const dedicated = await fetchModelsFromEndpoint(
    `${root}/v1/embeddings/models`,
    apiKey
  )
  if (dedicated.success && dedicated.models.length > 0) {
    return dedicated
  }

  // Otherwise list everything and keep ids that look like embedding models.
  const everything = await fetchModelsFromEndpoint(`${root}/v1/models`, apiKey)
  if (!everything.success) {
    return everything
  }
  const hints = ['embed', 'embedding', 'ada', 'e5', 'bge', 'gte', 'minilm']
  const looksLikeEmbedding = (id: string): boolean => {
    const lower = id.toLowerCase()
    return hints.some((hint) => lower.includes(hint))
  }
  const matches = everything.models.filter(looksLikeEmbedding)
  // An empty filter result surfaces the full list so the user can still pick.
  return {
    success: true,
    models: matches.length > 0 ? matches : everything.models,
  }
}