fix(ai): prevent OpenAI credential error at module load time

- Moved OpenAI client initialization to a lazy getter function
- Added explicit check for NEBIUS_API_KEY in the getter
- Ensures Server Actions return structured errors instead of crashing when keys are missing
This commit is contained in:
2025-12-19 14:30:38 +01:00
parent e2c9bef8f4
commit d9b44a0ec5
2 changed files with 20 additions and 6 deletions

View File

@@ -1,6 +1,19 @@
import { OpenAI } from 'openai';
export const aiClient = new OpenAI({
/**
* Creates a Nebius AI client using the provided API key.
* This is wrapped in a function to avoid top-level initialization errors
* if the environment variables are not yet loaded or are missing.
*/
export function getNebiusClient() {
const apiKey = process.env.NEBIUS_API_KEY;
if (!apiKey) {
throw new Error('NEBIUS_API_KEY is not configured.');
}
return new OpenAI({
baseURL: 'https://api.tokenfactory.nebius.com/v1/',
apiKey: process.env.NEBIUS_API_KEY,
});
apiKey: apiKey,
});
}

View File

@@ -1,6 +1,6 @@
'use server';
import { aiClient } from '@/lib/ai-client';
import { getNebiusClient } from '@/lib/ai-client';
import { SYSTEM_INSTRUCTION as GEMINI_SYSTEM_INSTRUCTION } from '@/lib/gemini';
import { BottleMetadataSchema, AnalysisResponse, BottleMetadata } from '@/types/whisky';
import { createServerActionClient } from '@supabase/auth-helpers-nextjs';
@@ -56,7 +56,8 @@ export async function analyzeBottleNebius(base64Image: string, tags?: string[],
console.log(`[analyzeBottleNebius] Instruction prepared for AI: ${instruction.substring(0, 100)}...`);
const response = await aiClient.chat.completions.create({
const client = getNebiusClient();
const response = await client.chat.completions.create({
model: "Qwen/Qwen2.5-VL-72B-Instruct",
messages: [
{