nutrition: pre-download HuggingFace models at build time
All checks were successful
CI / update (push) Successful in 4m26s

The deployment server couldn't fetch transformer models at runtime due to
restricted network access and permission errors writing to node_modules.
Add a prebuild script to download models during build and document
TRANSFORMERS_CACHE env var for configuring a shared writable cache path.
This commit is contained in:
2026-04-02 20:47:04 +02:00
parent 7935ac6b75
commit 61336823b3
3 changed files with 21 additions and 1 deletions

View File

@@ -33,3 +33,6 @@ DEEPL_API_URL="https://api-free.deepl.com/v2/translate" # Use https://api.deepl
# AI Vision Service (Ollama for Alt Text Generation)
OLLAMA_URL="http://localhost:11434" # Local Ollama server URL
# HuggingFace Transformers Model Cache (for nutrition embedding models)
TRANSFORMERS_CACHE="/var/cache/transformers" # Must be writable by build and runtime user

View File

@@ -5,7 +5,7 @@
"type": "module",
"scripts": {
"dev": "vite dev",
"prebuild": "bash scripts/subset-emoji-font.sh && pnpm exec vite-node scripts/generate-mystery-verses.ts",
"prebuild": "bash scripts/subset-emoji-font.sh && pnpm exec vite-node scripts/generate-mystery-verses.ts && pnpm exec vite-node scripts/download-models.ts",
"build": "vite build",
"preview": "vite preview",
"check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",

View File

@@ -0,0 +1,17 @@
/**
 * Pre-downloads HuggingFace transformer models so they're cached for runtime.
 *
 * Run with: pnpm exec vite-node scripts/download-models.ts
 *
 * Runs as part of `prebuild` so the deployment server never needs to fetch
 * models at runtime. The cache location is controlled by the
 * TRANSFORMERS_CACHE env var (see .env.example); it must be writable by
 * both the build user and the runtime user.
 */
import { pipeline } from '@huggingface/transformers';

// Embedding models used by the nutrition feature.
const MODELS = [
	'Xenova/all-MiniLM-L6-v2',
	'Xenova/multilingual-e5-small',
] as const;

for (const name of MODELS) {
	console.log(`Downloading ${name}...`);
	try {
		// Instantiating the pipeline triggers the model download into the
		// cache; q8 matches the dtype used at runtime so the same files
		// are fetched. Dispose immediately — we only want the cache warm.
		const extractor = await pipeline('feature-extraction', name, { dtype: 'q8' });
		await extractor.dispose();
		console.log(`  done`);
	} catch (err: unknown) {
		// Fail the prebuild loudly with an actionable message instead of an
		// anonymous unhandled-rejection stack trace.
		const reason = err instanceof Error ? err.message : String(err);
		console.error(
			`Failed to download ${name}: ${reason}\n` +
				`Check network access and that TRANSFORMERS_CACHE is set to a writable path.`
		);
		process.exit(1);
	}
}