Spaces:
Sleeping
Sleeping
File size: 1,239 Bytes
9a3142d de61a28 9a3142d a2a5828 45f84c5 2c1da5f e9de246 e544167 2c1da5f e544167 9a3142d fb51816 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 |
# Slim Python base keeps the image small; tag pinned to a minor version
# for reproducibility (pin by digest for full determinism if desired).
FROM python:3.10-slim

WORKDIR /app

# OS tools needed at build/run time (git for pip VCS deps, curl for probes).
# --no-install-recommends avoids pulling optional packages (hadolint DL3015);
# update+install+cleanup share one layer so the apt cache never persists
# in the image (DL3009).
RUN apt-get update && apt-get install -y --no-install-recommends \
      curl \
      git \
    && rm -rf /var/lib/apt/lists/*

# Copy the dependency manifest alone first so the pip layer stays cached
# until requirements.txt itself changes (source edits won't reinstall deps).
COPY requirements.txt /app/requirements.txt
RUN pip install --no-cache-dir -r /app/requirements.txt

# Application code last — it changes most often, so it invalidates the
# fewest cached layers.
COPY app.py /app/app.py
COPY model_handler.py /app/model_handler.py
# Build-time selector for which HF model repo to bake into the image;
# override with: docker build --build-arg HF_MODEL_REPO=<org/name>
ARG HF_MODEL_REPO=Jaja-09/authorcheck-model
# Download model snapshot from HF model repo into /app/model at build time,
# so the container needs no model download (and no network) at startup.
# NOTE(review): assumes the repo is public — a private repo would need a
# token via a BuildKit secret mount, never via ARG/ENV (visible in history).
RUN python -c "from huggingface_hub import snapshot_download; snapshot_download(repo_id='${HF_MODEL_REPO}', local_dir='/app/model')"
# Use writable caches inside /app and pre-download NLTK + sentiment model
# so nothing is fetched at container startup (the default HOME may not be
# writable at runtime). HF_HOME is the current variable for the Hugging Face
# cache location; TRANSFORMERS_CACHE is kept for back-compat but is
# deprecated (slated for removal in transformers v5).
ENV NLTK_DATA=/app/nltk_data \
    HF_HOME=/app/hf_cache \
    TRANSFORMERS_CACHE=/app/hf_cache
# Create the cache dirs and warm them in one layer: NLTK tokenizer data
# (punkt + punkt_tab) and the sentiment model weights + tokenizer.
RUN mkdir -p /app/nltk_data /app/hf_cache && \
    python -c "import nltk; nltk.download('punkt', download_dir='/app/nltk_data', quiet=True); nltk.download('punkt_tab', download_dir='/app/nltk_data', quiet=True)" && \
    python -c "from transformers import AutoTokenizer, AutoModelForSequenceClassification; m='distilbert-base-uncased-finetuned-sst-2-english'; AutoTokenizer.from_pretrained(m, cache_dir='/app/hf_cache'); AutoModelForSequenceClassification.from_pretrained(m, cache_dir='/app/hf_cache')"
# Documentation only — EXPOSE does not publish the port; 7860 is the
# port the platform/operator is expected to map.
EXPOSE 7860
# Exec-form CMD so uvicorn runs as PID 1 and receives SIGTERM directly
# on container stop (shell form would wrap it in /bin/sh -c).
# NOTE(review): the container runs as root — consider adding a non-root
# USER (chown-ing /app, /app/model and the cache dirs first); confirm the
# target runtime's user requirements before changing.
CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
# |