Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
|
@@ -8,13 +8,14 @@ import torch
|
|
| 8 |
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
|
| 9 |
|
| 10 |
DESCRIPTION = """\
|
| 11 |
-
# Gemma 2 2B Neogenesis ITA
|
| 12 |
|
| 13 |
Fine-tuned version of Google/gemma-2-2b-it to improve the performance on the Italian language.
|
| 14 |
Small (2.6 B parameters) but good model, with 8k context length.
|
| 15 |
|
| 16 |
[🪪 **Model card**](https://huggingface.co/anakin87/gemma-2-2b-neogenesis-ita)
|
| 17 |
-
|
|
|
|
| 18 |
"""
|
| 19 |
|
| 20 |
MAX_MAX_NEW_TOKENS = 2048
|
|
|
|
| 8 |
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
|
| 9 |
|
| 10 |
DESCRIPTION = """\
|
| 11 |
+
# Gemma 2 2B Neogenesis ITA 🇮🇹
|
| 12 |
|
| 13 |
Fine-tuned version of Google/gemma-2-2b-it to improve the performance on the Italian language.
|
| 14 |
Small (2.6 B parameters) but good model, with 8k context length.
|
| 15 |
|
| 16 |
[🪪 **Model card**](https://huggingface.co/anakin87/gemma-2-2b-neogenesis-ita)
|
| 17 |
+
|
| 18 |
+
[📓 **Kaggle notebook**](https://www.kaggle.com/code/anakin87/post-training-gemma-for-italian-and-beyond) - Learn how this model was trained.
|
| 19 |
"""
|
| 20 |
|
| 21 |
MAX_MAX_NEW_TOKENS = 2048
|