Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -2,14 +2,14 @@ import torch
|
|
| 2 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 3 |
import gradio as gr
|
| 4 |
|
| 5 |
-
# Load DeepSeek Coder
|
| 6 |
model_id = "deepseek-ai/deepseek-coder-1.3b-base"
|
| 7 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
| 8 |
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
|
| 9 |
device = "cuda" if torch.cuda.is_available() else "cpu"
|
| 10 |
model.to(device)
|
| 11 |
|
| 12 |
-
#
|
| 13 |
def debug_code(code, language):
|
| 14 |
prompt = f"### Task: Fix and explain the following {language} code:\n{code}\n### Output:\n"
|
| 15 |
inputs = tokenizer(prompt, return_tensors="pt").to(device)
|
|
@@ -17,16 +17,40 @@ def debug_code(code, language):
|
|
| 17 |
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 18 |
return response[len(prompt):].strip()
|
| 19 |
|
| 20 |
-
# Gradio UI
|
| 21 |
-
gr.
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr

# Load DeepSeek Coder model once at module import so every request reuses it.
model_id = "deepseek-ai/deepseek-coder-1.3b-base"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# SECURITY NOTE(review): trust_remote_code=True executes Python shipped inside the
# model repo — keep only if the deepseek-ai repo is trusted/pinned.
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
# Prefer GPU when available; fall back to CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
# Inference-only service: switch off dropout etc. for deterministic generations.
model.eval()
|
| 11 |
|
| 12 |
+
# Debug function
|
| 13 |
def debug_code(code, language):
|
| 14 |
prompt = f"### Task: Fix and explain the following {language} code:\n{code}\n### Output:\n"
|
| 15 |
inputs = tokenizer(prompt, return_tensors="pt").to(device)
|
|
|
|
| 17 |
response = tokenizer.decode(outputs[0], skip_special_tokens=True)
|
| 18 |
return response[len(prompt):].strip()
|
| 19 |
|
| 20 |
+
# Gradio UI with white theme and modern layout.
# CSS is hoisted to a module-level constant so the Blocks() call stays readable.
_CSS = """
#logo-container {
    display: flex;
    justify-content: center;
    margin-top: 10px;
}
.title-text {
    text-align: center;
    font-size: 28px;
    font-weight: bold;
    color: #222;
    margin-top: 10px;
    margin-bottom: 30px;
}
.gr-button {
    background-color: #2F80ED !important;
    color: white !important;
    font-weight: bold;
    border-radius: 8px;
    padding: 10px 16px;
}
"""

with gr.Blocks(css=_CSS) as demo:
    with gr.Column():
        # Centered logo row (layout driven by the #logo-container CSS rule above).
        with gr.Row(elem_id="logo-container"):
            gr.Image(
                value="763f8a67-91ed-4f76-9296-d64a30676efd.png",
                width=100,
                height=100,
                show_label=False,
                show_download_button=False,
            )
        gr.Markdown("<div class='title-text'> Eternos Beta — The Coder's friend</div>")

        # Inputs: target language selector and the buggy-code textbox.
        language = gr.Dropdown(
            ["Python", "C", "C++", "JavaScript"],
            label="Select Programming Language",
            value="Python",
        )
        code_input = gr.Textbox(
            lines=12,
            label="Paste Buggy Code",
            placeholder="Paste or write buggy code here...",
        )
        # Output: model's fixed code plus explanation.
        output = gr.Textbox(label="Fixed Code & Explanation", lines=14)

        # Wire the button straight to the debug_code handler.
        gr.Button("🧠 Debug Code").click(
            fn=debug_code,
            inputs=[code_input, language],
            outputs=output,
        )

# Launch the app
demo.launch()
|