Tanveerooooooo committed on
Commit 559574c · verified · 1 Parent(s): 52eb9ed

Update app.py

Files changed (1): app.py +9 -8
app.py CHANGED
@@ -2,20 +2,20 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import gradio as gr
 
-# Load model
-model_id = "bigcode/starcoderbase"
+# Load DeepSeek Coder
+model_id = "deepseek-ai/deepseek-coder-1.3b-base"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
 device = "cuda" if torch.cuda.is_available() else "cpu"
 model.to(device)
 
-# Inference function
+# Debugging function
 def debug_code(code, language):
-    prompt = f"### Debug the following {language} code:\n{code}\n### Fixed Code and Explanation:\n"
+    prompt = f"### Task: Fix and explain the following {language} code:\n{code}\n### Output:\n"
     inputs = tokenizer(prompt, return_tensors="pt").to(device)
     outputs = model.generate(**inputs, max_new_tokens=300, pad_token_id=tokenizer.eos_token_id)
-    result = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    return result[len(prompt):].strip()
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return response[len(prompt):].strip()
 
 # Gradio UI
 gr.Interface(
@@ -25,7 +25,8 @@ gr.Interface(
         gr.Dropdown(["Python", "C", "C++", "JavaScript"], label="Code Language", value="Python")
     ],
     outputs=gr.Textbox(label="Fixed Code & Explanation"),
-    title="🛠 Eternos Debugger — Powered by StarCoder",
-    description="Paste buggy code, select language, and get a fix + explanation.",
+    title="🛠 Eternos Debugger — DeepSeek Edition",
+    description="Paste code, select language, and get a fix + explanation. Powered by DeepSeek Coder.",
     theme="default"
 ).launch()
+
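
For reference, the new prompt path can be smoke-tested outside the Gradio UI with a short script like the one below. This is a minimal sketch, not part of the commit: the buggy add() sample and the direct print call are illustrative, and it assumes the deepseek-ai/deepseek-coder-1.3b-base checkpoint can be downloaded and fits in local memory.

# Minimal smoke test for the updated debug_code() prompt (illustrative, not part of the commit).
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "deepseek-ai/deepseek-coder-1.3b-base"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

def debug_code(code, language):
    # Same prompt format and generation settings as the committed app.py.
    prompt = f"### Task: Fix and explain the following {language} code:\n{code}\n### Output:\n"
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(**inputs, max_new_tokens=300, pad_token_id=tokenizer.eos_token_id)
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response[len(prompt):].strip()

# Deliberately buggy sample input (hypothetical example).
buggy = "def add(a, b):\n    return a - b"
print(debug_code(buggy, "Python"))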