Shushmita committed on
Commit
3b2f6fb
·
verified ·
1 Parent(s): 7a9dcd0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -21
app.py CHANGED
@@ -5,28 +5,15 @@ import torch
5
  # Load model and tokenizer
6
  @st.cache_resource()
7
  def load_model():
8
- model_name = "deepseek-ai/deepseek-coder-6.7b-instruct"
 
9
  tokenizer = AutoTokenizer.from_pretrained(model_name)
10
- model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, device_map="auto")
 
 
 
 
 
11
  return model, tokenizer
12
 
13
  model, tokenizer = load_model()
14
-
15
- # Streamlit UI
16
- st.title("CodeCorrect AI")
17
- st.subheader("AI-powered Code Autocorrect Tool")
18
-
19
- code_input = st.text_area("Enter your buggy code here:", height=200)
20
-
21
- if st.button("Correct Code"):
22
- if code_input.strip():
23
- prompt = f"### Fix the following code:\n{code_input}\n### Corrected version:\n"
24
- inputs = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True, max_length=512).to("cuda" if torch.cuda.is_available() else "cpu")
25
- outputs = model.generate(**inputs, max_length=512)
26
- corrected_code = tokenizer.decode(outputs[0], skip_special_tokens=True)
27
-
28
- st.text_area("Corrected Code:", corrected_code, height=200)
29
- else:
30
- st.warning("Please enter some code.")
31
-
32
- st.markdown("Powered by Hugging Face 🤗")
 
# Load model and tokenizer
@st.cache_resource()
def load_model():
    """Load and cache the code-correction model and its tokenizer.

    Returns:
        tuple: (model, tokenizer) — the causal-LM and its matching tokenizer.
    """
    # Smaller variant chosen so the app fits on modest hardware.
    checkpoint = "deepseek-ai/deepseek-coder-1.3b-instruct"

    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForCausalLM.from_pretrained(
        checkpoint,
        torch_dtype=torch.float16,
        device_map="auto",  # Automatically assigns to CPU/GPU if available
    )
    return model, tokenizer


model, tokenizer = load_model()