LeenAnabtawe committed
Commit a2c79e6 · verified · 1 Parent(s): b11f128

Update app.py

Files changed (1): app.py +15 -11
app.py CHANGED
@@ -2,28 +2,31 @@ import gradio as gr
 from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
-# Open models for code generation
+# Reliable, open models
 models = {
-    "CodeGen 2B (Salesforce)": "Salesforce/codegen-2B-multi",
-    "WizardCoder 1B": "WizardLM/WizardCoder-1B-V1.0",
-    "CodeParrot Small": "codeparrot/codeparrot-small",
-    "GPT-J-6B (Python)": "EleutherAI/gpt-j-6B"  # open alternative to Phind LLaMA
+    "CodeGen 2B": "Salesforce/codegen-2B-multi",
+    "CodeParrot": "codeparrot/codeparrot-small",
+    "GPT-J-6B": "EleutherAI/gpt-j-6B",
+    "GPT2": "gpt2"  # simple fallback model
 }
 
 # Load the models
 loaded_models = {}
 for name, model_id in models.items():
     tokenizer = AutoTokenizer.from_pretrained(model_id)
-    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype=torch.float16)
+    model = AutoModelForCausalLM.from_pretrained(
+        model_id,
+        device_map="auto",
+        torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32
+    )
     loaded_models[name] = (tokenizer, model)
 
-# Code generation function
+# Generation function
 def generate_code(prompt, model_name):
     tokenizer, model = loaded_models[model_name]
     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
     outputs = model.generate(**inputs, max_new_tokens=150)
-    code = tokenizer.decode(outputs[0], skip_special_tokens=True)
-    return code
+    return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 # Gradio interface
 demo = gr.Interface(
@@ -33,8 +36,9 @@ demo = gr.Interface(
         gr.Radio(choices=list(models.keys()), label="Choose a model")
     ],
     outputs=gr.Code(label="Generated code"),
-    title="Code Generation with AI Models",
-    description="Choose an AI model and enter a code description to generate it automatically"
+    title="Code Generation with Open AI Models",
+    description="Choose an open model and enter a description to generate the code automatically"
 )
 
 demo.launch()
+
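
For anyone reviewing the dtype change: fp16 weights halve GPU memory, but fp16 kernels are unsupported or very slow on CPU, which is why the commit falls back to float32 when no GPU is present. Below is a minimal smoke test of the same loading pattern, using the "gpt2" fallback entry because it is the smallest model in the dict; the prompt string is only an illustration, not part of the commit.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "gpt2"  # smallest entry in the app's `models` dict
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",  # requires the `accelerate` package
    # fp16 on GPU to halve memory; fp32 on CPU where fp16 is impractical
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
)

inputs = tokenizer("def fibonacci(n):", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=150)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))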
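
One design note on the loading loop: it still materializes every model at start-up, and GPT-J-6B alone is roughly 24 GB in float32 (about 12 GB in float16), which can exceed the memory of a typical Space. A hypothetical lazy-loading variant is sketched below; it is not what the commit does, and `get_model` and `_cache` are illustrative names. It trades first-request latency for a smaller resident footprint.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

_cache = {}  # hypothetical memo of model_id -> (tokenizer, model)

def get_model(model_id):
    # Load and cache the pair on first use instead of at start-up.
    if model_id not in _cache:
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            device_map="auto",
            torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
        )
        _cache[model_id] = (tokenizer, model)
    return _cache[model_id]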