A B Vijay Kumar committed
Commit cd152af · Parent(s): 6a22981

Update app.py

Files changed (1): app.py (+25, -23)
app.py CHANGED
@@ -1,35 +1,37 @@
 import gradio as gr
-#from transformers import AutoTokenizer, AutoModelForCausalLM
-from transformers import pipeline
-
 model_name = "vijjuk/codegen-350M-mono-python-18k-alpaca"
-pipe = pipeline("python-fine-tuning", model=model_name)
+demo = gr.load(model_name, src="models")
+
+demo.launch()
 
 
 
 
 
+#import gradio as gr
+#from transformers import AutoTokenizer, AutoModelForCausalLM
+
+
 #base_model = AutoModelForCausalLM.from_pretrained(model_name)
 #tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
 #tokenizer.pad_token = tokenizer.eos_token
 #tokenizer.padding_side = "right"
 
-def query(instruction, input):
-    prompt = f"""### Instruction:
-Use the Task below and the Input given to write the Response, which is a programming code that can solve the Task.
-### Task:
-{instruction}
-### Input:
-{input}
-### Response:
-"""
-    #input_ids = tokenizer(prompt, return_tensors="pt", truncation=True)
-    #output_base = base_model.generate(input_ids=input_ids, max_new_tokens=500, do_sample=True, top_p=0.9,temperature=0.5)
-    #response = "{tokenizer.batch_decode(output_base.detach().cpu().numpy(), skip_special_tokens=True)[0][len(prompt):]}"
-    #return response
-    return pipe(prompt)[0]["prompt"]
-
-inputs = ["text", "text"]
-outputs = "text"
-iface = gr.Interface(fn=query, inputs=inputs, outputs=outputs)
-iface.launch()
+# def query(instruction, input):
+# prompt = f"""### Instruction:
+# Use the Task below and the Input given to write the Response, which is a programming code that can solve the Task.
+# ### Task:
+# {instruction}
+# ### Input:
+# {input}
+# ### Response:
+# """
+# input_ids = tokenizer(prompt, return_tensors="pt", truncation=True)
+# output_base = base_model.generate(input_ids=input_ids, max_new_tokens=500, do_sample=True, top_p=0.9,temperature=0.5)
+# response = "{tokenizer.batch_decode(output_base.detach().cpu().numpy(), skip_special_tokens=True)[0][len(prompt):]}"
+# return response
+
+#inputs = ["text", "text"]
+#outputs = "text"
+#iface = gr.Interface(fn=query, inputs=inputs, outputs=outputs)
+#iface.launch()
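
Note on the change: gr.load(model_name, src="models") builds the demo directly on top of the hosted Hugging Face inference endpoint for the checkpoint, so the Space no longer downloads weights or defines its own gr.Interface. The removed pipeline call would not have worked as written: "python-fine-tuning" is not a recognized transformers task name (a causal LM like this one is served by "text-generation"), and text-generation pipelines return their output under the "generated_text" key, not "prompt". A minimal sketch of a corrected local-pipeline version, hedged since the commit itself takes the gr.load route instead:

from transformers import pipeline

# "text-generation" is a valid task for this causal LM; the removed
# code's "python-fine-tuning" string is not a transformers task name.
pipe = pipeline(
    "text-generation",
    model="vijjuk/codegen-350M-mono-python-18k-alpaca",
)

prompt = "### Instruction:\nWrite a function that reverses a string.\n### Response:\n"
result = pipe(prompt, max_new_tokens=128, do_sample=True, top_p=0.9, temperature=0.5)

# The pipeline returns "generated_text" (prompt + completion), not "prompt".
print(result[0]["generated_text"][len(prompt):])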
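
For completeness, the commented-out manual path carries two latent bugs if re-enabled: tokenizer(prompt, return_tensors="pt", truncation=True) returns a BatchEncoding, not the bare tensor that generate(input_ids=...) expects, and the response = "{...}" line is missing its f prefix, so it would return the literal brace expression instead of the decoded text. A hedged sketch of a working version, assuming torch and transformers are installed (input is also renamed input_text to avoid shadowing the Python builtin):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "vijjuk/codegen-350M-mono-python-18k-alpaca"
base_model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right"

def query(instruction, input_text):
    prompt = f"""### Instruction:
Use the Task below and the Input given to write the Response, which is a programming code that can solve the Task.
### Task:
{instruction}
### Input:
{input_text}
### Response:
"""
    # Extract the tensors from the BatchEncoding before calling generate().
    encoded = tokenizer(prompt, return_tensors="pt", truncation=True)
    with torch.no_grad():
        output = base_model.generate(
            input_ids=encoded["input_ids"],
            attention_mask=encoded["attention_mask"],
            max_new_tokens=500,
            do_sample=True,
            top_p=0.9,
            temperature=0.5,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode, then strip the echoed prompt from the front (a real f-string
    # here, unlike the commented-out original, which quoted the braces).
    decoded = tokenizer.batch_decode(output.cpu().numpy(), skip_special_tokens=True)[0]
    return decoded[len(prompt):]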