curiouscurrent committed on
Commit
8a7fda2
·
verified ·
1 Parent(s): e96e835

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -32
app.py CHANGED
@@ -1,35 +1,26 @@
1
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the text-generation model and its tokenizer from the Hugging Face Hub.
model_identifier = "curiouscurrent/omnicode"
model = AutoModelForCausalLM.from_pretrained(model_identifier)
tokenizer = AutoTokenizer.from_pretrained(model_identifier)

# Running transcript of every prompt seen so far; it is newline-joined into
# one context string on each call so the model sees prior turns.
history = []


def generate_response(prompt):
    """Generate a model response for *prompt*, conditioned on prior prompts.

    The prompt is appended to the module-level ``history`` and the whole
    accumulated history is fed to the model as a single context string.

    Args:
        prompt: The user's input text.

    Returns:
        The decoded model output (special tokens stripped).
    """
    history.append(prompt)
    final_prompt = "\n".join(history)

    # Tokenize the accumulated context, truncating to the model's 512-token cap.
    input_ids = tokenizer.encode(final_prompt, return_tensors="pt", max_length=512, truncation=True)

    # NOTE: max_length counts input + generated tokens, so long histories
    # leave little room for new text.
    output_ids = model.generate(input_ids, max_length=100, num_return_sequences=1)
    response = tokenizer.decode(output_ids[0], skip_special_tokens=True)

    return response


# FIX: Gradio 3+ removed the gr.inputs / gr.outputs namespaces; the
# components now live at the package top level as gr.Textbox.
input_prompt = gr.Textbox(lines=4, label="Input Prompt")
output_text = gr.Textbox(label="Response")

gr.Interface(
    generate_response,
    inputs=input_prompt,
    outputs=output_text,
    title="OmniCode",
    description="Multi programming coding assistant",
    theme="compact"
).launch()
 
1
import transformers
import torch
from transformers import AutoTokenizer

# Hub identifier of the checkpoint to load.
model = "codellama/CodeLlama-7b-hf"

tokenizer = AutoTokenizer.from_pretrained(model)

# Half-precision text-generation pipeline; device_map="auto" lets
# transformers place the weights on whatever hardware is available.
pipeline = transformers.pipeline(
    "text-generation",
    model=model,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Low-temperature sampling so the completion stays focused on the prompt.
sampling_options = dict(
    do_sample=True,
    top_k=10,
    temperature=0.1,
    top_p=0.95,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
    max_length=200,
)

sequences = pipeline(
    'import socket\n\ndef ping_exponential_backoff(host: str):',
    **sampling_options,
)

for generated in sequences:
    print(f"Result: {generated['generated_text']}")