SnehaPriyaaMP committed
Commit dc7e529 · verified · 1 parent: f00d671

Update app.py

Files changed (1):
1. app.py (+16 -13)
app.py CHANGED
@@ -5,7 +5,6 @@ import os
 import gradio as gr
 import sentencepiece
 
-
 os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:120'
 model_id = "thesven/Llama3-8B-SFT-code_bagel-bnb-4bit"
 tokenizer_path = "./"
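
One note on the allocator line kept by this hunk: max_split_size_mb:120 tells PyTorch's caching allocator not to split blocks larger than 120 MB, which can reduce fragmentation-related OOMs on small GPUs. The variable is read when the CUDA allocator initializes, so it has to be in the environment before the first CUDA allocation, which is why the app sets it near the top of the script. A minimal sketch of the pattern:

import os

# Must be set before the first CUDA allocation: the caching allocator
# reads PYTORCH_CUDA_ALLOC_CONF when it initializes.
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'max_split_size_mb:120'

import torch

# The first CUDA allocation (if a GPU is present) initializes the
# allocator with the configured policy.
if torch.cuda.is_available():
    warmup = torch.zeros(1, device='cuda')
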
@@ -14,11 +13,15 @@ DESCRIPTION = """
 # thesven/Llama3-8B-SFT-code_bagel-bnb-4bit
 """
 
+# Check if CUDA is available and set device accordingly
+device = "cuda" if torch.cuda.is_available() else "cpu"
+print(f"Using device: {device}")
+
 tokenizer = AutoTokenizer.from_pretrained(model_id, device_map="auto", trust_remote_code=True)
-model = AutoModelForCausalLM.from_pretrained(model_id, device_map="cuda", torch_dtype=torch.bfloat16, trust_remote_code=True)
+model = AutoModelForCausalLM.from_pretrained(model_id, device_map=device, torch_dtype=torch.bfloat16 if device == "cuda" else torch.float32, trust_remote_code=True)
 
-def format_prompt(user_message, system_message="You are an expert developer in all programming languages. Help me with my code. Answer any questions I have with code examples."):
-    prompt = f"<|im_start|>assistant\n{system_message}<|im_end|>\n<|im_start|>\nuser\n{user_message}<|im_end|>\nassistant\n"
+def format_prompt(user_message, system_message="You are an expert developer in all programming languages. Help me with my code. Answer any questions I have with code examples."):
+    prompt = f"assistant\n{system_message}\n\nuser\n{user_message}\nassistant\n"
     return prompt
 
 @spaces.GPU
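
The substance of the hunk above: the old code hard-coded device_map="cuda", which fails outright on CPU-only hardware, while the new code probes for a GPU and picks a dtype to match (bfloat16 to halve memory on the GPU, float32 as the safe CPU default). A minimal sketch of the same load pattern in isolation (the load_model helper is illustrative, not part of the app):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def load_model(model_id: str):
    # Prefer a visible GPU; otherwise fall back to CPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # bfloat16 halves memory on supported GPUs; CPUs generally want float32.
    dtype = torch.bfloat16 if device == "cuda" else torch.float32
    tokenizer = AutoTokenizer.from_pretrained(model_id, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        device_map=device,
        torch_dtype=dtype,
        trust_remote_code=True,
    )
    return tokenizer, model, device
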
@@ -26,30 +29,30 @@ def predict(message, system_message, max_new_tokens=600, temperature=3.5, top_p=
     formatted_prompt = format_prompt(message, system_message)
 
     input_ids = tokenizer.encode(formatted_prompt, return_tensors='pt')
-    input_ids = input_ids.to(model.device)
+    input_ids = input_ids.to(device)
 
     response_ids = model.generate(
         input_ids,
         max_length=max_new_tokens + input_ids.shape[1],
-        temperature=temperature,
-        top_p=top_p,
-        top_k=top_k,
+        temperature=temperature,
+        top_p=top_p,
+        top_k=top_k,
         no_repeat_ngram_size=9,
         pad_token_id=tokenizer.eos_token_id,
         do_sample=do_sample
     )
 
     response = tokenizer.decode(response_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True)
-    truncate_str = "<|im_end|>"
+    truncate_str = ""
     if truncate_str and truncate_str in response:
         response = response.split(truncate_str)[0]
 
-    return [("bot", response)]
-
+    return [("bot", response)]
+
 with gr.Blocks() as demo:
     gr.Markdown(DESCRIPTION)
     with gr.Group():
-        system_prompt = gr.Textbox(placeholder='Provide a System Prompt In The First Person', label='System Prompt', lines=2, value="You are an expert developer in all programming languages. Help me with my code. Answer any questions I have with code examples.")
+        system_prompt = gr.Textbox(placeholder='Provide a System Prompt In The First Person', label='System Prompt', lines=2, value="You are an expert developer in all programming languages. Help me with my code. Answer any questions I have with code examples.")
 
     with gr.Group():
         chatbot = gr.Chatbot(label='thesven/Llama3-8B-SFT-code_bagel-bnb-4bit')
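
Two details in this hunk are easy to miss. First, input tensors now move to the probed device rather than model.device, keeping prompt and weights on the same hardware. Second, the output budget is expressed as max_length = prompt length + max_new_tokens, and the decode call slices past input_ids.shape[-1] so only newly generated tokens are returned. transformers' generate() also accepts max_new_tokens directly, which states the same budget without the arithmetic; a hedged sketch of that equivalent call (variable names follow the app):

# Equivalent generation call using the max_new_tokens kwarg instead of
# manual length arithmetic; the cap applies to newly generated tokens only.
response_ids = model.generate(
    input_ids,
    max_new_tokens=max_new_tokens,
    temperature=temperature,
    top_p=top_p,
    top_k=top_k,
    no_repeat_ngram_size=9,
    pad_token_id=tokenizer.eos_token_id,
    do_sample=do_sample,
)
# Strip the prompt before decoding so only the model's reply remains.
response = tokenizer.decode(response_ids[0, input_ids.shape[-1]:], skip_special_tokens=True)
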
@@ -59,7 +62,7 @@ with gr.Blocks() as demo:
         submit_button = gr.Button('Submit', variant='primary')
 
         with gr.Accordion(label='Advanced options', open=False):
-            max_new_tokens = gr.Slider(label='Max New Tokens', minimum=1, maximum=55000, step=1, value=512)
+            max_new_tokens = gr.Slider(label='Max New Tokens', minimum=1, maximum=55000, step=1, value=512)
             temperature = gr.Slider(label='Temperature', minimum=0.1, maximum=4.0, step=0.1, value=0.1)
             top_p = gr.Slider(label='Top-P (nucleus sampling)', minimum=0.05, maximum=1.0, step=0.05, value=0.9)
             top_k = gr.Slider(label='Top-K', minimum=1, maximum=1000, step=1, value=40)
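
A final note on how predict's return value reaches the UI: gr.Chatbot's tuple-style history expects (user_message, bot_reply) pairs, so feeding [("bot", response)] to it directly would render the literal string "bot" as the user turn. A sketch of plausible wiring that unwraps the pair first (the user-input Textbox is not shown in this diff, so the textbox name below is assumed):

# Sketch only: assumes Gradio's tuple-style chat history and a Textbox
# component named `textbox` (hypothetical; not visible in this diff).
def respond(message, system_message, history):
    bot_reply = predict(message, system_message)[0][1]  # unwrap ("bot", text)
    return history + [(message, bot_reply)], ""  # new history, cleared input

submit_button.click(
    respond,
    inputs=[textbox, system_prompt, chatbot],
    outputs=[chatbot, textbox],
)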
 