DevBM committed on
Commit
3733b71
·
verified ·
1 Parent(s): d1f0550

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +34 -11
app.py CHANGED
@@ -1,11 +1,34 @@
1
import streamlit as st
from transformers import pipeline

# Build the text-generation pipeline once at module import.
pipe = pipeline(
    task='text-generation',
    model='meta-llama/Meta-Llama-3-8B',
)

# Read the user's message from the chat widget (None until submitted).
user_message = st.chat_input(placeholder='Your Message')

# BUG FIX: the original bound the chat input to `x` but never used it —
# it always generated from the hard-coded prompt "Hi", on every rerun.
# Generate only after the user actually submits a message, and feed that
# message to the model.
if user_message:
    output = pipe(user_message)
    st.write(output[0]['generated_text'])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Fix the RNG seed so any sampled generation is reproducible across runs.
torch.random.manual_seed(0)

# Hoisted: the checkpoint id was previously duplicated in two calls.
MODEL_ID = "microsoft/Phi-3-mini-4k-instruct"

# BUG FIX: device_map was hard-coded to "cuda", which crashes on
# CPU-only machines; fall back gracefully when no GPU is available.
device = "cuda" if torch.cuda.is_available() else "cpu"

# NOTE(review): trust_remote_code=True executes Python shipped in the
# model repository at load time — acceptable only because this is a
# well-known publisher's checkpoint; confirm before pointing elsewhere.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    device_map=device,
    torch_dtype="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)

# Example multi-turn chat history passed to the chat-aware pipeline.
messages = [
    {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"},
    {"role": "assistant", "content": "Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey."},
    {"role": "user", "content": "What about solving an 2x + 3 = 7 equation?"},
]

pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)

# Greedy decoding: do_sample=False makes generation deterministic.
# BUG FIX: the original also passed temperature=0.0, which is ignored
# under greedy decoding and triggers a transformers warning — dropped.
generation_args = {
    "max_new_tokens": 500,
    "return_full_text": False,
    "do_sample": False,
}

output = pipe(messages, **generation_args)
print(output[0]['generated_text'])