ahmadouna committed
Commit dee15f4
1 Parent(s): f017d85

update app.py

Files changed (1)
  1. app.py +34 -2
app.py CHANGED
@@ -1,7 +1,8 @@
+
 import streamlit as st
 from transformers import pipeline
 from textblob import TextBlob
-
+"""
 pipe = pipeline('sentiment-analysis')
 st.title("Analyse de sentiment")
 #Textbox for text user is entering
@@ -10,4 +11,35 @@ text = st.text_input('Entrer votre texte') #text is stored in this variable
 out = pipe(text)
 
 st.write("Sentiment du text: ")
-st.write(out)
+st.write(out)
+"""
+
+import transformers
+import torch
+
+model_name = "OpenLLM-France/Claire-7B-0.1"
+
+tokenizer = transformers.AutoTokenizer.from_pretrained(model_name)
+model = transformers.AutoModelForCausalLM.from_pretrained(model_name,
+    device_map="auto",
+    torch_dtype=torch.bfloat16,
+    load_in_4bit=True,  # For efficient inference, if supported by the GPU.
+)
+
+pipeline = transformers.pipeline("text-generation", model=model, tokenizer=tokenizer)
+generation_kwargs = dict(
+    num_return_sequences=1,  # Number of variants to generate.
+    return_full_text=False,  # Do not include the prompt in the generated text.
+    max_new_tokens=200,  # Maximum length of the output text.
+    do_sample=True, top_k=10, temperature=1.0,  # Sampling parameters.
+    pad_token_id=tokenizer.eos_token_id,  # Just to avoid a harmless warning.
+)
+
+prompt = """\
+- Bonjour Dominique, qu'allez-vous nous cuisiner aujourd'hui ?
+- Bonjour Camille,\
+"""
+completions = pipeline(prompt, **generation_kwargs)
+for completion in completions:
+    print(prompt + " […]" + completion['generated_text'])
+
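
Note that, as committed, the new generation code writes its output with print(), which goes to the server's stdout rather than the Streamlit page, and the 7B model is reloaded on every script rerun. A minimal sketch of how the generation could instead be wired into the Streamlit UI is below; it is not part of the commit, the French labels are hypothetical placeholders mirroring the app's existing strings, and it assumes Streamlit >= 1.18 (for st.cache_resource), the bitsandbytes package, and a CUDA GPU.

import streamlit as st
import torch
import transformers

@st.cache_resource  # Load the 7B model once per server process, not on every rerun.
def load_generator():
    tokenizer = transformers.AutoTokenizer.from_pretrained("OpenLLM-France/Claire-7B-0.1")
    model = transformers.AutoModelForCausalLM.from_pretrained(
        "OpenLLM-France/Claire-7B-0.1",
        device_map="auto",
        torch_dtype=torch.bfloat16,
        load_in_4bit=True,  # Requires the bitsandbytes package and a CUDA GPU.
    )
    return transformers.pipeline("text-generation", model=model, tokenizer=tokenizer)

st.title("Génération de texte")  # Hypothetical title, not from the commit.
prompt = st.text_area("Entrer votre texte")
if st.button("Générer") and prompt:
    generator = load_generator()
    with st.spinner("Génération en cours..."):
        completions = generator(
            prompt,
            num_return_sequences=1,
            return_full_text=False,  # Return only the completion, not the prompt.
            max_new_tokens=200,
            do_sample=True, top_k=10, temperature=1.0,
            pad_token_id=generator.tokenizer.eos_token_id,
        )
    st.write(completions[0]["generated_text"])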
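A compatibility caveat on the loading code: recent transformers releases deprecate passing load_in_4bit directly to from_pretrained in favor of an explicit BitsAndBytesConfig. A sketch of the equivalent setup under that newer API (same model, same bfloat16 compute dtype; again assuming bitsandbytes and a CUDA GPU are available):

import torch
import transformers

# Explicit 4-bit quantization config, replacing the bare load_in_4bit=True kwarg.
quantization_config = transformers.BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,  # Matches the bfloat16 used in the script above.
)
model = transformers.AutoModelForCausalLM.from_pretrained(
    "OpenLLM-France/Claire-7B-0.1",
    device_map="auto",
    quantization_config=quantization_config,
)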