JaphetHernandez commited on
Commit
ff1d6d5
verified
1 Parent(s): 612500f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -27
app.py CHANGED
@@ -2,7 +2,6 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStream
2
  import streamlit as st
3
  from huggingface_hub import login
4
  import pandas as pd
5
- from threading import Thread
6
 
7
  # Token Secret de Hugging Face
8
  huggingface_token = st.secrets["HUGGINGFACEHUB_API_TOKEN"]
@@ -33,22 +32,14 @@ def generate_response(input_text, temperature=0.7, max_new_tokens=20):
33
  top_p=0.9,
34
  temperature=temperature,
35
  num_return_sequences=3,
36
- eos_token_id=[tokenizer.eos_token_id]
37
  )
38
 
39
  try:
40
- t = Thread(target=model.generate, kwargs=generate_kwargs)
41
- t.start()
42
- t.join() # Asegura que la generación haya terminado
43
 
44
- outputs = []
45
- for text in streamer:
46
- outputs.append(text)
47
- if not outputs:
48
- raise ValueError("No se generó ninguna respuesta.")
49
-
50
- response = "".join(outputs).strip().split("\n")[0]
51
- return response
52
  except Exception as e:
53
  st.error(f"Error durante la generación: {e}")
54
  return "Error en la generación de texto."
@@ -61,28 +52,20 @@ def main():
61
  if uploaded_file is not None:
62
  df = pd.read_csv(uploaded_file)
63
  query = 'aspiring human resources specialist'
64
- value = 0.00
65
  if 'job_title' in df.columns:
66
- job_titles = df['job_title']
67
 
68
  # Definir el prompt con in-context learning
69
  initial_prompt = (
70
- "Step 1: Extract the first record from the dataframe df.\n"
71
- f" {df.iloc[0]['job_title']}\n"
72
- #f"List: {job_titles}\n"
73
- #"First job title: \n"
74
- #"\n"
75
- "Step 2: Calculate the cosine similarity score between the job_title of the extracted record {df.iloc[0]['job_title']} and the given {query} and assign it to {value}.\n"
76
- f"Query: '{query}'\n"
77
- "Cosine similarity score: \n"
78
- "Step 3: Print the value of the calculated cosine similarity"
79
- f"Result: {value}"
80
  )
81
 
82
-
83
  st.write("Prompt inicial con In-context Learning:\n")
84
  st.write(initial_prompt)
85
- st.write(query)
86
 
87
  if st.button("Generar respuesta"):
88
  with st.spinner("Generando respuesta..."):
 
2
  import streamlit as st
3
  from huggingface_hub import login
4
  import pandas as pd
 
5
 
6
  # Token Secret de Hugging Face
7
  huggingface_token = st.secrets["HUGGINGFACEHUB_API_TOKEN"]
 
32
  top_p=0.9,
33
  temperature=temperature,
34
  num_return_sequences=3,
35
+ eos_token_id=tokenizer.eos_token_id # Cambiado a un entero
36
  )
37
 
38
  try:
39
+ outputs = model.generate(**generate_kwargs)
 
 
40
 
41
+ response = tokenizer.decode(outputs[0], skip_special_tokens=True).strip()
42
+ return response.split("\n")[0]
 
 
 
 
 
 
43
  except Exception as e:
44
  st.error(f"Error durante la generación: {e}")
45
  return "Error en la generación de texto."
 
52
  if uploaded_file is not None:
53
  df = pd.read_csv(uploaded_file)
54
  query = 'aspiring human resources specialist'
55
+
56
  if 'job_title' in df.columns:
57
+ job_titles = df['job_title'].tolist()
58
 
59
  # Definir el prompt con in-context learning
60
  initial_prompt = (
61
+ f"Extract the first record from the dataframe df.\n"
62
+ f"First job title: '{df.iloc[0]['job_title']}'\n"
63
+ f"Calculate the cosine similarity between this job title and the query: '{query}'.\n"
64
+ "Print the cosine similarity score."
 
 
 
 
 
 
65
  )
66
 
 
67
  st.write("Prompt inicial con In-context Learning:\n")
68
  st.write(initial_prompt)
 
69
 
70
  if st.button("Generar respuesta"):
71
  with st.spinner("Generando respuesta..."):