JaphetHernandez committed on
Commit
fde1d1c
verified
1 Parent(s): 58a4111

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -14,22 +14,22 @@ tokenizer = AutoTokenizer.from_pretrained(model_id)
14
  model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
15
  tokenizer.pad_token = tokenizer.eos_token
16
 
17
- MAX_INPUT_TOKEN_LENGTH = 4096
18
 
19
- def generate_response(input_text, temperature=0.5, max_new_tokens=50):
20
  input_ids = tokenizer.encode(input_text, return_tensors='pt').to(model.device)
21
 
22
  if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
23
  input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
24
  st.warning(f"Se recortó la entrada porque excedió el límite de {MAX_INPUT_TOKEN_LENGTH} tokens.")
25
 
26
- streamer = TextIteratorStreamer(tokenizer, timeout=30.0, skip_prompt=True, skip_special_tokens=True)
27
  generate_kwargs = dict(
28
  input_ids=input_ids,
29
  streamer=streamer,
30
  max_new_tokens=max_new_tokens,
31
  do_sample=True,
32
- top_k=40,
33
  top_p=0.9,
34
  temperature=temperature,
35
  eos_token_id=[tokenizer.eos_token_id]
 
14
  model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto")
15
  tokenizer.pad_token = tokenizer.eos_token
16
 
17
+ MAX_INPUT_TOKEN_LENGTH = 10000
18
 
19
+ def generate_response(input_text, temperature=0.2, max_new_tokens=20):
20
  input_ids = tokenizer.encode(input_text, return_tensors='pt').to(model.device)
21
 
22
  if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
23
  input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
24
  st.warning(f"Se recortó la entrada porque excedió el límite de {MAX_INPUT_TOKEN_LENGTH} tokens.")
25
 
26
+ streamer = TextIteratorStreamer(tokenizer, timeout=120.0, skip_prompt=True, skip_special_tokens=True)
27
  generate_kwargs = dict(
28
  input_ids=input_ids,
29
  streamer=streamer,
30
  max_new_tokens=max_new_tokens,
31
  do_sample=True,
32
+ top_k=20,
33
  top_p=0.9,
34
  temperature=temperature,
35
  eos_token_id=[tokenizer.eos_token_id]