Akjava committed on
Commit
6c228f1
·
verified ·
1 Parent(s): 25a7a48

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -210,7 +210,7 @@ def trans(text):
210
  return None
211
 
212
  # γƒ†γ‚­γ‚Ήγƒˆγ«θ¨€θͺžγ‚Ώγ‚°γ‚’δ»˜δΈŽγ—γ€γƒγ‚€γƒˆεˆ—γ«ε€‰ζ›
213
- input_text = f"<2ja>{text}".encode('utf-8')
214
 
215
  # γƒˆγƒΌγ‚―γƒŠγ‚€γ‚Ί
216
  tokens = llm.tokenize(input_text)
@@ -284,12 +284,12 @@ def respond(
284
  repeat_penalty: float,
285
  ):
286
  llama = Llama("models/madlad400-3b-mt-q8_0.gguf")
287
- #tokens = llama.tokenize(f"{message}".encode('utf-8'))#
288
  tokens = llama.tokenize(b"What is the capital of France?")
289
  llama.encode(tokens)
290
  tokens = [llama.decoder_start_token()]
291
  for token in llama.generate(tokens, top_k=40, top_p=0.95, temp=1, repeat_penalty=1.0):
292
- yield (llama.detokenize([token]))
293
  if token == llama.token_eos():
294
  break
295
 
 
210
  return None
211
 
212
  # γƒ†γ‚­γ‚Ήγƒˆγ«θ¨€θͺžγ‚Ώγ‚°γ‚’δ»˜δΈŽγ—γ€γƒγ‚€γƒˆεˆ—γ«ε€‰ζ›
213
+ input_text = f"<2ja>{text}".encode('utf-8')
214
 
215
  # γƒˆγƒΌγ‚―γƒŠγ‚€γ‚Ί
216
  tokens = llm.tokenize(input_text)
 
284
  repeat_penalty: float,
285
  ):
286
  llama = Llama("models/madlad400-3b-mt-q8_0.gguf")
287
+ #tokens = llama.tokenize(f"<2ja>{message}".encode('utf-8'))#
288
  tokens = llama.tokenize(b"What is the capital of France?")
289
  llama.encode(tokens)
290
  tokens = [llama.decoder_start_token()]
291
  for token in llama.generate(tokens, top_k=40, top_p=0.95, temp=1, repeat_penalty=1.0):
292
+ yield (llama.detokenize([token]).decode())
293
  if token == llama.token_eos():
294
  break
295